aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-10-02 16:38:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-02 16:38:27 -0400
commitaecdc33e111b2c447b622e287c6003726daa1426 (patch)
tree3e7657eae4b785e1a1fb5dfb225dbae0b2f0cfc6 /drivers
parenta20acf99f75e49271381d65db097c9763060a1e8 (diff)
parenta3a6cab5ea10cca64d036851fe0d932448f2fe4f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller: 1) GRE now works over ipv6, from Dmitry Kozlov. 2) Make SCTP more network namespace aware, from Eric Biederman. 3) TEAM driver now works with non-ethernet devices, from Jiri Pirko. 4) Make openvswitch network namespace aware, from Pravin B Shelar. 5) IPV6 NAT implementation, from Patrick McHardy. 6) Server side support for TCP Fast Open, from Jerry Chu and others. 7) Packet BPF filter supports MOD and XOR, from Eric Dumazet and Daniel Borkmann. 8) Increate the loopback default MTU to 64K, from Eric Dumazet. 9) Use a per-task rather than per-socket page fragment allocator for outgoing networking traffic. This benefits processes that have very many mostly idle sockets, which is quite common. From Eric Dumazet. 10) Use up to 32K for page fragment allocations, with fallbacks to smaller sizes when higher order page allocations fail. Benefits are a) less segments for driver to process b) less calls to page allocator c) less waste of space. From Eric Dumazet. 11) Allow GRO to be used on GRE tunnels, from Eric Dumazet. 12) VXLAN device driver, one way to handle VLAN issues such as the limitation of 4096 VLAN IDs yet still have some level of isolation. From Stephen Hemminger. 13) As usual there is a large boatload of driver changes, with the scale perhaps tilted towards the wireless side this time around. Fix up various fairly trivial conflicts, mostly caused by the user namespace changes. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1012 commits) hyperv: Add buffer for extended info after the RNDIS response message. 
hyperv: Report actual status in receive completion packet hyperv: Remove extra allocated space for recv_pkt_list elements hyperv: Fix page buffer handling in rndis_filter_send_request() hyperv: Fix the missing return value in rndis_filter_set_packet_filter() hyperv: Fix the max_xfer_size in RNDIS initialization vxlan: put UDP socket in correct namespace vxlan: Depend on CONFIG_INET sfc: Fix the reported priorities of different filter types sfc: Remove EFX_FILTER_FLAG_RX_OVERRIDE_IP sfc: Fix loopback self-test with separate_tx_channels=1 sfc: Fix MCDI structure field lookup sfc: Add parentheses around use of bitfield macro arguments sfc: Fix null function pointer in efx_sriov_channel_type vxlan: virtual extensible lan igmp: export symbol ip_mc_leave_group netlink: add attributes to fdb interface tg3: unconditionally select HWMON support when tg3 is enabled. Revert "net: ti cpsw ethernet: allow reading phy interface mode from DT" gre: fix sparse warning ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/bcma/Kconfig4
-rw-r--r--drivers/bcma/bcma_private.h2
-rw-r--r--drivers/bcma/core.c2
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c28
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c9
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c123
-rw-r--r--drivers/bcma/driver_pci.c6
-rw-r--r--drivers/bcma/driver_pci_host.c8
-rw-r--r--drivers/bcma/host_pci.c12
-rw-r--r--drivers/bcma/host_soc.c2
-rw-r--r--drivers/bcma/main.c27
-rw-r--r--drivers/bcma/sprom.c2
-rw-r--r--drivers/bluetooth/bcm203x.c8
-rw-r--r--drivers/bluetooth/bfusb.c12
-rw-r--r--drivers/bluetooth/bluecard_cs.c7
-rw-r--r--drivers/bluetooth/bpa10x.c8
-rw-r--r--drivers/bluetooth/bt3c_cs.c5
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c18
-rw-r--r--drivers/bluetooth/btsdio.c8
-rw-r--r--drivers/bluetooth/btuart_cs.c7
-rw-r--r--drivers/bluetooth/btusb.c16
-rw-r--r--drivers/bluetooth/btwilink.c24
-rw-r--r--drivers/bluetooth/dtl1_cs.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_ll.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/connector/connector.c3
-rw-r--r--drivers/infiniband/core/netlink.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/Makefile3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c34
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c41
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c172
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c124
-rw-r--r--drivers/isdn/gigaset/common.c1
-rw-r--r--drivers/net/Kconfig17
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/bonding/bond_main.c140
-rw-r--r--drivers/net/can/c_can/c_can.c130
-rw-r--r--drivers/net/can/c_can/c_can.h14
-rw-r--r--drivers/net/can/c_can/c_can_pci.c6
-rw-r--r--drivers/net/can/c_can/c_can_platform.c123
-rw-r--r--drivers/net/can/flexcan.c29
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c4
-rw-r--r--drivers/net/can/sja1000/sja1000.c31
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c8
-rw-r--r--drivers/net/ethernet/Kconfig9
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h109
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1701
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c35
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c116
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h5
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c534
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h51
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c954
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c341
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c734
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h80
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h185
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h97
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c55
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c96
-rw-r--r--drivers/net/ethernet/freescale/Kconfig7
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c549
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.h52
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h11
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c274
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig2
-rw-r--r--drivers/net/ethernet/i825xx/znet.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c39
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c44
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c31
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c17
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c29
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h41
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c198
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c711
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c677
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c300
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c573
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c105
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c272
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c122
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mipsnet.c345
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c17
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/sfc/Kconfig7
-rw-r--r--drivers/net/ethernet/sfc/Makefile1
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h22
-rw-r--r--drivers/net/ethernet/sfc/efx.c250
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.c108
-rw-r--r--drivers/net/ethernet/sfc/filter.h7
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c49
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h12
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h29
-rw-r--r--drivers/net/ethernet/sfc/mtd.c7
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h78
-rw-r--r--drivers/net/ethernet/sfc/nic.c6
-rw-r--r--drivers/net/ethernet/sfc/nic.h36
-rw-r--r--drivers/net/ethernet/sfc/ptp.c1484
-rw-r--r--drivers/net/ethernet/sfc/rx.c20
-rw-r--r--drivers/net/ethernet/sfc/selftest.c3
-rw-r--r--drivers/net/ethernet/sfc/siena.c1
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c8
-rw-r--r--drivers/net/ethernet/sfc/tx.c627
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c39
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/ti/Kconfig4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c179
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c41
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c3
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c3
-rw-r--r--drivers/net/hyperv/hyperv_net.h4
-rw-r--r--drivers/net/hyperv/netvsc.c22
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/hyperv/rndis_filter.c60
-rw-r--r--drivers/net/ieee802154/Kconfig (renamed from drivers/ieee802154/Kconfig)11
-rw-r--r--drivers/net/ieee802154/Makefile (renamed from drivers/ieee802154/Makefile)1
-rw-r--r--drivers/net/ieee802154/at86rf230.c (renamed from drivers/ieee802154/at86rf230.c)12
-rw-r--r--drivers/net/ieee802154/fakehard.c (renamed from drivers/ieee802154/fakehard.c)1
-rw-r--r--drivers/net/ieee802154/fakelb.c (renamed from drivers/ieee802154/fakelb.c)0
-rw-r--r--drivers/net/ieee802154/mrf24j40.c767
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/phy/Kconfig13
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/lxt.c127
-rw-r--r--drivers/net/phy/mdio-gpio.c132
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c171
-rw-r--r--drivers/net/phy/phy.c74
-rw-r--r--drivers/net/ppp/ppp_generic.c58
-rw-r--r--drivers/net/team/Kconfig4
-rw-r--r--drivers/net/team/team.c342
-rw-r--r--drivers/net/team/team_mode_broadcast.c8
-rw-r--r--drivers/net/team/team_mode_roundrobin.c8
-rw-r--r--drivers/net/usb/asix_devices.c40
-rw-r--r--drivers/net/usb/catc.c55
-rw-r--r--drivers/net/usb/cx82310_eth.c11
-rw-r--r--drivers/net/usb/gl620a.c10
-rw-r--r--drivers/net/usb/kaweth.c134
-rw-r--r--drivers/net/usb/net1080.c51
-rw-r--r--drivers/net/usb/qmi_wwan.c47
-rw-r--r--drivers/net/usb/rtl8150.c6
-rw-r--r--drivers/net/usb/sierra_net.c25
-rw-r--r--drivers/net/usb/smsc75xx.c240
-rw-r--r--drivers/net/usb/smsc95xx.c560
-rw-r--r--drivers/net/usb/smsc95xx.h12
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c1219
-rw-r--r--drivers/net/wimax/i2400m/driver.c3
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/airo.c7
-rw-r--r--drivers/net/wireless/at76c50x-usb.c58
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c45
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c117
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c288
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c197
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h95
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h1231
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c72
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c94
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c819
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c15
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h5
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c16
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c16
-rw-r--r--drivers/net/wireless/b43/Makefile1
-rw-r--r--drivers/net/wireless/b43/b43.h10
-rw-r--r--drivers/net/wireless/b43/main.c54
-rw-r--r--drivers/net/wireless/b43/phy_common.c17
-rw-r--r--drivers/net/wireless/b43/phy_common.h6
-rw-r--r--drivers/net/wireless/b43/phy_n.c668
-rw-r--r--drivers/net/wireless/b43/phy_n.h1
-rw-r--r--drivers/net/wireless/b43/radio_2057.c141
-rw-r--r--drivers/net/wireless/b43/radio_2057.h430
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c75
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h10
-rw-r--r--drivers/net/wireless/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c27
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h62
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c73
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c65
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1047
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c353
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c3135
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h296
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c13
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h5
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c15
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c11
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c11
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c12
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c26
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h8
-rw-r--r--drivers/net/wireless/iwlegacy/common.c19
-rw-r--r--drivers/net/wireless/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c56
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c167
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c91
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c112
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c51
-rw-r--r--drivers/net/wireless/libertas/cmd.c16
-rw-r--r--drivers/net/wireless/libertas/cmd.h1
-rw-r--r--drivers/net/wireless/libertas/main.c4
-rw-r--r--drivers/net/wireless/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c59
-rw-r--r--drivers/net/wireless/mwifiex/11n.c64
-rw-r--r--drivers/net/wireless/mwifiex/11n.h20
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c115
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h10
-rw-r--r--drivers/net/wireless/mwifiex/Makefile2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c460
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c7
-rw-r--r--drivers/net/wireless/mwifiex/decl.h9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h93
-rw-r--r--drivers/net/wireless/mwifiex/ie.c88
-rw-r--r--drivers/net/wireless/mwifiex/init.c126
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h14
-rw-r--r--drivers/net/wireless/mwifiex/main.c39
-rw-r--r--drivers/net/wireless/mwifiex/main.h87
-rw-r--r--drivers/net/wireless/mwifiex/scan.c15
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c150
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c77
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c74
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c124
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c44
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c12
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c11
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c62
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c290
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c340
-rw-r--r--drivers/net/wireless/mwifiex/util.c40
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c77
-rw-r--r--drivers/net/wireless/mwl8k.c17
-rw-r--r--drivers/net/wireless/orinoco/wext.c7
-rw-r--r--drivers/net/wireless/p54/eeprom.c108
-rw-r--r--drivers/net/wireless/p54/eeprom.h12
-rw-r--r--drivers/net/wireless/p54/lmac.h4
-rw-r--r--drivers/net/wireless/p54/main.c15
-rw-r--r--drivers/net/wireless/p54/p54pci.c88
-rw-r--r--drivers/net/wireless/p54/p54pci.h1
-rw-r--r--drivers/net/wireless/p54/txrx.c15
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h27
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h18
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h27
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c397
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h22
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c83
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c62
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h20
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c35
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c44
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h28
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h34
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c6
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig8
-rw-r--r--drivers/net/wireless/rtlwifi/base.c3
-rw-r--r--drivers/net/wireless/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.h1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c17
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h121
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c79
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h7
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c129
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h7
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c21
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h5
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/debug.h16
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c32
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c12
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c372
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c112
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h23
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h13
-rw-r--r--drivers/net/wireless/wl3501_cs.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c9
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nfc/Kconfig14
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/nfcwilink.c20
-rw-r--r--drivers/nfc/pn533.c107
-rw-r--r--drivers/nfc/pn544.c893
-rw-r--r--drivers/nfc/pn544_hci.c177
-rw-r--r--drivers/ptp/ptp_clock.c16
-rw-r--r--drivers/ptp/ptp_ixp46x.c2
-rw-r--r--drivers/ptp/ptp_pch.c2
-rw-r--r--drivers/ptp/ptp_private.h1
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c78
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/scsi_netlink.c557
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c7
-rw-r--r--drivers/ssb/driver_mipscore.c28
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c4
-rw-r--r--drivers/staging/winbond/wbusb.c4
455 files changed, 25320 insertions, 11076 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index acb48fa4531c..03da5b663aef 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -123,7 +123,6 @@ obj-$(CONFIG_VHOST_NET) += vhost/
123obj-$(CONFIG_VLYNQ) += vlynq/ 123obj-$(CONFIG_VLYNQ) += vlynq/
124obj-$(CONFIG_STAGING) += staging/ 124obj-$(CONFIG_STAGING) += staging/
125obj-y += platform/ 125obj-y += platform/
126obj-y += ieee802154/
127#common clk code 126#common clk code
128obj-y += clk/ 127obj-y += clk/
129 128
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 06b3207adebd..a533af218368 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
48 48
49config BCMA_SFLASH 49config BCMA_SFLASH
50 bool 50 bool
51 depends on BCMA_DRIVER_MIPS && BROKEN 51 depends on BCMA_DRIVER_MIPS
52 default y 52 default y
53 53
54config BCMA_NFLASH 54config BCMA_NFLASH
55 bool 55 bool
56 depends on BCMA_DRIVER_MIPS && BROKEN 56 depends on BCMA_DRIVER_MIPS
57 default y 57 default y
58 58
59config BCMA_DRIVER_GMAC_CMN 59config BCMA_DRIVER_GMAC_CMN
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 3cf9cc923cd2..169fc58427d3 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
54#ifdef CONFIG_BCMA_SFLASH 54#ifdef CONFIG_BCMA_SFLASH
55/* driver_chipcommon_sflash.c */ 55/* driver_chipcommon_sflash.c */
56int bcma_sflash_init(struct bcma_drv_cc *cc); 56int bcma_sflash_init(struct bcma_drv_cc *cc);
57extern struct platform_device bcma_sflash_dev;
57#else 58#else
58static inline int bcma_sflash_init(struct bcma_drv_cc *cc) 59static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
59{ 60{
@@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
65#ifdef CONFIG_BCMA_NFLASH 66#ifdef CONFIG_BCMA_NFLASH
66/* driver_chipcommon_nflash.c */ 67/* driver_chipcommon_nflash.c */
67int bcma_nflash_init(struct bcma_drv_cc *cc); 68int bcma_nflash_init(struct bcma_drv_cc *cc);
69extern struct platform_device bcma_nflash_dev;
68#else 70#else
69static inline int bcma_nflash_init(struct bcma_drv_cc *cc) 71static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
70{ 72{
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 63c8b470536f..03bbe104338f 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -65,7 +65,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
65 switch (clkmode) { 65 switch (clkmode) {
66 case BCMA_CLKMODE_FAST: 66 case BCMA_CLKMODE_FAST:
67 bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); 67 bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
68 udelay(64); 68 usleep_range(64, 300);
69 for (i = 0; i < 1500; i++) { 69 for (i = 0; i < 1500; i++) {
70 if (bcma_read32(core, BCMA_CLKCTLST) & 70 if (bcma_read32(core, BCMA_CLKCTLST) &
71 BCMA_CLKCTLST_HAVEHT) { 71 BCMA_CLKCTLST_HAVEHT) {
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 574d62435bc2..9042781edec3 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -5,15 +5,37 @@
5 * Licensed under the GNU/GPL. See COPYING for details. 5 * Licensed under the GNU/GPL. See COPYING for details.
6 */ 6 */
7 7
8#include <linux/platform_device.h>
8#include <linux/bcma/bcma.h> 9#include <linux/bcma/bcma.h>
9#include <linux/bcma/bcma_driver_chipcommon.h>
10#include <linux/delay.h>
11 10
12#include "bcma_private.h" 11#include "bcma_private.h"
13 12
13struct platform_device bcma_nflash_dev = {
14 .name = "bcma_nflash",
15 .num_resources = 0,
16};
17
14/* Initialize NAND flash access */ 18/* Initialize NAND flash access */
15int bcma_nflash_init(struct bcma_drv_cc *cc) 19int bcma_nflash_init(struct bcma_drv_cc *cc)
16{ 20{
17 bcma_err(cc->core->bus, "NAND flash support is broken\n"); 21 struct bcma_bus *bus = cc->core->bus;
22
23 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
24 cc->core->id.rev != 0x38) {
25 bcma_err(bus, "NAND flash on unsupported board!\n");
26 return -ENOTSUPP;
27 }
28
29 if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
30 bcma_err(bus, "NAND flash not present according to ChipCommon\n");
31 return -ENODEV;
32 }
33
34 cc->nflash.present = true;
35
36 /* Prepare platform device, but don't register it yet. It's too early,
37 * malloc (required by device_private_init) is not available yet. */
38 bcma_nflash_dev.dev.platform_data = &cc->nflash;
39
18 return 0; 40 return 0;
19} 41}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index c9a4f46c5143..201faf106b3f 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -76,7 +76,10 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
76 if (max_msk) 76 if (max_msk)
77 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); 77 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
78 78
79 /* Add some delay; allow resources to come up and settle. */ 79 /*
80 * Add some delay; allow resources to come up and settle.
81 * Delay is required for SoC (early init).
82 */
80 mdelay(2); 83 mdelay(2);
81} 84}
82 85
@@ -101,7 +104,7 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
101 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val); 104 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
102} 105}
103 106
104void bcma_pmu_workarounds(struct bcma_drv_cc *cc) 107static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
105{ 108{
106 struct bcma_bus *bus = cc->core->bus; 109 struct bcma_bus *bus = cc->core->bus;
107 110
@@ -257,7 +260,7 @@ static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
257} 260}
258 261
259/* query bus clock frequency for PMU-enabled chipcommon */ 262/* query bus clock frequency for PMU-enabled chipcommon */
260u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc) 263static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
261{ 264{
262 struct bcma_bus *bus = cc->core->bus; 265 struct bcma_bus *bus = cc->core->bus;
263 266
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 6e157a58a1d7..2c4eec2ca5a0 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -5,15 +5,132 @@
5 * Licensed under the GNU/GPL. See COPYING for details. 5 * Licensed under the GNU/GPL. See COPYING for details.
6 */ 6 */
7 7
8#include <linux/platform_device.h>
8#include <linux/bcma/bcma.h> 9#include <linux/bcma/bcma.h>
9#include <linux/bcma/bcma_driver_chipcommon.h>
10#include <linux/delay.h>
11 10
12#include "bcma_private.h" 11#include "bcma_private.h"
13 12
13static struct resource bcma_sflash_resource = {
14 .name = "bcma_sflash",
15 .start = BCMA_SFLASH,
16 .end = 0,
17 .flags = IORESOURCE_MEM | IORESOURCE_READONLY,
18};
19
20struct platform_device bcma_sflash_dev = {
21 .name = "bcma_sflash",
22 .resource = &bcma_sflash_resource,
23 .num_resources = 1,
24};
25
26struct bcma_sflash_tbl_e {
27 char *name;
28 u32 id;
29 u32 blocksize;
30 u16 numblocks;
31};
32
33static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
34 { "", 0x14, 0x10000, 32, },
35 { 0 },
36};
37
38static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
39 { 0 },
40};
41
42static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
43 { 0 },
44};
45
46static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
47{
48 int i;
49 bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
50 BCMA_CC_FLASHCTL_START | opcode);
51 for (i = 0; i < 1000; i++) {
52 if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
53 BCMA_CC_FLASHCTL_BUSY))
54 return;
55 cpu_relax();
56 }
57 bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
58}
59
14/* Initialize serial flash access */ 60/* Initialize serial flash access */
15int bcma_sflash_init(struct bcma_drv_cc *cc) 61int bcma_sflash_init(struct bcma_drv_cc *cc)
16{ 62{
17 bcma_err(cc->core->bus, "Serial flash support is broken\n"); 63 struct bcma_bus *bus = cc->core->bus;
64 struct bcma_sflash *sflash = &cc->sflash;
65 struct bcma_sflash_tbl_e *e;
66 u32 id, id2;
67
68 switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
69 case BCMA_CC_FLASHT_STSER:
70 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
71
72 bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
73 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
74 id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
75
76 bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
77 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
78 id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
79
80 switch (id) {
81 case 0xbf:
82 for (e = bcma_sflash_sst_tbl; e->name; e++) {
83 if (e->id == id2)
84 break;
85 }
86 break;
87 default:
88 for (e = bcma_sflash_st_tbl; e->name; e++) {
89 if (e->id == id)
90 break;
91 }
92 break;
93 }
94 if (!e->name) {
95 bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
96 return -ENOTSUPP;
97 }
98
99 break;
100 case BCMA_CC_FLASHT_ATSER:
101 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
102 id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
103
104 for (e = bcma_sflash_at_tbl; e->name; e++) {
105 if (e->id == id)
106 break;
107 }
108 if (!e->name) {
109 bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
110 return -ENOTSUPP;
111 }
112
113 break;
114 default:
115 bcma_err(bus, "Unsupported flash type\n");
116 return -ENOTSUPP;
117 }
118
119 sflash->window = BCMA_SFLASH;
120 sflash->blocksize = e->blocksize;
121 sflash->numblocks = e->numblocks;
122 sflash->size = sflash->blocksize * sflash->numblocks;
123 sflash->present = true;
124
125 bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
126 e->name, sflash->size / 1024, sflash->blocksize,
127 sflash->numblocks);
128
129 /* Prepare platform device, but don't register it yet. It's too early,
130 * malloc (required by device_private_init) is not available yet. */
131 bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
132 sflash->size;
133 bcma_sflash_dev.dev.platform_data = sflash;
134
18 return 0; 135 return 0;
19} 136}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c32ebd537abe..c39ee6d45850 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -51,7 +51,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
51 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL); 51 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
52 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) 52 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
53 break; 53 break;
54 msleep(1); 54 usleep_range(1000, 2000);
55 } 55 }
56} 56}
57 57
@@ -92,7 +92,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
92 ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA); 92 ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
93 break; 93 break;
94 } 94 }
95 msleep(1); 95 usleep_range(1000, 2000);
96 } 96 }
97 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0); 97 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
98 return ret; 98 return ret;
@@ -132,7 +132,7 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
132 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL); 132 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
133 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) 133 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
134 break; 134 break;
135 msleep(1); 135 usleep_range(1000, 2000);
136 } 136 }
137 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0); 137 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
138} 138}
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index cbae2c231336..9baf886e82df 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -425,9 +425,9 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
425 pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED; 425 pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
426 426
427 /* Reset RC */ 427 /* Reset RC */
428 udelay(3000); 428 usleep_range(3000, 5000);
429 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE); 429 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
430 udelay(1000); 430 usleep_range(1000, 2000);
431 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST | 431 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
432 BCMA_CORE_PCI_CTL_RST_OE); 432 BCMA_CORE_PCI_CTL_RST_OE);
433 433
@@ -481,7 +481,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
481 * before issuing configuration requests to PCI Express 481 * before issuing configuration requests to PCI Express
482 * devices. 482 * devices.
483 */ 483 */
484 udelay(100000); 484 msleep(100);
485 485
486 bcma_core_pci_enable_crs(pc); 486 bcma_core_pci_enable_crs(pc);
487 487
@@ -501,7 +501,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
501 set_io_port_base(pc_host->pci_controller.io_map_base); 501 set_io_port_base(pc_host->pci_controller.io_map_base);
502 /* Give some time to the PCI controller to configure itself with the new 502 /* Give some time to the PCI controller to configure itself with the new
503 * values. Not waiting at this point causes crashes of the machine. */ 503 * values. Not waiting at this point causes crashes of the machine. */
504 mdelay(10); 504 usleep_range(10000, 15000);
505 register_pci_controller(&pc_host->pci_controller); 505 register_pci_controller(&pc_host->pci_controller);
506 return; 506 return;
507} 507}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a6e5672c67e7..b6b4b5ebd4c2 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -77,8 +77,8 @@ static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
77} 77}
78 78
79#ifdef CONFIG_BCMA_BLOCKIO 79#ifdef CONFIG_BCMA_BLOCKIO
80void bcma_host_pci_block_read(struct bcma_device *core, void *buffer, 80static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
81 size_t count, u16 offset, u8 reg_width) 81 size_t count, u16 offset, u8 reg_width)
82{ 82{
83 void __iomem *addr = core->bus->mmio + offset; 83 void __iomem *addr = core->bus->mmio + offset;
84 if (core->bus->mapped_core != core) 84 if (core->bus->mapped_core != core)
@@ -100,8 +100,9 @@ void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
100 } 100 }
101} 101}
102 102
103void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer, 103static void bcma_host_pci_block_write(struct bcma_device *core,
104 size_t count, u16 offset, u8 reg_width) 104 const void *buffer, size_t count,
105 u16 offset, u8 reg_width)
105{ 106{
106 void __iomem *addr = core->bus->mmio + offset; 107 void __iomem *addr = core->bus->mmio + offset;
107 if (core->bus->mapped_core != core) 108 if (core->bus->mapped_core != core)
@@ -139,7 +140,7 @@ static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
139 iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset); 140 iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
140} 141}
141 142
142const struct bcma_host_ops bcma_host_pci_ops = { 143static const struct bcma_host_ops bcma_host_pci_ops = {
143 .read8 = bcma_host_pci_read8, 144 .read8 = bcma_host_pci_read8,
144 .read16 = bcma_host_pci_read16, 145 .read16 = bcma_host_pci_read16,
145 .read32 = bcma_host_pci_read32, 146 .read32 = bcma_host_pci_read32,
@@ -272,6 +273,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, 277 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 278 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
277 { 0, }, 279 { 0, },
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 3c381fb8f9c4..3475e600011a 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -143,7 +143,7 @@ static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
143 writel(value, core->io_wrap + offset); 143 writel(value, core->io_wrap + offset);
144} 144}
145 145
146const struct bcma_host_ops bcma_host_soc_ops = { 146static const struct bcma_host_ops bcma_host_soc_ops = {
147 .read8 = bcma_host_soc_read8, 147 .read8 = bcma_host_soc_read8,
148 .read16 = bcma_host_soc_read16, 148 .read16 = bcma_host_soc_read16,
149 .read32 = bcma_host_soc_read32, 149 .read32 = bcma_host_soc_read32,
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 758af9ccdef0..432aeeedfd5e 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,6 +7,7 @@
7 7
8#include "bcma_private.h" 8#include "bcma_private.h"
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/platform_device.h>
10#include <linux/bcma/bcma.h> 11#include <linux/bcma/bcma.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
12 13
@@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
136 dev_id++; 137 dev_id++;
137 } 138 }
138 139
140#ifdef CONFIG_BCMA_SFLASH
141 if (bus->drv_cc.sflash.present) {
142 err = platform_device_register(&bcma_sflash_dev);
143 if (err)
144 bcma_err(bus, "Error registering serial flash\n");
145 }
146#endif
147
148#ifdef CONFIG_BCMA_NFLASH
149 if (bus->drv_cc.nflash.present) {
150 err = platform_device_register(&bcma_nflash_dev);
151 if (err)
152 bcma_err(bus, "Error registering NAND flash\n");
153 }
154#endif
155
139 return 0; 156 return 0;
140} 157}
141 158
@@ -210,7 +227,17 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
210 227
211void bcma_bus_unregister(struct bcma_bus *bus) 228void bcma_bus_unregister(struct bcma_bus *bus)
212{ 229{
230 struct bcma_device *cores[3];
231
232 cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
233 cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
234 cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
235
213 bcma_unregister_cores(bus); 236 bcma_unregister_cores(bus);
237
238 kfree(cores[2]);
239 kfree(cores[1]);
240 kfree(cores[0]);
214} 241}
215 242
216int __init bcma_bus_early_register(struct bcma_bus *bus, 243int __init bcma_bus_early_register(struct bcma_bus *bus,
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 9ea4627dc0c2..0d546b64be34 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
507 /* for these chips OTP is always available */ 507 /* for these chips OTP is always available */
508 present = true; 508 present = true;
509 break; 509 break;
510 case BCMA_CHIP_ID_BCM43227:
510 case BCMA_CHIP_ID_BCM43228: 511 case BCMA_CHIP_ID_BCM43228:
512 case BCMA_CHIP_ID_BCM43428:
511 present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT; 513 present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
512 break; 514 break;
513 default: 515 default:
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 37ae175162f3..364f82b34d03 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -177,7 +177,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
177 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 177 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
178 return -ENODEV; 178 return -ENODEV;
179 179
180 data = kzalloc(sizeof(*data), GFP_KERNEL); 180 data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
181 if (!data) { 181 if (!data) {
182 BT_ERR("Can't allocate memory for data structure"); 182 BT_ERR("Can't allocate memory for data structure");
183 return -ENOMEM; 183 return -ENOMEM;
@@ -189,14 +189,12 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
189 data->urb = usb_alloc_urb(0, GFP_KERNEL); 189 data->urb = usb_alloc_urb(0, GFP_KERNEL);
190 if (!data->urb) { 190 if (!data->urb) {
191 BT_ERR("Can't allocate URB"); 191 BT_ERR("Can't allocate URB");
192 kfree(data);
193 return -ENOMEM; 192 return -ENOMEM;
194 } 193 }
195 194
196 if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) { 195 if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
197 BT_ERR("Mini driver request failed"); 196 BT_ERR("Mini driver request failed");
198 usb_free_urb(data->urb); 197 usb_free_urb(data->urb);
199 kfree(data);
200 return -EIO; 198 return -EIO;
201 } 199 }
202 200
@@ -209,7 +207,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
209 BT_ERR("Can't allocate memory for mini driver"); 207 BT_ERR("Can't allocate memory for mini driver");
210 release_firmware(firmware); 208 release_firmware(firmware);
211 usb_free_urb(data->urb); 209 usb_free_urb(data->urb);
212 kfree(data);
213 return -ENOMEM; 210 return -ENOMEM;
214 } 211 }
215 212
@@ -224,7 +221,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
224 BT_ERR("Firmware request failed"); 221 BT_ERR("Firmware request failed");
225 usb_free_urb(data->urb); 222 usb_free_urb(data->urb);
226 kfree(data->buffer); 223 kfree(data->buffer);
227 kfree(data);
228 return -EIO; 224 return -EIO;
229 } 225 }
230 226
@@ -236,7 +232,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
236 release_firmware(firmware); 232 release_firmware(firmware);
237 usb_free_urb(data->urb); 233 usb_free_urb(data->urb);
238 kfree(data->buffer); 234 kfree(data->buffer);
239 kfree(data);
240 return -ENOMEM; 235 return -ENOMEM;
241 } 236 }
242 237
@@ -271,7 +266,6 @@ static void bcm203x_disconnect(struct usb_interface *intf)
271 usb_free_urb(data->urb); 266 usb_free_urb(data->urb);
272 kfree(data->fw_data); 267 kfree(data->fw_data);
273 kfree(data->buffer); 268 kfree(data->buffer);
274 kfree(data);
275} 269}
276 270
277static struct usb_driver bcm203x_driver = { 271static struct usb_driver bcm203x_driver = {
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 32e825144fe9..995aee9cba22 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -653,7 +653,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
653 } 653 }
654 654
655 /* Initialize control structure and load firmware */ 655 /* Initialize control structure and load firmware */
656 data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL); 656 data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL);
657 if (!data) { 657 if (!data) {
658 BT_ERR("Can't allocate memory for control structure"); 658 BT_ERR("Can't allocate memory for control structure");
659 goto done; 659 goto done;
@@ -674,7 +674,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
674 674
675 if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) { 675 if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
676 BT_ERR("Firmware request failed"); 676 BT_ERR("Firmware request failed");
677 goto error; 677 goto done;
678 } 678 }
679 679
680 BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); 680 BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
@@ -690,7 +690,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
690 hdev = hci_alloc_dev(); 690 hdev = hci_alloc_dev();
691 if (!hdev) { 691 if (!hdev) {
692 BT_ERR("Can't allocate HCI device"); 692 BT_ERR("Can't allocate HCI device");
693 goto error; 693 goto done;
694 } 694 }
695 695
696 data->hdev = hdev; 696 data->hdev = hdev;
@@ -708,7 +708,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
708 if (hci_register_dev(hdev) < 0) { 708 if (hci_register_dev(hdev) < 0) {
709 BT_ERR("Can't register HCI device"); 709 BT_ERR("Can't register HCI device");
710 hci_free_dev(hdev); 710 hci_free_dev(hdev);
711 goto error; 711 goto done;
712 } 712 }
713 713
714 usb_set_intfdata(intf, data); 714 usb_set_intfdata(intf, data);
@@ -718,9 +718,6 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
718release: 718release:
719 release_firmware(firmware); 719 release_firmware(firmware);
720 720
721error:
722 kfree(data);
723
724done: 721done:
725 return -EIO; 722 return -EIO;
726} 723}
@@ -741,7 +738,6 @@ static void bfusb_disconnect(struct usb_interface *intf)
741 738
742 hci_unregister_dev(hdev); 739 hci_unregister_dev(hdev);
743 hci_free_dev(hdev); 740 hci_free_dev(hdev);
744 kfree(data);
745} 741}
746 742
747static struct usb_driver bfusb_driver = { 743static struct usb_driver bfusb_driver = {
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 66c3a6770c41..0d26851d6e49 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -681,7 +681,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
681 case HCI_SCODATA_PKT: 681 case HCI_SCODATA_PKT:
682 hdev->stat.sco_tx++; 682 hdev->stat.sco_tx++;
683 break; 683 break;
684 }; 684 }
685 685
686 /* Prepend skb with frame type */ 686 /* Prepend skb with frame type */
687 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); 687 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -849,7 +849,7 @@ static int bluecard_probe(struct pcmcia_device *link)
849 bluecard_info_t *info; 849 bluecard_info_t *info;
850 850
851 /* Create new info device */ 851 /* Create new info device */
852 info = kzalloc(sizeof(*info), GFP_KERNEL); 852 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
853 if (!info) 853 if (!info)
854 return -ENOMEM; 854 return -ENOMEM;
855 855
@@ -864,10 +864,7 @@ static int bluecard_probe(struct pcmcia_device *link)
864 864
865static void bluecard_detach(struct pcmcia_device *link) 865static void bluecard_detach(struct pcmcia_device *link)
866{ 866{
867 bluecard_info_t *info = link->priv;
868
869 bluecard_release(link); 867 bluecard_release(link);
870 kfree(info);
871} 868}
872 869
873 870
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 29caaed2d715..2fe4a8031348 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -443,7 +443,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
443 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 443 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
444 return -ENODEV; 444 return -ENODEV;
445 445
446 data = kzalloc(sizeof(*data), GFP_KERNEL); 446 data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
447 if (!data) 447 if (!data)
448 return -ENOMEM; 448 return -ENOMEM;
449 449
@@ -453,10 +453,8 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
453 init_usb_anchor(&data->rx_anchor); 453 init_usb_anchor(&data->rx_anchor);
454 454
455 hdev = hci_alloc_dev(); 455 hdev = hci_alloc_dev();
456 if (!hdev) { 456 if (!hdev)
457 kfree(data);
458 return -ENOMEM; 457 return -ENOMEM;
459 }
460 458
461 hdev->bus = HCI_USB; 459 hdev->bus = HCI_USB;
462 hci_set_drvdata(hdev, data); 460 hci_set_drvdata(hdev, data);
@@ -475,7 +473,6 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
475 err = hci_register_dev(hdev); 473 err = hci_register_dev(hdev);
476 if (err < 0) { 474 if (err < 0) {
477 hci_free_dev(hdev); 475 hci_free_dev(hdev);
478 kfree(data);
479 return err; 476 return err;
480 } 477 }
481 478
@@ -500,7 +497,6 @@ static void bpa10x_disconnect(struct usb_interface *intf)
500 hci_free_dev(data->hdev); 497 hci_free_dev(data->hdev);
501 kfree_skb(data->rx_skb[0]); 498 kfree_skb(data->rx_skb[0]);
502 kfree_skb(data->rx_skb[1]); 499 kfree_skb(data->rx_skb[1]);
503 kfree(data);
504} 500}
505 501
506static struct usb_driver bpa10x_driver = { 502static struct usb_driver bpa10x_driver = {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 8925b6d672a6..7ffd3f407144 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -638,7 +638,7 @@ static int bt3c_probe(struct pcmcia_device *link)
638 bt3c_info_t *info; 638 bt3c_info_t *info;
639 639
640 /* Create new info device */ 640 /* Create new info device */
641 info = kzalloc(sizeof(*info), GFP_KERNEL); 641 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
642 if (!info) 642 if (!info)
643 return -ENOMEM; 643 return -ENOMEM;
644 644
@@ -654,10 +654,7 @@ static int bt3c_probe(struct pcmcia_device *link)
654 654
655static void bt3c_detach(struct pcmcia_device *link) 655static void bt3c_detach(struct pcmcia_device *link)
656{ 656{
657 bt3c_info_t *info = link->priv;
658
659 bt3c_release(link); 657 bt3c_release(link);
660 kfree(info);
661} 658}
662 659
663static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data) 660static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 6a9e9717d3ab..3f4bfc814dc7 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -600,8 +600,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
600exit: 600exit:
601 if (ret) { 601 if (ret) {
602 hdev->stat.err_rx++; 602 hdev->stat.err_rx++;
603 if (skb) 603 kfree_skb(skb);
604 kfree_skb(skb);
605 } 604 }
606 605
607 return ret; 606 return ret;
@@ -956,11 +955,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
956 BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d", 955 BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
957 id->vendor, id->device, id->class, func->num); 956 id->vendor, id->device, id->class, func->num);
958 957
959 card = kzalloc(sizeof(*card), GFP_KERNEL); 958 card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
960 if (!card) { 959 if (!card)
961 ret = -ENOMEM; 960 return -ENOMEM;
962 goto done;
963 }
964 961
965 card->func = func; 962 card->func = func;
966 963
@@ -974,8 +971,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
974 971
975 if (btmrvl_sdio_register_dev(card) < 0) { 972 if (btmrvl_sdio_register_dev(card) < 0) {
976 BT_ERR("Failed to register BT device!"); 973 BT_ERR("Failed to register BT device!");
977 ret = -ENODEV; 974 return -ENODEV;
978 goto free_card;
979 } 975 }
980 976
981 /* Disable the interrupts on the card */ 977 /* Disable the interrupts on the card */
@@ -1023,9 +1019,6 @@ disable_host_int:
1023 btmrvl_sdio_disable_host_int(card); 1019 btmrvl_sdio_disable_host_int(card);
1024unreg_dev: 1020unreg_dev:
1025 btmrvl_sdio_unregister_dev(card); 1021 btmrvl_sdio_unregister_dev(card);
1026free_card:
1027 kfree(card);
1028done:
1029 return ret; 1022 return ret;
1030} 1023}
1031 1024
@@ -1047,7 +1040,6 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
1047 BT_DBG("unregester dev"); 1040 BT_DBG("unregester dev");
1048 btmrvl_sdio_unregister_dev(card); 1041 btmrvl_sdio_unregister_dev(card);
1049 btmrvl_remove_card(card->priv); 1042 btmrvl_remove_card(card->priv);
1050 kfree(card);
1051 } 1043 }
1052 } 1044 }
1053} 1045}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index e10ea0347051..4a9909713874 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -304,7 +304,7 @@ static int btsdio_probe(struct sdio_func *func,
304 tuple = tuple->next; 304 tuple = tuple->next;
305 } 305 }
306 306
307 data = kzalloc(sizeof(*data), GFP_KERNEL); 307 data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
308 if (!data) 308 if (!data)
309 return -ENOMEM; 309 return -ENOMEM;
310 310
@@ -315,10 +315,8 @@ static int btsdio_probe(struct sdio_func *func,
315 skb_queue_head_init(&data->txq); 315 skb_queue_head_init(&data->txq);
316 316
317 hdev = hci_alloc_dev(); 317 hdev = hci_alloc_dev();
318 if (!hdev) { 318 if (!hdev)
319 kfree(data);
320 return -ENOMEM; 319 return -ENOMEM;
321 }
322 320
323 hdev->bus = HCI_SDIO; 321 hdev->bus = HCI_SDIO;
324 hci_set_drvdata(hdev, data); 322 hci_set_drvdata(hdev, data);
@@ -340,7 +338,6 @@ static int btsdio_probe(struct sdio_func *func,
340 err = hci_register_dev(hdev); 338 err = hci_register_dev(hdev);
341 if (err < 0) { 339 if (err < 0) {
342 hci_free_dev(hdev); 340 hci_free_dev(hdev);
343 kfree(data);
344 return err; 341 return err;
345 } 342 }
346 343
@@ -366,7 +363,6 @@ static void btsdio_remove(struct sdio_func *func)
366 hci_unregister_dev(hdev); 363 hci_unregister_dev(hdev);
367 364
368 hci_free_dev(hdev); 365 hci_free_dev(hdev);
369 kfree(data);
370} 366}
371 367
372static struct sdio_driver btsdio_driver = { 368static struct sdio_driver btsdio_driver = {
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 21e803a6a281..35a553a90616 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -446,7 +446,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
446 case HCI_SCODATA_PKT: 446 case HCI_SCODATA_PKT:
447 hdev->stat.sco_tx++; 447 hdev->stat.sco_tx++;
448 break; 448 break;
449 }; 449 }
450 450
451 /* Prepend skb with frame type */ 451 /* Prepend skb with frame type */
452 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); 452 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -567,7 +567,7 @@ static int btuart_probe(struct pcmcia_device *link)
567 btuart_info_t *info; 567 btuart_info_t *info;
568 568
569 /* Create new info device */ 569 /* Create new info device */
570 info = kzalloc(sizeof(*info), GFP_KERNEL); 570 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
571 if (!info) 571 if (!info)
572 return -ENOMEM; 572 return -ENOMEM;
573 573
@@ -583,10 +583,7 @@ static int btuart_probe(struct pcmcia_device *link)
583 583
584static void btuart_detach(struct pcmcia_device *link) 584static void btuart_detach(struct pcmcia_device *link)
585{ 585{
586 btuart_info_t *info = link->priv;
587
588 btuart_release(link); 586 btuart_release(link);
589 kfree(info);
590} 587}
591 588
592static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data) 589static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 654e248763ef..debda27df9b0 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -96,11 +96,12 @@ static struct usb_device_id btusb_table[] = {
96 { USB_DEVICE(0x0c10, 0x0000) }, 96 { USB_DEVICE(0x0c10, 0x0000) },
97 97
98 /* Broadcom BCM20702A0 */ 98 /* Broadcom BCM20702A0 */
99 { USB_DEVICE(0x04ca, 0x2003) },
99 { USB_DEVICE(0x0489, 0xe042) }, 100 { USB_DEVICE(0x0489, 0xe042) },
100 { USB_DEVICE(0x413c, 0x8197) }, 101 { USB_DEVICE(0x413c, 0x8197) },
101 102
102 /* Foxconn - Hon Hai */ 103 /* Foxconn - Hon Hai */
103 { USB_DEVICE(0x0489, 0xe033) }, 104 { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
104 105
105 /*Broadcom devices with vendor specific id */ 106 /*Broadcom devices with vendor specific id */
106 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, 107 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
@@ -956,7 +957,7 @@ static int btusb_probe(struct usb_interface *intf,
956 return -ENODEV; 957 return -ENODEV;
957 } 958 }
958 959
959 data = kzalloc(sizeof(*data), GFP_KERNEL); 960 data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
960 if (!data) 961 if (!data)
961 return -ENOMEM; 962 return -ENOMEM;
962 963
@@ -979,10 +980,8 @@ static int btusb_probe(struct usb_interface *intf,
979 } 980 }
980 } 981 }
981 982
982 if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) { 983 if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
983 kfree(data);
984 return -ENODEV; 984 return -ENODEV;
985 }
986 985
987 data->cmdreq_type = USB_TYPE_CLASS; 986 data->cmdreq_type = USB_TYPE_CLASS;
988 987
@@ -1002,10 +1001,8 @@ static int btusb_probe(struct usb_interface *intf,
1002 init_usb_anchor(&data->deferred); 1001 init_usb_anchor(&data->deferred);
1003 1002
1004 hdev = hci_alloc_dev(); 1003 hdev = hci_alloc_dev();
1005 if (!hdev) { 1004 if (!hdev)
1006 kfree(data);
1007 return -ENOMEM; 1005 return -ENOMEM;
1008 }
1009 1006
1010 hdev->bus = HCI_USB; 1007 hdev->bus = HCI_USB;
1011 hci_set_drvdata(hdev, data); 1008 hci_set_drvdata(hdev, data);
@@ -1073,7 +1070,6 @@ static int btusb_probe(struct usb_interface *intf,
1073 data->isoc, data); 1070 data->isoc, data);
1074 if (err < 0) { 1071 if (err < 0) {
1075 hci_free_dev(hdev); 1072 hci_free_dev(hdev);
1076 kfree(data);
1077 return err; 1073 return err;
1078 } 1074 }
1079 } 1075 }
@@ -1081,7 +1077,6 @@ static int btusb_probe(struct usb_interface *intf,
1081 err = hci_register_dev(hdev); 1077 err = hci_register_dev(hdev);
1082 if (err < 0) { 1078 if (err < 0) {
1083 hci_free_dev(hdev); 1079 hci_free_dev(hdev);
1084 kfree(data);
1085 return err; 1080 return err;
1086 } 1081 }
1087 1082
@@ -1114,7 +1109,6 @@ static void btusb_disconnect(struct usb_interface *intf)
1114 usb_driver_release_interface(&btusb_driver, data->isoc); 1109 usb_driver_release_interface(&btusb_driver, data->isoc);
1115 1110
1116 hci_free_dev(hdev); 1111 hci_free_dev(hdev);
1117 kfree(data);
1118} 1112}
1119 1113
1120#ifdef CONFIG_PM 1114#ifdef CONFIG_PM
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 88694697f34f..60abf596f60e 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -297,16 +297,14 @@ static int bt_ti_probe(struct platform_device *pdev)
297 struct hci_dev *hdev; 297 struct hci_dev *hdev;
298 int err; 298 int err;
299 299
300 hst = kzalloc(sizeof(struct ti_st), GFP_KERNEL); 300 hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
301 if (!hst) 301 if (!hst)
302 return -ENOMEM; 302 return -ENOMEM;
303 303
304 /* Expose "hciX" device to user space */ 304 /* Expose "hciX" device to user space */
305 hdev = hci_alloc_dev(); 305 hdev = hci_alloc_dev();
306 if (!hdev) { 306 if (!hdev)
307 kfree(hst);
308 return -ENOMEM; 307 return -ENOMEM;
309 }
310 308
311 BT_DBG("hdev %p", hdev); 309 BT_DBG("hdev %p", hdev);
312 310
@@ -321,7 +319,6 @@ static int bt_ti_probe(struct platform_device *pdev)
321 err = hci_register_dev(hdev); 319 err = hci_register_dev(hdev);
322 if (err < 0) { 320 if (err < 0) {
323 BT_ERR("Can't register HCI device error %d", err); 321 BT_ERR("Can't register HCI device error %d", err);
324 kfree(hst);
325 hci_free_dev(hdev); 322 hci_free_dev(hdev);
326 return err; 323 return err;
327 } 324 }
@@ -347,7 +344,6 @@ static int bt_ti_remove(struct platform_device *pdev)
347 hci_unregister_dev(hdev); 344 hci_unregister_dev(hdev);
348 345
349 hci_free_dev(hdev); 346 hci_free_dev(hdev);
350 kfree(hst);
351 347
352 dev_set_drvdata(&pdev->dev, NULL); 348 dev_set_drvdata(&pdev->dev, NULL);
353 return 0; 349 return 0;
@@ -362,21 +358,7 @@ static struct platform_driver btwilink_driver = {
362 }, 358 },
363}; 359};
364 360
365/* ------- Module Init/Exit interfaces ------ */ 361module_platform_driver(btwilink_driver);
366static int __init btwilink_init(void)
367{
368 BT_INFO("Bluetooth Driver for TI WiLink - Version %s", VERSION);
369
370 return platform_driver_register(&btwilink_driver);
371}
372
373static void __exit btwilink_exit(void)
374{
375 platform_driver_unregister(&btwilink_driver);
376}
377
378module_init(btwilink_init);
379module_exit(btwilink_exit);
380 362
381/* ------ Module Info ------ */ 363/* ------ Module Info ------ */
382 364
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 97a7784db4a2..036cb366fe6e 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -550,7 +550,7 @@ static int dtl1_probe(struct pcmcia_device *link)
550 dtl1_info_t *info; 550 dtl1_info_t *info;
551 551
552 /* Create new info device */ 552 /* Create new info device */
553 info = kzalloc(sizeof(*info), GFP_KERNEL); 553 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
554 if (!info) 554 if (!info)
555 return -ENOMEM; 555 return -ENOMEM;
556 556
@@ -569,7 +569,6 @@ static void dtl1_detach(struct pcmcia_device *link)
569 569
570 dtl1_close(info); 570 dtl1_close(info);
571 pcmcia_disable_device(link); 571 pcmcia_disable_device(link);
572 kfree(info);
573} 572}
574 573
575static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) 574static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 74e0966b3ead..c8abce3d2d9c 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -531,7 +531,7 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
531 default: 531 default:
532 err = n_tty_ioctl_helper(tty, file, cmd, arg); 532 err = n_tty_ioctl_helper(tty, file, cmd, arg);
533 break; 533 break;
534 }; 534 }
535 535
536 return err; 536 return err;
537} 537}
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index ff6d589c34a5..cfc767938589 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -481,7 +481,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
481 hu->hdev->stat.err_rx++; 481 hu->hdev->stat.err_rx++;
482 ptr++; count--; 482 ptr++; count--;
483 continue; 483 continue;
484 }; 484 }
485 485
486 ptr++; count--; 486 ptr++; count--;
487 487
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 3f72595a6017..d8b7aed6e4a9 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -156,7 +156,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
156 case HCI_SCODATA_PKT: 156 case HCI_SCODATA_PKT:
157 data->hdev->stat.sco_tx++; 157 data->hdev->stat.sco_tx++;
158 break; 158 break;
159 }; 159 }
160 160
161 return total; 161 return total;
162} 162}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 82fa4f0f91d6..965b7811e04f 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -264,8 +264,7 @@ static int __devinit cn_init(void)
264 .input = dev->input, 264 .input = dev->input,
265 }; 265 };
266 266
267 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, 267 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
268 THIS_MODULE, &cfg);
269 if (!dev->nls) 268 if (!dev->nls)
270 return -EIO; 269 return -EIO;
271 270
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 3ae2bfd31015..fe10a949aef9 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -177,7 +177,7 @@ int __init ibnl_init(void)
177 .input = ibnl_rcv, 177 .input = ibnl_rcv,
178 }; 178 };
179 179
180 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg); 180 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
181 if (!nls) { 181 if (!nls) {
182 pr_warn("Failed to create netlink socket\n"); 182 pr_warn("Failed to create netlink socket\n");
183 return -ENOMEM; 183 return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 45aedf1d9338..5213bab2d19b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1155,7 +1155,7 @@ static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
1155 */ 1155 */
1156 if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) < 1156 if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
1157 (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) { 1157 (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
1158 writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db); 1158 writel(QID(qid) | PIDX(inc), qhp->wq.db);
1159 break; 1159 break;
1160 } 1160 }
1161 set_current_state(TASK_UNINTERRUPTIBLE); 1161 set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 3090100f0de7..e5430dd50764 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -5,7 +5,8 @@ ib_ipoib-y := ipoib_main.o \
5 ipoib_multicast.o \ 5 ipoib_multicast.o \
6 ipoib_verbs.o \ 6 ipoib_verbs.o \
7 ipoib_vlan.o \ 7 ipoib_vlan.o \
8 ipoib_ethtool.o 8 ipoib_ethtool.o \
9 ipoib_netlink.o
9ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o 10ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o
10ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o 11ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o
11 12
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0af216d21f87..196eb52f0035 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -104,6 +104,10 @@ enum {
104 104
105 MAX_SEND_CQE = 16, 105 MAX_SEND_CQE = 16,
106 IPOIB_CM_COPYBREAK = 256, 106 IPOIB_CM_COPYBREAK = 256,
107
108 IPOIB_NON_CHILD = 0,
109 IPOIB_LEGACY_CHILD = 1,
110 IPOIB_RTNL_CHILD = 2,
107}; 111};
108 112
109#define IPOIB_OP_RECV (1ul << 31) 113#define IPOIB_OP_RECV (1ul << 31)
@@ -353,6 +357,7 @@ struct ipoib_dev_priv {
353 struct net_device *parent; 357 struct net_device *parent;
354 struct list_head child_intfs; 358 struct list_head child_intfs;
355 struct list_head list; 359 struct list_head list;
360 int child_type;
356 361
357#ifdef CONFIG_INFINIBAND_IPOIB_CM 362#ifdef CONFIG_INFINIBAND_IPOIB_CM
358 struct ipoib_cm_dev_priv cm; 363 struct ipoib_cm_dev_priv cm;
@@ -512,6 +517,17 @@ void ipoib_event(struct ib_event_handler *handler,
512int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); 517int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
513int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); 518int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
514 519
520int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
521 u16 pkey, int child_type);
522
523int __init ipoib_netlink_init(void);
524void __exit ipoib_netlink_fini(void);
525
526void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
527int ipoib_set_mode(struct net_device *dev, const char *buf);
528
529void ipoib_setup(struct net_device *dev);
530
515void ipoib_pkey_poll(struct work_struct *work); 531void ipoib_pkey_poll(struct work_struct *work);
516int ipoib_pkey_dev_delay_open(struct net_device *dev); 532int ipoib_pkey_dev_delay_open(struct net_device *dev);
517void ipoib_drain_cq(struct net_device *dev); 533void ipoib_drain_cq(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 24683fda8e21..175581cf478c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1448,15 +1448,10 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
1448 return sprintf(buf, "datagram\n"); 1448 return sprintf(buf, "datagram\n");
1449} 1449}
1450 1450
1451static ssize_t set_mode(struct device *d, struct device_attribute *attr, 1451int ipoib_set_mode(struct net_device *dev, const char *buf)
1452 const char *buf, size_t count)
1453{ 1452{
1454 struct net_device *dev = to_net_dev(d);
1455 struct ipoib_dev_priv *priv = netdev_priv(dev); 1453 struct ipoib_dev_priv *priv = netdev_priv(dev);
1456 1454
1457 if (!rtnl_trylock())
1458 return restart_syscall();
1459
1460 /* flush paths if we switch modes so that connections are restarted */ 1455 /* flush paths if we switch modes so that connections are restarted */
1461 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 1456 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
1462 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 1457 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
@@ -1467,7 +1462,8 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1467 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 1462 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
1468 1463
1469 ipoib_flush_paths(dev); 1464 ipoib_flush_paths(dev);
1470 return count; 1465 rtnl_lock();
1466 return 0;
1471 } 1467 }
1472 1468
1473 if (!strcmp(buf, "datagram\n")) { 1469 if (!strcmp(buf, "datagram\n")) {
@@ -1476,14 +1472,32 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1476 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 1472 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
1477 rtnl_unlock(); 1473 rtnl_unlock();
1478 ipoib_flush_paths(dev); 1474 ipoib_flush_paths(dev);
1479 1475 rtnl_lock();
1480 return count; 1476 return 0;
1481 } 1477 }
1482 rtnl_unlock();
1483 1478
1484 return -EINVAL; 1479 return -EINVAL;
1485} 1480}
1486 1481
1482static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1483 const char *buf, size_t count)
1484{
1485 struct net_device *dev = to_net_dev(d);
1486 int ret;
1487
1488 if (!rtnl_trylock())
1489 return restart_syscall();
1490
1491 ret = ipoib_set_mode(dev, buf);
1492
1493 rtnl_unlock();
1494
1495 if (!ret)
1496 return count;
1497
1498 return ret;
1499}
1500
1487static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); 1501static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
1488 1502
1489int ipoib_cm_add_mode_attr(struct net_device *dev) 1503int ipoib_cm_add_mode_attr(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1e19b5ae7c47..3f9a9ba2f9ec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -173,6 +173,11 @@ static int ipoib_stop(struct net_device *dev)
173 return 0; 173 return 0;
174} 174}
175 175
176static void ipoib_uninit(struct net_device *dev)
177{
178 ipoib_dev_cleanup(dev);
179}
180
176static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) 181static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
177{ 182{
178 struct ipoib_dev_priv *priv = netdev_priv(dev); 183 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1257,6 +1262,9 @@ out:
1257void ipoib_dev_cleanup(struct net_device *dev) 1262void ipoib_dev_cleanup(struct net_device *dev)
1258{ 1263{
1259 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; 1264 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
1265 LIST_HEAD(head);
1266
1267 ASSERT_RTNL();
1260 1268
1261 ipoib_delete_debug_files(dev); 1269 ipoib_delete_debug_files(dev);
1262 1270
@@ -1265,10 +1273,9 @@ void ipoib_dev_cleanup(struct net_device *dev)
1265 /* Stop GC on child */ 1273 /* Stop GC on child */
1266 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags); 1274 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
1267 cancel_delayed_work(&cpriv->neigh_reap_task); 1275 cancel_delayed_work(&cpriv->neigh_reap_task);
1268 unregister_netdev(cpriv->dev); 1276 unregister_netdevice_queue(cpriv->dev, &head);
1269 ipoib_dev_cleanup(cpriv->dev);
1270 free_netdev(cpriv->dev);
1271 } 1277 }
1278 unregister_netdevice_many(&head);
1272 1279
1273 ipoib_ib_dev_cleanup(dev); 1280 ipoib_ib_dev_cleanup(dev);
1274 1281
@@ -1286,6 +1293,7 @@ static const struct header_ops ipoib_header_ops = {
1286}; 1293};
1287 1294
1288static const struct net_device_ops ipoib_netdev_ops = { 1295static const struct net_device_ops ipoib_netdev_ops = {
1296 .ndo_uninit = ipoib_uninit,
1289 .ndo_open = ipoib_open, 1297 .ndo_open = ipoib_open,
1290 .ndo_stop = ipoib_stop, 1298 .ndo_stop = ipoib_stop,
1291 .ndo_change_mtu = ipoib_change_mtu, 1299 .ndo_change_mtu = ipoib_change_mtu,
@@ -1295,7 +1303,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
1295 .ndo_set_rx_mode = ipoib_set_mcast_list, 1303 .ndo_set_rx_mode = ipoib_set_mcast_list,
1296}; 1304};
1297 1305
1298static void ipoib_setup(struct net_device *dev) 1306void ipoib_setup(struct net_device *dev)
1299{ 1307{
1300 struct ipoib_dev_priv *priv = netdev_priv(dev); 1308 struct ipoib_dev_priv *priv = netdev_priv(dev);
1301 1309
@@ -1373,12 +1381,9 @@ static ssize_t show_umcast(struct device *dev,
1373 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags)); 1381 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
1374} 1382}
1375 1383
1376static ssize_t set_umcast(struct device *dev, 1384void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
1377 struct device_attribute *attr,
1378 const char *buf, size_t count)
1379{ 1385{
1380 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev)); 1386 struct ipoib_dev_priv *priv = netdev_priv(ndev);
1381 unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
1382 1387
1383 if (umcast_val > 0) { 1388 if (umcast_val > 0) {
1384 set_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1389 set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
@@ -1386,6 +1391,15 @@ static ssize_t set_umcast(struct device *dev,
1386 "by userspace\n"); 1391 "by userspace\n");
1387 } else 1392 } else
1388 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1393 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
1394}
1395
1396static ssize_t set_umcast(struct device *dev,
1397 struct device_attribute *attr,
1398 const char *buf, size_t count)
1399{
1400 unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
1401
1402 ipoib_set_umcast(to_net_dev(dev), umcast_val);
1389 1403
1390 return count; 1404 return count;
1391} 1405}
@@ -1657,7 +1671,6 @@ static void ipoib_remove_one(struct ib_device *device)
1657 flush_workqueue(ipoib_workqueue); 1671 flush_workqueue(ipoib_workqueue);
1658 1672
1659 unregister_netdev(priv->dev); 1673 unregister_netdev(priv->dev);
1660 ipoib_dev_cleanup(priv->dev);
1661 free_netdev(priv->dev); 1674 free_netdev(priv->dev);
1662 } 1675 }
1663 1676
@@ -1709,8 +1722,15 @@ static int __init ipoib_init_module(void)
1709 if (ret) 1722 if (ret)
1710 goto err_sa; 1723 goto err_sa;
1711 1724
1725 ret = ipoib_netlink_init();
1726 if (ret)
1727 goto err_client;
1728
1712 return 0; 1729 return 0;
1713 1730
1731err_client:
1732 ib_unregister_client(&ipoib_client);
1733
1714err_sa: 1734err_sa:
1715 ib_sa_unregister_client(&ipoib_sa_client); 1735 ib_sa_unregister_client(&ipoib_sa_client);
1716 destroy_workqueue(ipoib_workqueue); 1736 destroy_workqueue(ipoib_workqueue);
@@ -1723,6 +1743,7 @@ err_fs:
1723 1743
1724static void __exit ipoib_cleanup_module(void) 1744static void __exit ipoib_cleanup_module(void)
1725{ 1745{
1746 ipoib_netlink_fini();
1726 ib_unregister_client(&ipoib_client); 1747 ib_unregister_client(&ipoib_client);
1727 ib_sa_unregister_client(&ipoib_sa_client); 1748 ib_sa_unregister_client(&ipoib_sa_client);
1728 ipoib_unregister_debugfs(); 1749 ipoib_unregister_debugfs();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
new file mode 100644
index 000000000000..74685936c948
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright (c) 2012 Mellanox Technologies. - All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/netdevice.h>
34#include <linux/module.h>
35#include <net/rtnetlink.h>
36#include "ipoib.h"
37
38static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
39 [IFLA_IPOIB_PKEY] = { .type = NLA_U16 },
40 [IFLA_IPOIB_MODE] = { .type = NLA_U16 },
41 [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
42};
43
44static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
45{
46 struct ipoib_dev_priv *priv = netdev_priv(dev);
47 u16 val;
48
49 if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
50 goto nla_put_failure;
51
52 val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
53 if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
54 goto nla_put_failure;
55
56 val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
57 if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
58 goto nla_put_failure;
59
60 return 0;
61
62nla_put_failure:
63 return -EMSGSIZE;
64}
65
66static int ipoib_changelink(struct net_device *dev,
67 struct nlattr *tb[], struct nlattr *data[])
68{
69 u16 mode, umcast;
70 int ret = 0;
71
72 if (data[IFLA_IPOIB_MODE]) {
73 mode = nla_get_u16(data[IFLA_IPOIB_MODE]);
74 if (mode == IPOIB_MODE_DATAGRAM)
75 ret = ipoib_set_mode(dev, "datagram\n");
76 else if (mode == IPOIB_MODE_CONNECTED)
77 ret = ipoib_set_mode(dev, "connected\n");
78 else
79 ret = -EINVAL;
80
81 if (ret < 0)
82 goto out_err;
83 }
84
85 if (data[IFLA_IPOIB_UMCAST]) {
86 umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
87 ipoib_set_umcast(dev, umcast);
88 }
89
90out_err:
91 return ret;
92}
93
94static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
95 struct nlattr *tb[], struct nlattr *data[])
96{
97 struct net_device *pdev;
98 struct ipoib_dev_priv *ppriv;
99 u16 child_pkey;
100 int err;
101
102 if (!tb[IFLA_LINK])
103 return -EINVAL;
104
105 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
106 if (!pdev)
107 return -ENODEV;
108
109 ppriv = netdev_priv(pdev);
110
111 if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
112 ipoib_warn(ppriv, "child creation disallowed for child devices\n");
113 return -EINVAL;
114 }
115
116 if (!data || !data[IFLA_IPOIB_PKEY]) {
117 ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
118 child_pkey = ppriv->pkey;
119 } else
120 child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
121
122 err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
123
124 if (!err && data)
125 err = ipoib_changelink(dev, tb, data);
126 return err;
127}
128
129static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
130{
131 struct ipoib_dev_priv *priv, *ppriv;
132
133 priv = netdev_priv(dev);
134 ppriv = netdev_priv(priv->parent);
135
136 mutex_lock(&ppriv->vlan_mutex);
137 unregister_netdevice_queue(dev, head);
138 list_del(&priv->list);
139 mutex_unlock(&ppriv->vlan_mutex);
140}
141
142static size_t ipoib_get_size(const struct net_device *dev)
143{
144 return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
145 nla_total_size(2) + /* IFLA_IPOIB_MODE */
146 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
147}
148
149static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
150 .kind = "ipoib",
151 .maxtype = IFLA_IPOIB_MAX,
152 .policy = ipoib_policy,
153 .priv_size = sizeof(struct ipoib_dev_priv),
154 .setup = ipoib_setup,
155 .newlink = ipoib_new_child_link,
156 .changelink = ipoib_changelink,
157 .dellink = ipoib_unregister_child_dev,
158 .get_size = ipoib_get_size,
159 .fill_info = ipoib_fill_info,
160};
161
162int __init ipoib_netlink_init(void)
163{
164 return rtnl_link_register(&ipoib_link_ops);
165}
166
167void __exit ipoib_netlink_fini(void)
168{
169 rtnl_link_unregister(&ipoib_link_ops);
170}
171
172MODULE_ALIAS_RTNL_LINK("ipoib");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index d7e9740c7248..8292554bccb5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -49,47 +49,11 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
49} 49}
50static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL); 50static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
51 51
52int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) 52int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
53 u16 pkey, int type)
53{ 54{
54 struct ipoib_dev_priv *ppriv, *priv;
55 char intf_name[IFNAMSIZ];
56 int result; 55 int result;
57 56
58 if (!capable(CAP_NET_ADMIN))
59 return -EPERM;
60
61 ppriv = netdev_priv(pdev);
62
63 if (!rtnl_trylock())
64 return restart_syscall();
65 mutex_lock(&ppriv->vlan_mutex);
66
67 /*
68 * First ensure this isn't a duplicate. We check the parent device and
69 * then all of the child interfaces to make sure the Pkey doesn't match.
70 */
71 if (ppriv->pkey == pkey) {
72 result = -ENOTUNIQ;
73 priv = NULL;
74 goto err;
75 }
76
77 list_for_each_entry(priv, &ppriv->child_intfs, list) {
78 if (priv->pkey == pkey) {
79 result = -ENOTUNIQ;
80 priv = NULL;
81 goto err;
82 }
83 }
84
85 snprintf(intf_name, sizeof intf_name, "%s.%04x",
86 ppriv->dev->name, pkey);
87 priv = ipoib_intf_alloc(intf_name);
88 if (!priv) {
89 result = -ENOMEM;
90 goto err;
91 }
92
93 priv->max_ib_mtu = ppriv->max_ib_mtu; 57 priv->max_ib_mtu = ppriv->max_ib_mtu;
94 /* MTU will be reset when mcast join happens */ 58 /* MTU will be reset when mcast join happens */
95 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 59 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
@@ -124,24 +88,27 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
124 88
125 ipoib_create_debug_files(priv->dev); 89 ipoib_create_debug_files(priv->dev);
126 90
127 if (ipoib_cm_add_mode_attr(priv->dev)) 91 /* RTNL childs don't need proprietary sysfs entries */
128 goto sysfs_failed; 92 if (type == IPOIB_LEGACY_CHILD) {
129 if (ipoib_add_pkey_attr(priv->dev)) 93 if (ipoib_cm_add_mode_attr(priv->dev))
130 goto sysfs_failed; 94 goto sysfs_failed;
131 if (ipoib_add_umcast_attr(priv->dev)) 95 if (ipoib_add_pkey_attr(priv->dev))
132 goto sysfs_failed; 96 goto sysfs_failed;
133 97 if (ipoib_add_umcast_attr(priv->dev))
134 if (device_create_file(&priv->dev->dev, &dev_attr_parent)) 98 goto sysfs_failed;
135 goto sysfs_failed; 99
100 if (device_create_file(&priv->dev->dev, &dev_attr_parent))
101 goto sysfs_failed;
102 }
136 103
104 priv->child_type = type;
105 priv->dev->iflink = ppriv->dev->ifindex;
137 list_add_tail(&priv->list, &ppriv->child_intfs); 106 list_add_tail(&priv->list, &ppriv->child_intfs);
138 107
139 mutex_unlock(&ppriv->vlan_mutex);
140 rtnl_unlock();
141
142 return 0; 108 return 0;
143 109
144sysfs_failed: 110sysfs_failed:
111 result = -ENOMEM;
145 ipoib_delete_debug_files(priv->dev); 112 ipoib_delete_debug_files(priv->dev);
146 unregister_netdevice(priv->dev); 113 unregister_netdevice(priv->dev);
147 114
@@ -149,11 +116,60 @@ register_failed:
149 ipoib_dev_cleanup(priv->dev); 116 ipoib_dev_cleanup(priv->dev);
150 117
151err: 118err:
119 return result;
120}
121
122int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
123{
124 struct ipoib_dev_priv *ppriv, *priv;
125 char intf_name[IFNAMSIZ];
126 struct ipoib_dev_priv *tpriv;
127 int result;
128
129 if (!capable(CAP_NET_ADMIN))
130 return -EPERM;
131
132 ppriv = netdev_priv(pdev);
133
134 snprintf(intf_name, sizeof intf_name, "%s.%04x",
135 ppriv->dev->name, pkey);
136 priv = ipoib_intf_alloc(intf_name);
137 if (!priv)
138 return -ENOMEM;
139
140 if (!rtnl_trylock())
141 return restart_syscall();
142
143 mutex_lock(&ppriv->vlan_mutex);
144
145 /*
146 * First ensure this isn't a duplicate. We check the parent device and
147 * then all of the legacy child interfaces to make sure the Pkey
148 * doesn't match.
149 */
150 if (ppriv->pkey == pkey) {
151 result = -ENOTUNIQ;
152 goto out;
153 }
154
155 list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
156 if (tpriv->pkey == pkey &&
157 tpriv->child_type == IPOIB_LEGACY_CHILD) {
158 result = -ENOTUNIQ;
159 goto out;
160 }
161 }
162
163 result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
164
165out:
152 mutex_unlock(&ppriv->vlan_mutex); 166 mutex_unlock(&ppriv->vlan_mutex);
153 rtnl_unlock(); 167
154 if (priv) 168 if (result)
155 free_netdev(priv->dev); 169 free_netdev(priv->dev);
156 170
171 rtnl_unlock();
172
157 return result; 173 return result;
158} 174}
159 175
@@ -171,9 +187,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
171 return restart_syscall(); 187 return restart_syscall();
172 mutex_lock(&ppriv->vlan_mutex); 188 mutex_lock(&ppriv->vlan_mutex);
173 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 189 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
174 if (priv->pkey == pkey) { 190 if (priv->pkey == pkey &&
191 priv->child_type == IPOIB_LEGACY_CHILD) {
175 unregister_netdevice(priv->dev); 192 unregister_netdevice(priv->dev);
176 ipoib_dev_cleanup(priv->dev);
177 list_del(&priv->list); 193 list_del(&priv->list);
178 dev = priv->dev; 194 dev = priv->dev;
179 break; 195 break;
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index aa41485bc594..30a6b174fbb0 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -1123,7 +1123,6 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
1123 return drv; 1123 return drv;
1124 1124
1125error: 1125error:
1126 kfree(drv->cs);
1127 kfree(drv); 1126 kfree(drv);
1128 return NULL; 1127 return NULL;
1129} 1128}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c2bd806950e..6a70184c3f23 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -107,8 +107,6 @@ config MII
107 or internal device. It is safe to say Y or M here even if your 107 or internal device. It is safe to say Y or M here even if your
108 ethernet card lacks MII. 108 ethernet card lacks MII.
109 109
110source "drivers/ieee802154/Kconfig"
111
112config IFB 110config IFB
113 tristate "Intermediate Functional Block support" 111 tristate "Intermediate Functional Block support"
114 depends on NET_CLS_ACT 112 depends on NET_CLS_ACT
@@ -151,6 +149,19 @@ config MACVTAP
151 To compile this driver as a module, choose M here: the module 149 To compile this driver as a module, choose M here: the module
152 will be called macvtap. 150 will be called macvtap.
153 151
152config VXLAN
153 tristate "Virtual eXtensible Local Area Network (VXLAN)"
154 depends on EXPERIMENTAL && INET
155 ---help---
156 This allows one to create vxlan virtual interfaces that provide
157 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
158 to tunnel virtual network infrastructure in virtualized environments.
159 For more information see:
160 http://tools.ietf.org/html/draft-mahalingam-dutt-dcops-vxlan-02
161
162 To compile this driver as a module, choose M here: the module
163 will be called vxlan.
164
154config NETCONSOLE 165config NETCONSOLE
155 tristate "Network console logging support" 166 tristate "Network console logging support"
156 ---help--- 167 ---help---
@@ -290,6 +301,8 @@ source "drivers/net/wimax/Kconfig"
290 301
291source "drivers/net/wan/Kconfig" 302source "drivers/net/wan/Kconfig"
292 303
304source "drivers/net/ieee802154/Kconfig"
305
293config XEN_NETDEV_FRONTEND 306config XEN_NETDEV_FRONTEND
294 tristate "Xen network device frontend driver" 307 tristate "Xen network device frontend driver"
295 depends on XEN 308 depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3d375ca128a6..335db78fd987 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_TEAM) += team/
21obj-$(CONFIG_TUN) += tun.o 21obj-$(CONFIG_TUN) += tun.o
22obj-$(CONFIG_VETH) += veth.o 22obj-$(CONFIG_VETH) += veth.o
23obj-$(CONFIG_VIRTIO_NET) += virtio_net.o 23obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
24obj-$(CONFIG_VXLAN) += vxlan.o
24 25
25# 26#
26# Networking Drivers 27# Networking Drivers
@@ -53,6 +54,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
53obj-$(CONFIG_WAN) += wan/ 54obj-$(CONFIG_WAN) += wan/
54obj-$(CONFIG_WLAN) += wireless/ 55obj-$(CONFIG_WLAN) += wireless/
55obj-$(CONFIG_WIMAX) += wimax/ 56obj-$(CONFIG_WIMAX) += wimax/
57obj-$(CONFIG_IEEE802154) += ieee802154/
56 58
57obj-$(CONFIG_VMXNET3) += vmxnet3/ 59obj-$(CONFIG_VMXNET3) += vmxnet3/
58obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o 60obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d688a8af432c..7858c58df4a3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1120 write_unlock_bh(&bond->curr_slave_lock); 1120 write_unlock_bh(&bond->curr_slave_lock);
1121 read_unlock(&bond->lock); 1121 read_unlock(&bond->lock);
1122 1122
1123 netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); 1123 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
1124 if (should_notify_peers) 1124 if (should_notify_peers)
1125 netdev_bonding_change(bond->dev, 1125 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
1126 NETDEV_NOTIFY_PEERS); 1126 bond->dev);
1127 1127
1128 read_lock(&bond->lock); 1128 read_lock(&bond->lock);
1129 write_lock_bh(&bond->curr_slave_lock); 1129 write_lock_bh(&bond->curr_slave_lock);
@@ -1558,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1558 bond_dev->name, 1558 bond_dev->name,
1559 bond_dev->type, slave_dev->type); 1559 bond_dev->type, slave_dev->type);
1560 1560
1561 res = netdev_bonding_change(bond_dev, 1561 res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1562 NETDEV_PRE_TYPE_CHANGE); 1562 bond_dev);
1563 res = notifier_to_errno(res); 1563 res = notifier_to_errno(res);
1564 if (res) { 1564 if (res) {
1565 pr_err("%s: refused to change device type\n", 1565 pr_err("%s: refused to change device type\n",
@@ -1579,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1579 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1579 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1580 } 1580 }
1581 1581
1582 netdev_bonding_change(bond_dev, 1582 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1583 NETDEV_POST_TYPE_CHANGE); 1583 bond_dev);
1584 } 1584 }
1585 } else if (bond_dev->type != slave_dev->type) { 1585 } else if (bond_dev->type != slave_dev->type) {
1586 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", 1586 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1941,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1941 } 1941 }
1942 1942
1943 block_netpoll_tx(); 1943 block_netpoll_tx();
1944 netdev_bonding_change(bond_dev, NETDEV_RELEASE); 1944 call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
1945 write_lock_bh(&bond->lock); 1945 write_lock_bh(&bond->lock);
1946 1946
1947 slave = bond_get_slave_by_dev(bond, slave_dev); 1947 slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2584,7 +2584,7 @@ re_arm:
2584 read_unlock(&bond->lock); 2584 read_unlock(&bond->lock);
2585 return; 2585 return;
2586 } 2586 }
2587 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); 2587 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2588 rtnl_unlock(); 2588 rtnl_unlock();
2589 } 2589 }
2590} 2590}
@@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2811 arp_work.work); 2811 arp_work.work);
2812 struct slave *slave, *oldcurrent; 2812 struct slave *slave, *oldcurrent;
2813 int do_failover = 0; 2813 int do_failover = 0;
2814 int delta_in_ticks; 2814 int delta_in_ticks, extra_ticks;
2815 int i; 2815 int i;
2816 2816
2817 read_lock(&bond->lock); 2817 read_lock(&bond->lock);
2818 2818
2819 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2819 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2820 extra_ticks = delta_in_ticks / 2;
2820 2821
2821 if (bond->slave_cnt == 0) 2822 if (bond->slave_cnt == 0)
2822 goto re_arm; 2823 goto re_arm;
@@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2839 if (slave->link != BOND_LINK_UP) { 2840 if (slave->link != BOND_LINK_UP) {
2840 if (time_in_range(jiffies, 2841 if (time_in_range(jiffies,
2841 trans_start - delta_in_ticks, 2842 trans_start - delta_in_ticks,
2842 trans_start + delta_in_ticks) && 2843 trans_start + delta_in_ticks + extra_ticks) &&
2843 time_in_range(jiffies, 2844 time_in_range(jiffies,
2844 slave->dev->last_rx - delta_in_ticks, 2845 slave->dev->last_rx - delta_in_ticks,
2845 slave->dev->last_rx + delta_in_ticks)) { 2846 slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
2846 2847
2847 slave->link = BOND_LINK_UP; 2848 slave->link = BOND_LINK_UP;
2848 bond_set_active_slave(slave); 2849 bond_set_active_slave(slave);
@@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2872 */ 2873 */
2873 if (!time_in_range(jiffies, 2874 if (!time_in_range(jiffies,
2874 trans_start - delta_in_ticks, 2875 trans_start - delta_in_ticks,
2875 trans_start + 2 * delta_in_ticks) || 2876 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2876 !time_in_range(jiffies, 2877 !time_in_range(jiffies,
2877 slave->dev->last_rx - delta_in_ticks, 2878 slave->dev->last_rx - delta_in_ticks,
2878 slave->dev->last_rx + 2 * delta_in_ticks)) { 2879 slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
2879 2880
2880 slave->link = BOND_LINK_DOWN; 2881 slave->link = BOND_LINK_DOWN;
2881 bond_set_backup_slave(slave); 2882 bond_set_backup_slave(slave);
@@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2933 struct slave *slave; 2934 struct slave *slave;
2934 int i, commit = 0; 2935 int i, commit = 0;
2935 unsigned long trans_start; 2936 unsigned long trans_start;
2937 int extra_ticks;
2938
2939 /* All the time comparisons below need some extra time. Otherwise, on
2940 * fast networks the ARP probe/reply may arrive within the same jiffy
2941 * as it was sent. Then, the next time the ARP monitor is run, one
2942 * arp_interval will already have passed in the comparisons.
2943 */
2944 extra_ticks = delta_in_ticks / 2;
2936 2945
2937 bond_for_each_slave(bond, slave, i) { 2946 bond_for_each_slave(bond, slave, i) {
2938 slave->new_link = BOND_LINK_NOCHANGE; 2947 slave->new_link = BOND_LINK_NOCHANGE;
@@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2940 if (slave->link != BOND_LINK_UP) { 2949 if (slave->link != BOND_LINK_UP) {
2941 if (time_in_range(jiffies, 2950 if (time_in_range(jiffies,
2942 slave_last_rx(bond, slave) - delta_in_ticks, 2951 slave_last_rx(bond, slave) - delta_in_ticks,
2943 slave_last_rx(bond, slave) + delta_in_ticks)) { 2952 slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
2944 2953
2945 slave->new_link = BOND_LINK_UP; 2954 slave->new_link = BOND_LINK_UP;
2946 commit++; 2955 commit++;
@@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2956 */ 2965 */
2957 if (time_in_range(jiffies, 2966 if (time_in_range(jiffies,
2958 slave->jiffies - delta_in_ticks, 2967 slave->jiffies - delta_in_ticks,
2959 slave->jiffies + 2 * delta_in_ticks)) 2968 slave->jiffies + 2 * delta_in_ticks + extra_ticks))
2960 continue; 2969 continue;
2961 2970
2962 /* 2971 /*
@@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2976 !bond->current_arp_slave && 2985 !bond->current_arp_slave &&
2977 !time_in_range(jiffies, 2986 !time_in_range(jiffies,
2978 slave_last_rx(bond, slave) - delta_in_ticks, 2987 slave_last_rx(bond, slave) - delta_in_ticks,
2979 slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { 2988 slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
2980 2989
2981 slave->new_link = BOND_LINK_DOWN; 2990 slave->new_link = BOND_LINK_DOWN;
2982 commit++; 2991 commit++;
@@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2992 if (bond_is_active_slave(slave) && 3001 if (bond_is_active_slave(slave) &&
2993 (!time_in_range(jiffies, 3002 (!time_in_range(jiffies,
2994 trans_start - delta_in_ticks, 3003 trans_start - delta_in_ticks,
2995 trans_start + 2 * delta_in_ticks) || 3004 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2996 !time_in_range(jiffies, 3005 !time_in_range(jiffies,
2997 slave_last_rx(bond, slave) - delta_in_ticks, 3006 slave_last_rx(bond, slave) - delta_in_ticks,
2998 slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { 3007 slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
2999 3008
3000 slave->new_link = BOND_LINK_DOWN; 3009 slave->new_link = BOND_LINK_DOWN;
3001 commit++; 3010 commit++;
@@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
3027 if ((!bond->curr_active_slave && 3036 if ((!bond->curr_active_slave &&
3028 time_in_range(jiffies, 3037 time_in_range(jiffies,
3029 trans_start - delta_in_ticks, 3038 trans_start - delta_in_ticks,
3030 trans_start + delta_in_ticks)) || 3039 trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
3031 bond->curr_active_slave != slave) { 3040 bond->curr_active_slave != slave) {
3032 slave->link = BOND_LINK_UP; 3041 slave->link = BOND_LINK_UP;
3033 if (bond->current_arp_slave) { 3042 if (bond->current_arp_slave) {
@@ -3203,7 +3212,7 @@ re_arm:
3203 read_unlock(&bond->lock); 3212 read_unlock(&bond->lock);
3204 return; 3213 return;
3205 } 3214 }
3206 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); 3215 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
3207 rtnl_unlock(); 3216 rtnl_unlock();
3208 } 3217 }
3209} 3218}
@@ -3352,56 +3361,93 @@ static struct notifier_block bond_netdev_notifier = {
3352/*---------------------------- Hashing Policies -----------------------------*/ 3361/*---------------------------- Hashing Policies -----------------------------*/
3353 3362
3354/* 3363/*
3364 * Hash for the output device based upon layer 2 data
3365 */
3366static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3367{
3368 struct ethhdr *data = (struct ethhdr *)skb->data;
3369
3370 if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
3371 return (data->h_dest[5] ^ data->h_source[5]) % count;
3372
3373 return 0;
3374}
3375
3376/*
3355 * Hash for the output device based upon layer 2 and layer 3 data. If 3377 * Hash for the output device based upon layer 2 and layer 3 data. If
3356 * the packet is not IP mimic bond_xmit_hash_policy_l2() 3378 * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
3357 */ 3379 */
3358static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) 3380static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3359{ 3381{
3360 struct ethhdr *data = (struct ethhdr *)skb->data; 3382 struct ethhdr *data = (struct ethhdr *)skb->data;
3361 struct iphdr *iph = ip_hdr(skb); 3383 struct iphdr *iph;
3362 3384 struct ipv6hdr *ipv6h;
3363 if (skb->protocol == htons(ETH_P_IP)) { 3385 u32 v6hash;
3386 __be32 *s, *d;
3387
3388 if (skb->protocol == htons(ETH_P_IP) &&
3389 skb_network_header_len(skb) >= sizeof(*iph)) {
3390 iph = ip_hdr(skb);
3364 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 3391 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3365 (data->h_dest[5] ^ data->h_source[5])) % count; 3392 (data->h_dest[5] ^ data->h_source[5])) % count;
3393 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3394 skb_network_header_len(skb) >= sizeof(*ipv6h)) {
3395 ipv6h = ipv6_hdr(skb);
3396 s = &ipv6h->saddr.s6_addr32[0];
3397 d = &ipv6h->daddr.s6_addr32[0];
3398 v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
3399 v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
3400 return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
3366 } 3401 }
3367 3402
3368 return (data->h_dest[5] ^ data->h_source[5]) % count; 3403 return bond_xmit_hash_policy_l2(skb, count);
3369} 3404}
3370 3405
3371/* 3406/*
3372 * Hash for the output device based upon layer 3 and layer 4 data. If 3407 * Hash for the output device based upon layer 3 and layer 4 data. If
3373 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3408 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3374 * altogether not IP, mimic bond_xmit_hash_policy_l2() 3409 * altogether not IP, fall back on bond_xmit_hash_policy_l2()
3375 */ 3410 */
3376static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) 3411static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
3377{ 3412{
3378 struct ethhdr *data = (struct ethhdr *)skb->data; 3413 u32 layer4_xor = 0;
3379 struct iphdr *iph = ip_hdr(skb); 3414 struct iphdr *iph;
3380 __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); 3415 struct ipv6hdr *ipv6h;
3381 int layer4_xor = 0; 3416 __be32 *s, *d;
3382 3417 __be16 *layer4hdr;
3383 if (skb->protocol == htons(ETH_P_IP)) { 3418
3419 if (skb->protocol == htons(ETH_P_IP) &&
3420 skb_network_header_len(skb) >= sizeof(*iph)) {
3421 iph = ip_hdr(skb);
3384 if (!ip_is_fragment(iph) && 3422 if (!ip_is_fragment(iph) &&
3385 (iph->protocol == IPPROTO_TCP || 3423 (iph->protocol == IPPROTO_TCP ||
3386 iph->protocol == IPPROTO_UDP)) { 3424 iph->protocol == IPPROTO_UDP) &&
3387 layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); 3425 (skb_headlen(skb) - skb_network_offset(skb) >=
3426 iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
3427 layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
3428 layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
3388 } 3429 }
3389 return (layer4_xor ^ 3430 return (layer4_xor ^
3390 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; 3431 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
3391 3432 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3433 skb_network_header_len(skb) >= sizeof(*ipv6h)) {
3434 ipv6h = ipv6_hdr(skb);
3435 if ((ipv6h->nexthdr == IPPROTO_TCP ||
3436 ipv6h->nexthdr == IPPROTO_UDP) &&
3437 (skb_headlen(skb) - skb_network_offset(skb) >=
3438 sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
3439 layer4hdr = (__be16 *)(ipv6h + 1);
3440 layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
3441 }
3442 s = &ipv6h->saddr.s6_addr32[0];
3443 d = &ipv6h->daddr.s6_addr32[0];
3444 layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
3445 layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
3446 (layer4_xor >> 8);
3447 return layer4_xor % count;
3392 } 3448 }
3393 3449
3394 return (data->h_dest[5] ^ data->h_source[5]) % count; 3450 return bond_xmit_hash_policy_l2(skb, count);
3395}
3396
3397/*
3398 * Hash for the output device based upon layer 2 data
3399 */
3400static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3401{
3402 struct ethhdr *data = (struct ethhdr *)skb->data;
3403
3404 return (data->h_dest[5] ^ data->h_source[5]) % count;
3405} 3451}
3406 3452
3407/*-------------------------- Device entry points ----------------------------*/ 3453/*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 4c538e388655..e5180dfddba5 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -34,6 +34,7 @@
34#include <linux/if_ether.h> 34#include <linux/if_ether.h>
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/pm_runtime.h>
37 38
38#include <linux/can.h> 39#include <linux/can.h>
39#include <linux/can/dev.h> 40#include <linux/can/dev.h>
@@ -45,6 +46,9 @@
45#define IF_ENUM_REG_LEN 11 46#define IF_ENUM_REG_LEN 11
46#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN) 47#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
47 48
49/* control extension register D_CAN specific */
50#define CONTROL_EX_PDR BIT(8)
51
48/* control register */ 52/* control register */
49#define CONTROL_TEST BIT(7) 53#define CONTROL_TEST BIT(7)
50#define CONTROL_CCE BIT(6) 54#define CONTROL_CCE BIT(6)
@@ -64,6 +68,7 @@
64#define TEST_BASIC BIT(2) 68#define TEST_BASIC BIT(2)
65 69
66/* status register */ 70/* status register */
71#define STATUS_PDA BIT(10)
67#define STATUS_BOFF BIT(7) 72#define STATUS_BOFF BIT(7)
68#define STATUS_EWARN BIT(6) 73#define STATUS_EWARN BIT(6)
69#define STATUS_EPASS BIT(5) 74#define STATUS_EPASS BIT(5)
@@ -163,6 +168,9 @@
163/* minimum timeout for checking BUSY status */ 168/* minimum timeout for checking BUSY status */
164#define MIN_TIMEOUT_VALUE 6 169#define MIN_TIMEOUT_VALUE 6
165 170
171/* Wait for ~1 sec for INIT bit */
172#define INIT_WAIT_MS 1000
173
166/* napi related */ 174/* napi related */
167#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM 175#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
168 176
@@ -201,6 +209,30 @@ static const struct can_bittiming_const c_can_bittiming_const = {
201 .brp_inc = 1, 209 .brp_inc = 1,
202}; 210};
203 211
212static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
213{
214 if (priv->device)
215 pm_runtime_enable(priv->device);
216}
217
218static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
219{
220 if (priv->device)
221 pm_runtime_disable(priv->device);
222}
223
224static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
225{
226 if (priv->device)
227 pm_runtime_get_sync(priv->device);
228}
229
230static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
231{
232 if (priv->device)
233 pm_runtime_put_sync(priv->device);
234}
235
204static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) 236static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
205{ 237{
206 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + 238 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -673,11 +705,15 @@ static int c_can_get_berr_counter(const struct net_device *dev,
673 unsigned int reg_err_counter; 705 unsigned int reg_err_counter;
674 struct c_can_priv *priv = netdev_priv(dev); 706 struct c_can_priv *priv = netdev_priv(dev);
675 707
708 c_can_pm_runtime_get_sync(priv);
709
676 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 710 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
677 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> 711 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
678 ERR_CNT_REC_SHIFT; 712 ERR_CNT_REC_SHIFT;
679 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; 713 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
680 714
715 c_can_pm_runtime_put_sync(priv);
716
681 return 0; 717 return 0;
682} 718}
683 719
@@ -1053,11 +1089,13 @@ static int c_can_open(struct net_device *dev)
1053 int err; 1089 int err;
1054 struct c_can_priv *priv = netdev_priv(dev); 1090 struct c_can_priv *priv = netdev_priv(dev);
1055 1091
1092 c_can_pm_runtime_get_sync(priv);
1093
1056 /* open the can device */ 1094 /* open the can device */
1057 err = open_candev(dev); 1095 err = open_candev(dev);
1058 if (err) { 1096 if (err) {
1059 netdev_err(dev, "failed to open can device\n"); 1097 netdev_err(dev, "failed to open can device\n");
1060 return err; 1098 goto exit_open_fail;
1061 } 1099 }
1062 1100
1063 /* register interrupt handler */ 1101 /* register interrupt handler */
@@ -1079,6 +1117,8 @@ static int c_can_open(struct net_device *dev)
1079 1117
1080exit_irq_fail: 1118exit_irq_fail:
1081 close_candev(dev); 1119 close_candev(dev);
1120exit_open_fail:
1121 c_can_pm_runtime_put_sync(priv);
1082 return err; 1122 return err;
1083} 1123}
1084 1124
@@ -1091,6 +1131,7 @@ static int c_can_close(struct net_device *dev)
1091 c_can_stop(dev); 1131 c_can_stop(dev);
1092 free_irq(dev->irq, dev); 1132 free_irq(dev->irq, dev);
1093 close_candev(dev); 1133 close_candev(dev);
1134 c_can_pm_runtime_put_sync(priv);
1094 1135
1095 return 0; 1136 return 0;
1096} 1137}
@@ -1119,6 +1160,77 @@ struct net_device *alloc_c_can_dev(void)
1119} 1160}
1120EXPORT_SYMBOL_GPL(alloc_c_can_dev); 1161EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1121 1162
1163#ifdef CONFIG_PM
1164int c_can_power_down(struct net_device *dev)
1165{
1166 u32 val;
1167 unsigned long time_out;
1168 struct c_can_priv *priv = netdev_priv(dev);
1169
1170 if (!(dev->flags & IFF_UP))
1171 return 0;
1172
1173 WARN_ON(priv->type != BOSCH_D_CAN);
1174
1175 /* set PDR value so the device goes to power down mode */
1176 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1177 val |= CONTROL_EX_PDR;
1178 priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1179
1180 /* Wait for the PDA bit to get set */
1181 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1182 while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1183 time_after(time_out, jiffies))
1184 cpu_relax();
1185
1186 if (time_after(jiffies, time_out))
1187 return -ETIMEDOUT;
1188
1189 c_can_stop(dev);
1190
1191 c_can_pm_runtime_put_sync(priv);
1192
1193 return 0;
1194}
1195EXPORT_SYMBOL_GPL(c_can_power_down);
1196
1197int c_can_power_up(struct net_device *dev)
1198{
1199 u32 val;
1200 unsigned long time_out;
1201 struct c_can_priv *priv = netdev_priv(dev);
1202
1203 if (!(dev->flags & IFF_UP))
1204 return 0;
1205
1206 WARN_ON(priv->type != BOSCH_D_CAN);
1207
1208 c_can_pm_runtime_get_sync(priv);
1209
1210 /* Clear PDR and INIT bits */
1211 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1212 val &= ~CONTROL_EX_PDR;
1213 priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1214 val = priv->read_reg(priv, C_CAN_CTRL_REG);
1215 val &= ~CONTROL_INIT;
1216 priv->write_reg(priv, C_CAN_CTRL_REG, val);
1217
1218 /* Wait for the PDA bit to get clear */
1219 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1220 while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1221 time_after(time_out, jiffies))
1222 cpu_relax();
1223
1224 if (time_after(jiffies, time_out))
1225 return -ETIMEDOUT;
1226
1227 c_can_start(dev);
1228
1229 return 0;
1230}
1231EXPORT_SYMBOL_GPL(c_can_power_up);
1232#endif
1233
1122void free_c_can_dev(struct net_device *dev) 1234void free_c_can_dev(struct net_device *dev)
1123{ 1235{
1124 free_candev(dev); 1236 free_candev(dev);
@@ -1133,10 +1245,19 @@ static const struct net_device_ops c_can_netdev_ops = {
1133 1245
1134int register_c_can_dev(struct net_device *dev) 1246int register_c_can_dev(struct net_device *dev)
1135{ 1247{
1248 struct c_can_priv *priv = netdev_priv(dev);
1249 int err;
1250
1251 c_can_pm_runtime_enable(priv);
1252
1136 dev->flags |= IFF_ECHO; /* we support local echo */ 1253 dev->flags |= IFF_ECHO; /* we support local echo */
1137 dev->netdev_ops = &c_can_netdev_ops; 1254 dev->netdev_ops = &c_can_netdev_ops;
1138 1255
1139 return register_candev(dev); 1256 err = register_candev(dev);
1257 if (err)
1258 c_can_pm_runtime_disable(priv);
1259
1260 return err;
1140} 1261}
1141EXPORT_SYMBOL_GPL(register_c_can_dev); 1262EXPORT_SYMBOL_GPL(register_c_can_dev);
1142 1263
@@ -1144,10 +1265,9 @@ void unregister_c_can_dev(struct net_device *dev)
1144{ 1265{
1145 struct c_can_priv *priv = netdev_priv(dev); 1266 struct c_can_priv *priv = netdev_priv(dev);
1146 1267
1147 /* disable all interrupts */
1148 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1149
1150 unregister_candev(dev); 1268 unregister_candev(dev);
1269
1270 c_can_pm_runtime_disable(priv);
1151} 1271}
1152EXPORT_SYMBOL_GPL(unregister_c_can_dev); 1272EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1153 1273
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 01a7049ab990..e5ed41dafa1b 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -24,6 +24,7 @@
24 24
25enum reg { 25enum reg {
26 C_CAN_CTRL_REG = 0, 26 C_CAN_CTRL_REG = 0,
27 C_CAN_CTRL_EX_REG,
27 C_CAN_STS_REG, 28 C_CAN_STS_REG,
28 C_CAN_ERR_CNT_REG, 29 C_CAN_ERR_CNT_REG,
29 C_CAN_BTR_REG, 30 C_CAN_BTR_REG,
@@ -104,6 +105,7 @@ static const u16 reg_map_c_can[] = {
104 105
105static const u16 reg_map_d_can[] = { 106static const u16 reg_map_d_can[] = {
106 [C_CAN_CTRL_REG] = 0x00, 107 [C_CAN_CTRL_REG] = 0x00,
108 [C_CAN_CTRL_EX_REG] = 0x02,
107 [C_CAN_STS_REG] = 0x04, 109 [C_CAN_STS_REG] = 0x04,
108 [C_CAN_ERR_CNT_REG] = 0x08, 110 [C_CAN_ERR_CNT_REG] = 0x08,
109 [C_CAN_BTR_REG] = 0x0C, 111 [C_CAN_BTR_REG] = 0x0C,
@@ -143,8 +145,9 @@ static const u16 reg_map_d_can[] = {
143}; 145};
144 146
145enum c_can_dev_id { 147enum c_can_dev_id {
146 C_CAN_DEVTYPE, 148 BOSCH_C_CAN_PLATFORM,
147 D_CAN_DEVTYPE, 149 BOSCH_C_CAN,
150 BOSCH_D_CAN,
148}; 151};
149 152
150/* c_can private data structure */ 153/* c_can private data structure */
@@ -152,6 +155,7 @@ struct c_can_priv {
152 struct can_priv can; /* must be the first member */ 155 struct can_priv can; /* must be the first member */
153 struct napi_struct napi; 156 struct napi_struct napi;
154 struct net_device *dev; 157 struct net_device *dev;
158 struct device *device;
155 int tx_object; 159 int tx_object;
156 int current_status; 160 int current_status;
157 int last_status; 161 int last_status;
@@ -164,6 +168,7 @@ struct c_can_priv {
164 unsigned int tx_echo; 168 unsigned int tx_echo;
165 void *priv; /* for board-specific data */ 169 void *priv; /* for board-specific data */
166 u16 irqstatus; 170 u16 irqstatus;
171 enum c_can_dev_id type;
167}; 172};
168 173
169struct net_device *alloc_c_can_dev(void); 174struct net_device *alloc_c_can_dev(void);
@@ -171,4 +176,9 @@ void free_c_can_dev(struct net_device *dev);
171int register_c_can_dev(struct net_device *dev); 176int register_c_can_dev(struct net_device *dev);
172void unregister_c_can_dev(struct net_device *dev); 177void unregister_c_can_dev(struct net_device *dev);
173 178
179#ifdef CONFIG_PM
180int c_can_power_up(struct net_device *dev);
181int c_can_power_down(struct net_device *dev);
182#endif
183
174#endif /* C_CAN_H */ 184#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 1011146ea513..3d7830bcd2bf 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -120,10 +120,10 @@ static int __devinit c_can_pci_probe(struct pci_dev *pdev,
120 120
121 /* Configure CAN type */ 121 /* Configure CAN type */
122 switch (c_can_pci_data->type) { 122 switch (c_can_pci_data->type) {
123 case C_CAN_DEVTYPE: 123 case BOSCH_C_CAN:
124 priv->regs = reg_map_c_can; 124 priv->regs = reg_map_c_can;
125 break; 125 break;
126 case D_CAN_DEVTYPE: 126 case BOSCH_D_CAN:
127 priv->regs = reg_map_d_can; 127 priv->regs = reg_map_d_can;
128 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; 128 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
129 break; 129 break;
@@ -192,7 +192,7 @@ static void __devexit c_can_pci_remove(struct pci_dev *pdev)
192} 192}
193 193
194static struct c_can_pci_data c_can_sta2x11= { 194static struct c_can_pci_data c_can_sta2x11= {
195 .type = C_CAN_DEVTYPE, 195 .type = BOSCH_C_CAN,
196 .reg_align = C_CAN_REG_ALIGN_32, 196 .reg_align = C_CAN_REG_ALIGN_32,
197 .freq = 52000000, /* 52 Mhz */ 197 .freq = 52000000, /* 52 Mhz */
198}; 198};
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 6ff7ad006c30..ee1416132aba 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -30,6 +30,9 @@
30#include <linux/io.h> 30#include <linux/io.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/of.h>
34#include <linux/of_device.h>
35#include <linux/pinctrl/consumer.h>
33 36
34#include <linux/can/dev.h> 37#include <linux/can/dev.h>
35 38
@@ -65,17 +68,58 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
65 writew(val, priv->base + 2 * priv->regs[index]); 68 writew(val, priv->base + 2 * priv->regs[index]);
66} 69}
67 70
71static struct platform_device_id c_can_id_table[] = {
72 [BOSCH_C_CAN_PLATFORM] = {
73 .name = KBUILD_MODNAME,
74 .driver_data = BOSCH_C_CAN,
75 },
76 [BOSCH_C_CAN] = {
77 .name = "c_can",
78 .driver_data = BOSCH_C_CAN,
79 },
80 [BOSCH_D_CAN] = {
81 .name = "d_can",
82 .driver_data = BOSCH_D_CAN,
83 }, {
84 }
85};
86
87static const struct of_device_id c_can_of_table[] = {
88 { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
89 { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
90 { /* sentinel */ },
91};
92
68static int __devinit c_can_plat_probe(struct platform_device *pdev) 93static int __devinit c_can_plat_probe(struct platform_device *pdev)
69{ 94{
70 int ret; 95 int ret;
71 void __iomem *addr; 96 void __iomem *addr;
72 struct net_device *dev; 97 struct net_device *dev;
73 struct c_can_priv *priv; 98 struct c_can_priv *priv;
99 const struct of_device_id *match;
74 const struct platform_device_id *id; 100 const struct platform_device_id *id;
101 struct pinctrl *pinctrl;
75 struct resource *mem; 102 struct resource *mem;
76 int irq; 103 int irq;
77 struct clk *clk; 104 struct clk *clk;
78 105
106 if (pdev->dev.of_node) {
107 match = of_match_device(c_can_of_table, &pdev->dev);
108 if (!match) {
109 dev_err(&pdev->dev, "Failed to find matching dt id\n");
110 ret = -EINVAL;
111 goto exit;
112 }
113 id = match->data;
114 } else {
115 id = platform_get_device_id(pdev);
116 }
117
118 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
119 if (IS_ERR(pinctrl))
120 dev_warn(&pdev->dev,
121 "failed to configure pins from driver\n");
122
79 /* get the appropriate clk */ 123 /* get the appropriate clk */
80 clk = clk_get(&pdev->dev, NULL); 124 clk = clk_get(&pdev->dev, NULL);
81 if (IS_ERR(clk)) { 125 if (IS_ERR(clk)) {
@@ -114,9 +158,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
114 } 158 }
115 159
116 priv = netdev_priv(dev); 160 priv = netdev_priv(dev);
117 id = platform_get_device_id(pdev);
118 switch (id->driver_data) { 161 switch (id->driver_data) {
119 case C_CAN_DEVTYPE: 162 case BOSCH_C_CAN:
120 priv->regs = reg_map_c_can; 163 priv->regs = reg_map_c_can;
121 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { 164 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
122 case IORESOURCE_MEM_32BIT: 165 case IORESOURCE_MEM_32BIT:
@@ -130,7 +173,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
130 break; 173 break;
131 } 174 }
132 break; 175 break;
133 case D_CAN_DEVTYPE: 176 case BOSCH_D_CAN:
134 priv->regs = reg_map_d_can; 177 priv->regs = reg_map_d_can;
135 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; 178 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
136 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; 179 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
@@ -143,8 +186,10 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
143 186
144 dev->irq = irq; 187 dev->irq = irq;
145 priv->base = addr; 188 priv->base = addr;
189 priv->device = &pdev->dev;
146 priv->can.clock.freq = clk_get_rate(clk); 190 priv->can.clock.freq = clk_get_rate(clk);
147 priv->priv = clk; 191 priv->priv = clk;
192 priv->type = id->driver_data;
148 193
149 platform_set_drvdata(pdev, dev); 194 platform_set_drvdata(pdev, dev);
150 SET_NETDEV_DEV(dev, &pdev->dev); 195 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -195,27 +240,75 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
195 return 0; 240 return 0;
196} 241}
197 242
198static const struct platform_device_id c_can_id_table[] = { 243#ifdef CONFIG_PM
199 { 244static int c_can_suspend(struct platform_device *pdev, pm_message_t state)
200 .name = KBUILD_MODNAME, 245{
201 .driver_data = C_CAN_DEVTYPE, 246 int ret;
202 }, { 247 struct net_device *ndev = platform_get_drvdata(pdev);
203 .name = "c_can", 248 struct c_can_priv *priv = netdev_priv(ndev);
204 .driver_data = C_CAN_DEVTYPE, 249
205 }, { 250 if (priv->type != BOSCH_D_CAN) {
206 .name = "d_can", 251 dev_warn(&pdev->dev, "Not supported\n");
207 .driver_data = D_CAN_DEVTYPE, 252 return 0;
208 }, {
209 } 253 }
210}; 254
255 if (netif_running(ndev)) {
256 netif_stop_queue(ndev);
257 netif_device_detach(ndev);
258 }
259
260 ret = c_can_power_down(ndev);
261 if (ret) {
262 netdev_err(ndev, "failed to enter power down mode\n");
263 return ret;
264 }
265
266 priv->can.state = CAN_STATE_SLEEPING;
267
268 return 0;
269}
270
271static int c_can_resume(struct platform_device *pdev)
272{
273 int ret;
274 struct net_device *ndev = platform_get_drvdata(pdev);
275 struct c_can_priv *priv = netdev_priv(ndev);
276
277 if (priv->type != BOSCH_D_CAN) {
278 dev_warn(&pdev->dev, "Not supported\n");
279 return 0;
280 }
281
282 ret = c_can_power_up(ndev);
283 if (ret) {
284 netdev_err(ndev, "Still in power down mode\n");
285 return ret;
286 }
287
288 priv->can.state = CAN_STATE_ERROR_ACTIVE;
289
290 if (netif_running(ndev)) {
291 netif_device_attach(ndev);
292 netif_start_queue(ndev);
293 }
294
295 return 0;
296}
297#else
298#define c_can_suspend NULL
299#define c_can_resume NULL
300#endif
211 301
212static struct platform_driver c_can_plat_driver = { 302static struct platform_driver c_can_plat_driver = {
213 .driver = { 303 .driver = {
214 .name = KBUILD_MODNAME, 304 .name = KBUILD_MODNAME,
215 .owner = THIS_MODULE, 305 .owner = THIS_MODULE,
306 .of_match_table = of_match_ptr(c_can_of_table),
216 }, 307 },
217 .probe = c_can_plat_probe, 308 .probe = c_can_plat_probe,
218 .remove = __devexit_p(c_can_plat_remove), 309 .remove = __devexit_p(c_can_plat_remove),
310 .suspend = c_can_suspend,
311 .resume = c_can_resume,
219 .id_table = c_can_id_table, 312 .id_table = c_can_id_table,
220}; 313};
221 314
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index c5f143165f80..c78ecfca1e45 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,6 +144,10 @@
144 144
145#define FLEXCAN_MB_CODE_MASK (0xf0ffffff) 145#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
146 146
147/* FLEXCAN hardware feature flags */
148#define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
149#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* Broken error state handling */
150
147/* Structure of the message buffer */ 151/* Structure of the message buffer */
148struct flexcan_mb { 152struct flexcan_mb {
149 u32 can_ctrl; 153 u32 can_ctrl;
@@ -178,7 +182,7 @@ struct flexcan_regs {
178}; 182};
179 183
180struct flexcan_devtype_data { 184struct flexcan_devtype_data {
181 u32 hw_ver; /* hardware controller version */ 185 u32 features; /* hardware controller features */
182}; 186};
183 187
184struct flexcan_priv { 188struct flexcan_priv {
@@ -197,11 +201,11 @@ struct flexcan_priv {
197}; 201};
198 202
199static struct flexcan_devtype_data fsl_p1010_devtype_data = { 203static struct flexcan_devtype_data fsl_p1010_devtype_data = {
200 .hw_ver = 3, 204 .features = FLEXCAN_HAS_BROKEN_ERR_STATE,
201}; 205};
202 206static struct flexcan_devtype_data fsl_imx28_devtype_data;
203static struct flexcan_devtype_data fsl_imx6q_devtype_data = { 207static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
204 .hw_ver = 10, 208 .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE,
205}; 209};
206 210
207static const struct can_bittiming_const flexcan_bittiming_const = { 211static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -741,15 +745,19 @@ static int flexcan_chip_start(struct net_device *dev)
741 * enable tx and rx warning interrupt 745 * enable tx and rx warning interrupt
742 * enable bus off interrupt 746 * enable bus off interrupt
743 * (== FLEXCAN_CTRL_ERR_STATE) 747 * (== FLEXCAN_CTRL_ERR_STATE)
744 *
745 * _note_: we enable the "error interrupt"
746 * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
747 * warning or bus passive interrupts.
748 */ 748 */
749 reg_ctrl = flexcan_read(&regs->ctrl); 749 reg_ctrl = flexcan_read(&regs->ctrl);
750 reg_ctrl &= ~FLEXCAN_CTRL_TSYN; 750 reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
751 reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF | 751 reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
752 FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK; 752 FLEXCAN_CTRL_ERR_STATE;
753 /*
754 * enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
755 * on most Flexcan cores, too. Otherwise we don't get
756 * any error warning or passive interrupts.
757 */
758 if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
759 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
760 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
753 761
754 /* save for later use */ 762 /* save for later use */
755 priv->reg_ctrl_default = reg_ctrl; 763 priv->reg_ctrl_default = reg_ctrl;
@@ -772,7 +780,7 @@ static int flexcan_chip_start(struct net_device *dev)
772 flexcan_write(0x0, &regs->rx14mask); 780 flexcan_write(0x0, &regs->rx14mask);
773 flexcan_write(0x0, &regs->rx15mask); 781 flexcan_write(0x0, &regs->rx15mask);
774 782
775 if (priv->devtype_data->hw_ver >= 10) 783 if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
776 flexcan_write(0x0, &regs->rxfgmask); 784 flexcan_write(0x0, &regs->rxfgmask);
777 785
778 flexcan_transceiver_switch(priv, 1); 786 flexcan_transceiver_switch(priv, 1);
@@ -954,6 +962,7 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
954 962
955static const struct of_device_id flexcan_of_match[] = { 963static const struct of_device_id flexcan_of_match[] = {
956 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, 964 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
965 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
957 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, 966 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
958 { /* sentinel */ }, 967 { /* sentinel */ },
959}; 968};
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 8a8df82988d1..c975999bb055 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
181 181
182 if (!clock_name || !strcmp(clock_name, "sys")) { 182 if (!clock_name || !strcmp(clock_name, "sys")) {
183 sys_clk = clk_get(&ofdev->dev, "sys_clk"); 183 sys_clk = clk_get(&ofdev->dev, "sys_clk");
184 if (!sys_clk) { 184 if (IS_ERR(sys_clk)) {
185 dev_err(&ofdev->dev, "couldn't get sys_clk\n"); 185 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
186 goto exit_unmap; 186 goto exit_unmap;
187 } 187 }
@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
204 204
205 if (clocksrc < 0) { 205 if (clocksrc < 0) {
206 ref_clk = clk_get(&ofdev->dev, "ref_clk"); 206 ref_clk = clk_get(&ofdev->dev, "ref_clk");
207 if (!ref_clk) { 207 if (IS_ERR(ref_clk)) {
208 dev_err(&ofdev->dev, "couldn't get ref_clk\n"); 208 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
209 goto exit_unmap; 209 goto exit_unmap;
210 } 210 }
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 4c4f33d482d2..25011dbe1b96 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -156,8 +156,13 @@ static void set_normal_mode(struct net_device *dev)
156 } 156 }
157 157
158 /* set chip to normal mode */ 158 /* set chip to normal mode */
159 priv->write_reg(priv, REG_MOD, 0x00); 159 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
160 priv->write_reg(priv, REG_MOD, MOD_LOM);
161 else
162 priv->write_reg(priv, REG_MOD, 0x00);
163
160 udelay(10); 164 udelay(10);
165
161 status = priv->read_reg(priv, REG_MOD); 166 status = priv->read_reg(priv, REG_MOD);
162 } 167 }
163 168
@@ -310,7 +315,10 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
310 315
311 can_put_echo_skb(skb, dev, 0); 316 can_put_echo_skb(skb, dev, 0);
312 317
313 sja1000_write_cmdreg(priv, CMD_TR); 318 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
319 sja1000_write_cmdreg(priv, CMD_TR | CMD_AT);
320 else
321 sja1000_write_cmdreg(priv, CMD_TR);
314 322
315 return NETDEV_TX_OK; 323 return NETDEV_TX_OK;
316} 324}
@@ -505,10 +513,18 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
505 netdev_warn(dev, "wakeup interrupt\n"); 513 netdev_warn(dev, "wakeup interrupt\n");
506 514
507 if (isrc & IRQ_TI) { 515 if (isrc & IRQ_TI) {
508 /* transmission complete interrupt */ 516 /* transmission buffer released */
509 stats->tx_bytes += priv->read_reg(priv, REG_FI) & 0xf; 517 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT &&
510 stats->tx_packets++; 518 !(status & SR_TCS)) {
511 can_get_echo_skb(dev, 0); 519 stats->tx_errors++;
520 can_free_echo_skb(dev, 0);
521 } else {
522 /* transmission complete */
523 stats->tx_bytes +=
524 priv->read_reg(priv, REG_FI) & 0xf;
525 stats->tx_packets++;
526 can_get_echo_skb(dev, 0);
527 }
512 netif_wake_queue(dev); 528 netif_wake_queue(dev);
513 } 529 }
514 if (isrc & IRQ_RI) { 530 if (isrc & IRQ_RI) {
@@ -605,7 +621,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
605 priv->can.do_set_mode = sja1000_set_mode; 621 priv->can.do_set_mode = sja1000_set_mode;
606 priv->can.do_get_berr_counter = sja1000_get_berr_counter; 622 priv->can.do_get_berr_counter = sja1000_get_berr_counter;
607 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 623 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
608 CAN_CTRLMODE_BERR_REPORTING; 624 CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY |
625 CAN_CTRLMODE_ONE_SHOT;
609 626
610 spin_lock_init(&priv->cmdreg_lock); 627 spin_lock_init(&priv->cmdreg_lock);
611 628
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d2f91f737871..c4643c400d46 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -53,7 +53,7 @@ static struct peak_usb_adapter *peak_usb_adapters_list[] = {
53 * dump memory 53 * dump memory
54 */ 54 */
55#define DUMP_WIDTH 16 55#define DUMP_WIDTH 16
56void dump_mem(char *prompt, void *p, int l) 56void pcan_dump_mem(char *prompt, void *p, int l)
57{ 57{
58 pr_info("%s dumping %s (%d bytes):\n", 58 pr_info("%s dumping %s (%d bytes):\n",
59 PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l); 59 PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -203,9 +203,9 @@ static void peak_usb_read_bulk_callback(struct urb *urb)
203 if (dev->state & PCAN_USB_STATE_STARTED) { 203 if (dev->state & PCAN_USB_STATE_STARTED) {
204 err = dev->adapter->dev_decode_buf(dev, urb); 204 err = dev->adapter->dev_decode_buf(dev, urb);
205 if (err) 205 if (err)
206 dump_mem("received usb message", 206 pcan_dump_mem("received usb message",
207 urb->transfer_buffer, 207 urb->transfer_buffer,
208 urb->transfer_buffer_length); 208 urb->transfer_buffer_length);
209 } 209 }
210 } 210 }
211 211
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 4c775b620be2..c8e5e91d7cb5 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -131,7 +131,7 @@ struct peak_usb_device {
131 struct peak_usb_device *next_siblings; 131 struct peak_usb_device *next_siblings;
132}; 132};
133 133
134void dump_mem(char *prompt, void *p, int l); 134void pcan_dump_mem(char *prompt, void *p, int l);
135 135
136/* common timestamp management */ 136/* common timestamp management */
137void peak_usb_init_time_ref(struct peak_time_ref *time_ref, 137void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 629c4ba5d49d..e1626d92511a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -292,8 +292,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
292 if (!rec_len) { 292 if (!rec_len) {
293 netdev_err(dev->netdev, 293 netdev_err(dev->netdev,
294 "got unprocessed record in msg\n"); 294 "got unprocessed record in msg\n");
295 dump_mem("rcvd rsp msg", pum->u.rec_buffer, 295 pcan_dump_mem("rcvd rsp msg", pum->u.rec_buffer,
296 actual_length); 296 actual_length);
297 break; 297 break;
298 } 298 }
299 299
@@ -756,8 +756,8 @@ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb)
756 756
757fail: 757fail:
758 if (err) 758 if (err)
759 dump_mem("received msg", 759 pcan_dump_mem("received msg",
760 urb->transfer_buffer, urb->actual_length); 760 urb->transfer_buffer, urb->actual_length);
761 761
762 return err; 762 return err;
763} 763}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index a11af5cc4844..e4ff38949112 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -89,15 +89,6 @@ source "drivers/net/ethernet/marvell/Kconfig"
89source "drivers/net/ethernet/mellanox/Kconfig" 89source "drivers/net/ethernet/mellanox/Kconfig"
90source "drivers/net/ethernet/micrel/Kconfig" 90source "drivers/net/ethernet/micrel/Kconfig"
91source "drivers/net/ethernet/microchip/Kconfig" 91source "drivers/net/ethernet/microchip/Kconfig"
92
93config MIPS_SIM_NET
94 tristate "MIPS simulator Network device"
95 depends on MIPS_SIM
96 ---help---
97 The MIPSNET device is a simple Ethernet network device which is
98 emulated by the MIPS Simulator.
99 If you are not using a MIPSsim or are unsure, say N.
100
101source "drivers/net/ethernet/myricom/Kconfig" 92source "drivers/net/ethernet/myricom/Kconfig"
102 93
103config FEALNX 94config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 878ad32b93f2..d4473072654a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
40obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ 40obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
41obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ 41obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
42obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ 42obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
43obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
44obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ 43obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
45obj-$(CONFIG_FEALNX) += fealnx.o 44obj-$(CONFIG_FEALNX) += fealnx.o
46obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ 45obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f15e72e81ac4..4bd416b72e65 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -101,6 +101,7 @@ config TIGON3
101 tristate "Broadcom Tigon3 support" 101 tristate "Broadcom Tigon3 support"
102 depends on PCI 102 depends on PCI
103 select PHYLIB 103 select PHYLIB
104 select HWMON
104 ---help--- 105 ---help---
105 This driver supports Broadcom Tigon3 based gigabit Ethernet cards. 106 This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
106 107
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index eac25236856c..72897c47b8c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.72.51-0" 26#define DRV_MODULE_VERSION "1.78.00-0"
27#define DRV_MODULE_RELDATE "2012/06/18" 27#define DRV_MODULE_RELDATE "2012/09/27"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e97a7d1d06..30f04a389227 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2285,7 +2285,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2285 /* Wait for all pending SP commands to complete */ 2285 /* Wait for all pending SP commands to complete */
2286 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { 2286 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2287 BNX2X_ERR("Timeout waiting for SP elements to complete\n"); 2287 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2288 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 2288 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2289 return -EBUSY; 2289 return -EBUSY;
2290 } 2290 }
2291 2291
@@ -2333,7 +2333,7 @@ load_error0:
2333} 2333}
2334 2334
2335/* must be called with rtnl_lock */ 2335/* must be called with rtnl_lock */
2336int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 2336int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2337{ 2337{
2338 int i; 2338 int i;
2339 bool global = false; 2339 bool global = false;
@@ -2395,7 +2395,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2395 2395
2396 /* Cleanup the chip if needed */ 2396 /* Cleanup the chip if needed */
2397 if (unload_mode != UNLOAD_RECOVERY) 2397 if (unload_mode != UNLOAD_RECOVERY)
2398 bnx2x_chip_cleanup(bp, unload_mode); 2398 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2399 else { 2399 else {
2400 /* Send the UNLOAD_REQUEST to the MCP */ 2400 /* Send the UNLOAD_REQUEST to the MCP */
2401 bnx2x_send_unload_req(bp, unload_mode); 2401 bnx2x_send_unload_req(bp, unload_mode);
@@ -2419,7 +2419,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2419 bnx2x_free_irq(bp); 2419 bnx2x_free_irq(bp);
2420 2420
2421 /* Report UNLOAD_DONE to MCP */ 2421 /* Report UNLOAD_DONE to MCP */
2422 bnx2x_send_unload_done(bp); 2422 bnx2x_send_unload_done(bp, false);
2423 } 2423 }
2424 2424
2425 /* 2425 /*
@@ -3026,8 +3026,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3026 first_bd = tx_start_bd; 3026 first_bd = tx_start_bd;
3027 3027
3028 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 3028 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3029 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, 3029 SET_FLAG(tx_start_bd->general_data,
3030 mac_type); 3030 ETH_TX_START_BD_PARSE_NBDS,
3031 0);
3031 3032
3032 /* header nbd */ 3033 /* header nbd */
3033 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); 3034 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
@@ -3077,13 +3078,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3077 &pbd_e2->dst_mac_addr_lo, 3078 &pbd_e2->dst_mac_addr_lo,
3078 eth->h_dest); 3079 eth->h_dest);
3079 } 3080 }
3081
3082 SET_FLAG(pbd_e2_parsing_data,
3083 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3080 } else { 3084 } else {
3085 u16 global_data = 0;
3081 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; 3086 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3082 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 3087 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3083 /* Set PBD in checksum offload case */ 3088 /* Set PBD in checksum offload case */
3084 if (xmit_type & XMIT_CSUM) 3089 if (xmit_type & XMIT_CSUM)
3085 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); 3090 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3086 3091
3092 SET_FLAG(global_data,
3093 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3094 pbd_e1x->global_data |= cpu_to_le16(global_data);
3087 } 3095 }
3088 3096
3089 /* Setup the data pointer of the first BD of the packet */ 3097 /* Setup the data pointer of the first BD of the packet */
@@ -3770,7 +3778,7 @@ int bnx2x_reload_if_running(struct net_device *dev)
3770 if (unlikely(!netif_running(dev))) 3778 if (unlikely(!netif_running(dev)))
3771 return 0; 3779 return 0;
3772 3780
3773 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 3781 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3774 return bnx2x_nic_load(bp, LOAD_NORMAL); 3782 return bnx2x_nic_load(bp, LOAD_NORMAL);
3775} 3783}
3776 3784
@@ -3967,7 +3975,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3967 3975
3968 netif_device_detach(dev); 3976 netif_device_detach(dev);
3969 3977
3970 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 3978 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
3971 3979
3972 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 3980 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3973 3981
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfd86a55f1dc..9c5ea6c5b4c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
83 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 83 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
84 * 84 *
85 * @bp: driver handle 85 * @bp: driver handle
86 * @keep_link: true iff link should be kept up
86 */ 87 */
87void bnx2x_send_unload_done(struct bnx2x *bp); 88void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
88 89
89/** 90/**
90 * bnx2x_config_rss_pf - configure RSS parameters in a PF. 91 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
@@ -153,6 +154,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
153void bnx2x_link_set(struct bnx2x *bp); 154void bnx2x_link_set(struct bnx2x *bp);
154 155
155/** 156/**
157 * bnx2x_force_link_reset - Forces link reset, and put the PHY
158 * in reset as well.
159 *
160 * @bp: driver handle
161 */
162void bnx2x_force_link_reset(struct bnx2x *bp);
163
164/**
156 * bnx2x_link_test - query link status. 165 * bnx2x_link_test - query link status.
157 * 166 *
158 * @bp: driver handle 167 * @bp: driver handle
@@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp);
312 * 321 *
313 * @bp: driver handle 322 * @bp: driver handle
314 * @unload_mode: COMMON, PORT, FUNCTION 323 * @unload_mode: COMMON, PORT, FUNCTION
324 * @keep_link: true iff link should be kept up.
315 * 325 *
316 * - Cleanup MAC configuration. 326 * - Cleanup MAC configuration.
317 * - Closes clients. 327 * - Closes clients.
318 * - etc. 328 * - etc.
319 */ 329 */
320void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); 330void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
321 331
322/** 332/**
323 * bnx2x_acquire_hw_lock - acquire HW lock. 333 * bnx2x_acquire_hw_lock - acquire HW lock.
@@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
446bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err); 456bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
447 457
448/* dev_close main block */ 458/* dev_close main block */
449int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); 459int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
450 460
451/* dev_open main block */ 461/* dev_open main block */
452int bnx2x_nic_load(struct bnx2x *bp, int load_mode); 462int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 8a73374e52a7..2245c3895409 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -91,25 +91,21 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
91 /* 91 /*
92 * Rx COS configuration 92 * Rx COS configuration
93 * Changing PFC RX configuration . 93 * Changing PFC RX configuration .
94 * In RX COS0 will always be configured to lossy and COS1 to lossless 94 * In RX COS0 will always be configured to lossless and COS1 to lossy
95 */ 95 */
96 for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { 96 for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
97 pri_bit = 1 << i; 97 pri_bit = 1 << i;
98 98
99 if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) 99 if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
100 val |= 1 << (i * 4); 100 val |= 1 << (i * 4);
101 } 101 }
102 102
103 pfc_params.pkt_priority_to_cos = val; 103 pfc_params.pkt_priority_to_cos = val;
104 104
105 /* RX COS0 */ 105 /* RX COS0 */
106 pfc_params.llfc_low_priority_classes = 0; 106 pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
107 /* RX COS1 */ 107 /* RX COS1 */
108 pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); 108 pfc_params.llfc_high_priority_classes = 0;
109
110 /* BRB configuration */
111 pfc_params.cos0_pauseable = false;
112 pfc_params.cos1_pauseable = true;
113 109
114 bnx2x_acquire_phy_lock(bp); 110 bnx2x_acquire_phy_lock(bp);
115 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; 111 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ebf40cd7aa10..c65295dded39 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev)
905 905
906 if (netif_running(dev)) { 906 if (netif_running(dev)) {
907 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 907 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
908 bnx2x_force_link_reset(bp);
908 bnx2x_link_set(bp); 909 bnx2x_link_set(bp);
909 } 910 }
910 911
@@ -1606,7 +1607,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1606 return 0; 1607 return 0;
1607} 1608}
1608 1609
1609static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = { 1610static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
1610 "register_test (offline) ", 1611 "register_test (offline) ",
1611 "memory_test (offline) ", 1612 "memory_test (offline) ",
1612 "int_loopback_test (offline)", 1613 "int_loopback_test (offline)",
@@ -1653,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1653 return -EOPNOTSUPP; 1654 return -EOPNOTSUPP;
1654 } 1655 }
1655 1656
1656 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); 1657 eee_cfg = bp->link_vars.eee_status;
1657 1658
1658 edata->supported = 1659 edata->supported =
1659 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> 1660 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
@@ -1690,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1690 return -EOPNOTSUPP; 1691 return -EOPNOTSUPP;
1691 } 1692 }
1692 1693
1693 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); 1694 eee_cfg = bp->link_vars.eee_status;
1694 1695
1695 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) { 1696 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
1696 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n"); 1697 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
@@ -1739,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1739 /* Restart link to propogate changes */ 1740 /* Restart link to propogate changes */
1740 if (netif_running(dev)) { 1741 if (netif_running(dev)) {
1741 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1742 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1743 bnx2x_force_link_reset(bp);
1742 bnx2x_link_set(bp); 1744 bnx2x_link_set(bp);
1743 } 1745 }
1744 1746
@@ -2038,8 +2040,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
2038 u16 pkt_prod, bd_prod; 2040 u16 pkt_prod, bd_prod;
2039 struct sw_tx_bd *tx_buf; 2041 struct sw_tx_bd *tx_buf;
2040 struct eth_tx_start_bd *tx_start_bd; 2042 struct eth_tx_start_bd *tx_start_bd;
2041 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2042 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2043 dma_addr_t mapping; 2043 dma_addr_t mapping;
2044 union eth_rx_cqe *cqe; 2044 union eth_rx_cqe *cqe;
2045 u8 cqe_fp_flags, cqe_fp_type; 2045 u8 cqe_fp_flags, cqe_fp_type;
@@ -2131,21 +2131,32 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
2131 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 2131 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2132 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2132 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2133 SET_FLAG(tx_start_bd->general_data, 2133 SET_FLAG(tx_start_bd->general_data,
2134 ETH_TX_START_BD_ETH_ADDR_TYPE,
2135 UNICAST_ADDRESS);
2136 SET_FLAG(tx_start_bd->general_data,
2137 ETH_TX_START_BD_HDR_NBDS, 2134 ETH_TX_START_BD_HDR_NBDS,
2138 1); 2135 1);
2136 SET_FLAG(tx_start_bd->general_data,
2137 ETH_TX_START_BD_PARSE_NBDS,
2138 0);
2139 2139
2140 /* turn on parsing and get a BD */ 2140 /* turn on parsing and get a BD */
2141 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2141 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2142 2142
2143 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; 2143 if (CHIP_IS_E1x(bp)) {
2144 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; 2144 u16 global_data = 0;
2145 2145 struct eth_tx_parse_bd_e1x *pbd_e1x =
2146 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 2146 &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2147 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 2147 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2148 2148 SET_FLAG(global_data,
2149 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2150 pbd_e1x->global_data = cpu_to_le16(global_data);
2151 } else {
2152 u32 parsing_data = 0;
2153 struct eth_tx_parse_bd_e2 *pbd_e2 =
2154 &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2155 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2156 SET_FLAG(parsing_data,
2157 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2158 pbd_e2->parsing_data = cpu_to_le32(parsing_data);
2159 }
2149 wmb(); 2160 wmb();
2150 2161
2151 txdata->tx_db.data.prod += 2; 2162 txdata->tx_db.data.prod += 2;
@@ -2263,7 +2274,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
2263 if (!netif_running(bp->dev)) 2274 if (!netif_running(bp->dev))
2264 return BNX2X_EXT_LOOPBACK_FAILED; 2275 return BNX2X_EXT_LOOPBACK_FAILED;
2265 2276
2266 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2277 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
2267 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); 2278 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
2268 if (rc) { 2279 if (rc) {
2269 DP(BNX2X_MSG_ETHTOOL, 2280 DP(BNX2X_MSG_ETHTOOL,
@@ -2414,7 +2425,7 @@ static void bnx2x_self_test(struct net_device *dev,
2414 2425
2415 link_up = bp->link_vars.link_up; 2426 link_up = bp->link_vars.link_up;
2416 2427
2417 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2428 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
2418 rc = bnx2x_nic_load(bp, LOAD_DIAG); 2429 rc = bnx2x_nic_load(bp, LOAD_DIAG);
2419 if (rc) { 2430 if (rc) {
2420 etest->flags |= ETH_TEST_FL_FAILED; 2431 etest->flags |= ETH_TEST_FL_FAILED;
@@ -2446,7 +2457,7 @@ static void bnx2x_self_test(struct net_device *dev,
2446 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 2457 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
2447 } 2458 }
2448 2459
2449 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2460 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
2450 2461
2451 /* restore input for TX port IF */ 2462 /* restore input for TX port IF */
2452 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); 2463 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
@@ -2534,7 +2545,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2534static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 2545static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2535{ 2546{
2536 struct bnx2x *bp = netdev_priv(dev); 2547 struct bnx2x *bp = netdev_priv(dev);
2537 int i, j, k, offset, start; 2548 int i, j, k, start;
2538 char queue_name[MAX_QUEUE_NAME_LEN+1]; 2549 char queue_name[MAX_QUEUE_NAME_LEN+1];
2539 2550
2540 switch (stringset) { 2551 switch (stringset) {
@@ -2570,13 +2581,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2570 start = 0; 2581 start = 0;
2571 else 2582 else
2572 start = 4; 2583 start = 4;
2573 for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp)); 2584 memcpy(buf, bnx2x_tests_str_arr + start,
2574 i++, j++) { 2585 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
2575 offset = sprintf(buf+32*i, "%s",
2576 bnx2x_tests_str_arr[j]);
2577 *(buf+offset) = '\0';
2578 }
2579 break;
2580 } 2586 }
2581} 2587}
2582 2588
@@ -2940,7 +2946,7 @@ static int bnx2x_set_channels(struct net_device *dev,
2940 bnx2x_change_num_queues(bp, channels->combined_count); 2946 bnx2x_change_num_queues(bp, channels->combined_count);
2941 return 0; 2947 return 0;
2942 } 2948 }
2943 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2949 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
2944 bnx2x_change_num_queues(bp, channels->combined_count); 2950 bnx2x_change_num_queues(bp, channels->combined_count);
2945 return bnx2x_nic_load(bp, LOAD_NORMAL); 2951 return bnx2x_nic_load(bp, LOAD_NORMAL);
2946} 2952}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index bbc66ced9c25..620fe939ecfd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -88,9 +88,6 @@
88#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) 88#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
89#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ 89#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
90 (IRO[101].base + ((assertListEntry) * IRO[101].m1)) 90 (IRO[101].base + ((assertListEntry) * IRO[101].m1))
91#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
92#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
93 (IRO[108].base)
94#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ 91#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
95 (IRO[201].base + ((pfId) * IRO[201].m1)) 92 (IRO[201].base + ((pfId) * IRO[201].m1))
96#define TSTORM_FUNC_EN_OFFSET(funcId) \ 93#define TSTORM_FUNC_EN_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 76b6e65790f8..18704929e642 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1286,6 +1286,9 @@ struct drv_func_mb {
1286 #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 1286 #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
1287 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 1287 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
1288 1288
1289 #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
1290
1291 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
1289 u32 fw_mb_header; 1292 u32 fw_mb_header;
1290 #define FW_MSG_CODE_MASK 0xffff0000 1293 #define FW_MSG_CODE_MASK 0xffff0000
1291 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 1294 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
@@ -1909,6 +1912,54 @@ struct lldp_local_mib {
1909}; 1912};
1910/***END OF DCBX STRUCTURES DECLARATIONS***/ 1913/***END OF DCBX STRUCTURES DECLARATIONS***/
1911 1914
1915/***********************************************************/
1916/* Elink section */
1917/***********************************************************/
1918#define SHMEM_LINK_CONFIG_SIZE 2
1919struct shmem_lfa {
1920 u32 req_duplex;
1921 #define REQ_DUPLEX_PHY0_MASK 0x0000ffff
1922 #define REQ_DUPLEX_PHY0_SHIFT 0
1923 #define REQ_DUPLEX_PHY1_MASK 0xffff0000
1924 #define REQ_DUPLEX_PHY1_SHIFT 16
1925 u32 req_flow_ctrl;
1926 #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff
1927 #define REQ_FLOW_CTRL_PHY0_SHIFT 0
1928 #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000
1929 #define REQ_FLOW_CTRL_PHY1_SHIFT 16
1930 u32 req_line_speed; /* Also determine AutoNeg */
1931 #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff
1932 #define REQ_LINE_SPD_PHY0_SHIFT 0
1933 #define REQ_LINE_SPD_PHY1_MASK 0xffff0000
1934 #define REQ_LINE_SPD_PHY1_SHIFT 16
1935 u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
1936 u32 additional_config;
1937 #define REQ_FC_AUTO_ADV_MASK 0x0000ffff
1938 #define REQ_FC_AUTO_ADV0_SHIFT 0
1939 #define NO_LFA_DUE_TO_DCC_MASK 0x00010000
1940 u32 lfa_sts;
1941 #define LFA_LINK_FLAP_REASON_OFFSET 0
1942 #define LFA_LINK_FLAP_REASON_MASK 0x000000ff
1943 #define LFA_LINK_DOWN 0x1
1944 #define LFA_LOOPBACK_ENABLED 0x2
1945 #define LFA_DUPLEX_MISMATCH 0x3
1946 #define LFA_MFW_IS_TOO_OLD 0x4
1947 #define LFA_LINK_SPEED_MISMATCH 0x5
1948 #define LFA_FLOW_CTRL_MISMATCH 0x6
1949 #define LFA_SPEED_CAP_MISMATCH 0x7
1950 #define LFA_DCC_LFA_DISABLED 0x8
1951 #define LFA_EEE_MISMATCH 0x9
1952
1953 #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
1954 #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
1955
1956 #define LINK_FLAP_COUNT_OFFSET 16
1957 #define LINK_FLAP_COUNT_MASK 0x00ff0000
1958
1959 #define LFA_FLAGS_MASK 0xff000000
1960 #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
1961};
1962
1912struct ncsi_oem_fcoe_features { 1963struct ncsi_oem_fcoe_features {
1913 u32 fcoe_features1; 1964 u32 fcoe_features1;
1914 #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF 1965 #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2738,8 +2789,8 @@ struct afex_stats {
2738}; 2789};
2739 2790
2740#define BCM_5710_FW_MAJOR_VERSION 7 2791#define BCM_5710_FW_MAJOR_VERSION 7
2741#define BCM_5710_FW_MINOR_VERSION 2 2792#define BCM_5710_FW_MINOR_VERSION 8
2742#define BCM_5710_FW_REVISION_VERSION 51 2793#define BCM_5710_FW_REVISION_VERSION 2
2743#define BCM_5710_FW_ENGINEERING_VERSION 0 2794#define BCM_5710_FW_ENGINEERING_VERSION 0
2744#define BCM_5710_FW_COMPILE_FLAGS 1 2795#define BCM_5710_FW_COMPILE_FLAGS 1
2745 2796
@@ -3861,10 +3912,8 @@ struct eth_rss_update_ramrod_data {
3861#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 3912#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
3862#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) 3913#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
3863#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 3914#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
3864#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) 3915#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
3865#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 3916#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
3866#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
3867#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
3868 u8 rss_result_mask; 3917 u8 rss_result_mask;
3869 u8 rss_mode; 3918 u8 rss_mode;
3870 __le32 __reserved2; 3919 __le32 __reserved2;
@@ -4080,27 +4129,29 @@ struct eth_tx_start_bd {
4080#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 4129#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
4081#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) 4130#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
4082#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 4131#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
4083#define ETH_TX_START_BD_RESREVED (0x1<<5) 4132#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
4084#define ETH_TX_START_BD_RESREVED_SHIFT 5 4133#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
4085#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) 4134#define ETH_TX_START_BD_RESREVED (0x1<<7)
4086#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 4135#define ETH_TX_START_BD_RESREVED_SHIFT 7
4087}; 4136};
4088 4137
4089/* 4138/*
4090 * Tx parsing BD structure for ETH E1/E1h 4139 * Tx parsing BD structure for ETH E1/E1h
4091 */ 4140 */
4092struct eth_tx_parse_bd_e1x { 4141struct eth_tx_parse_bd_e1x {
4093 u8 global_data; 4142 __le16 global_data;
4094#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) 4143#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
4095#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 4144#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
4096#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) 4145#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
4097#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 4146#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
4098#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) 4147#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
4099#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 4148#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
4100#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) 4149#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
4101#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 4150#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
4102#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) 4151#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
4103#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 4152#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
4153#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
4154#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
4104 u8 tcp_flags; 4155 u8 tcp_flags;
4105#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) 4156#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
4106#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 4157#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
@@ -4119,7 +4170,6 @@ struct eth_tx_parse_bd_e1x {
4119#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) 4170#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
4120#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 4171#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
4121 u8 ip_hlen_w; 4172 u8 ip_hlen_w;
4122 s8 reserved;
4123 __le16 total_hlen_w; 4173 __le16 total_hlen_w;
4124 __le16 tcp_pseudo_csum; 4174 __le16 tcp_pseudo_csum;
4125 __le16 lso_mss; 4175 __le16 lso_mss;
@@ -4138,14 +4188,16 @@ struct eth_tx_parse_bd_e2 {
4138 __le16 src_mac_addr_mid; 4188 __le16 src_mac_addr_mid;
4139 __le16 src_mac_addr_hi; 4189 __le16 src_mac_addr_hi;
4140 __le32 parsing_data; 4190 __le32 parsing_data;
4141#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) 4191#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
4142#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 4192#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
4143#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) 4193#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
4144#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 4194#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
4145#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) 4195#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
4146#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 4196#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
4147#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) 4197#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
4148#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 4198#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
4199#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
4200#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
4149}; 4201};
4150 4202
4151/* 4203/*
@@ -4913,7 +4965,8 @@ struct flow_control_configuration {
4913 * 4965 *
4914 */ 4966 */
4915struct function_start_data { 4967struct function_start_data {
4916 __le16 function_mode; 4968 u8 function_mode;
4969 u8 reserved;
4917 __le16 sd_vlan_tag; 4970 __le16 sd_vlan_tag;
4918 __le16 vif_id; 4971 __le16 vif_id;
4919 u8 path_id; 4972 u8 path_id;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 559c396d45cc..c8f10f0e8a0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -566,7 +566,7 @@ static const struct {
566 u32 e2; /* 57712 */ 566 u32 e2; /* 57712 */
567 u32 e3; /* 578xx */ 567 u32 e3; /* 578xx */
568 } reg_mask; /* Register mask (all valid bits) */ 568 } reg_mask; /* Register mask (all valid bits) */
569 char name[7]; /* Block's longest name is 6 characters long 569 char name[8]; /* Block's longest name is 7 characters long
570 * (name + suffix) 570 * (name + suffix)
571 */ 571 */
572} bnx2x_blocks_parity_data[] = { 572} bnx2x_blocks_parity_data[] = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index b046beb435b2..e2e45ee5df33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -161,120 +161,6 @@
161#define EDC_MODE_LIMITING 0x0044 161#define EDC_MODE_LIMITING 0x0044
162#define EDC_MODE_PASSIVE_DAC 0x0055 162#define EDC_MODE_PASSIVE_DAC 0x0055
163 163
164/* BRB default for class 0 E2 */
165#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
166#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
167#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
168#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
169
170/* BRB thresholds for E2*/
171#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
172#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
173
174#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
175#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
176
177#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
178#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
179
180#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
181#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
182
183/* BRB default for class 0 E3A0 */
184#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
185#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
186#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
187#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
188
189/* BRB thresholds for E3A0 */
190#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
191#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
192
193#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
194#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
195
196#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
197#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
198
199#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
200#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
201
202/* BRB default for E3B0 */
203#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
204#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
205#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
206#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
207
208/* BRB thresholds for E3B0 2 port mode*/
209#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
210#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
211
212#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
213#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
214
215#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
216#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
217
218#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
219#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
220
221/* only for E3B0*/
222#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
223#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
224
225/* Lossy +Lossless GUARANTIED == GUART */
226#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284
227/* Lossless +Lossless*/
228#define PFC_E3B0_2P_PAUSE_LB_GUART 236
229/* Lossy +Lossy*/
230#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342
231
232/* Lossy +Lossless*/
233#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284
234/* Lossless +Lossless*/
235#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236
236/* Lossy +Lossy*/
237#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
238#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
239
240#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
241#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
242
243/* BRB thresholds for E3B0 4 port mode */
244#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
245#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
246
247#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
248#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
249
250#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
251#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
252
253#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
254#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
255
256/* only for E3B0*/
257#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
258#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
259#define PFC_E3B0_4P_LB_GUART 120
260
261#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
262#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
263
264#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
265#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
266
267/* Pause defines*/
268#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
269#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
270#define DEFAULT_E3B0_LB_GUART 40
271
272#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
273#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
274
275#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
276#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
277
278/* ETS defines*/ 164/* ETS defines*/
279#define DCBX_INVALID_COS (0xFF) 165#define DCBX_INVALID_COS (0xFF)
280 166
@@ -321,6 +207,127 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
321 return val; 207 return val;
322} 208}
323 209
210/*
211 * bnx2x_check_lfa - This function checks if link reinitialization is required,
212 * or link flap can be avoided.
213 *
214 * @params: link parameters
215 * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed
216 * condition code.
217 */
218static int bnx2x_check_lfa(struct link_params *params)
219{
220 u32 link_status, cfg_idx, lfa_mask, cfg_size;
221 u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
222 u32 saved_val, req_val, eee_status;
223 struct bnx2x *bp = params->bp;
224
225 additional_config =
226 REG_RD(bp, params->lfa_base +
227 offsetof(struct shmem_lfa, additional_config));
228
229 /* NOTE: must be first condition checked -
230 * to verify DCC bit is cleared in any case!
231 */
232 if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
233 DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
234 REG_WR(bp, params->lfa_base +
235 offsetof(struct shmem_lfa, additional_config),
236 additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
237 return LFA_DCC_LFA_DISABLED;
238 }
239
240 /* Verify that link is up */
241 link_status = REG_RD(bp, params->shmem_base +
242 offsetof(struct shmem_region,
243 port_mb[params->port].link_status));
244 if (!(link_status & LINK_STATUS_LINK_UP))
245 return LFA_LINK_DOWN;
246
247 /* Verify that loopback mode is not set */
248 if (params->loopback_mode)
249 return LFA_LOOPBACK_ENABLED;
250
251 /* Verify that MFW supports LFA */
252 if (!params->lfa_base)
253 return LFA_MFW_IS_TOO_OLD;
254
255 if (params->num_phys == 3) {
256 cfg_size = 2;
257 lfa_mask = 0xffffffff;
258 } else {
259 cfg_size = 1;
260 lfa_mask = 0xffff;
261 }
262
263 /* Compare Duplex */
264 saved_val = REG_RD(bp, params->lfa_base +
265 offsetof(struct shmem_lfa, req_duplex));
266 req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
267 if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
268 DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
269 (saved_val & lfa_mask), (req_val & lfa_mask));
270 return LFA_DUPLEX_MISMATCH;
271 }
272 /* Compare Flow Control */
273 saved_val = REG_RD(bp, params->lfa_base +
274 offsetof(struct shmem_lfa, req_flow_ctrl));
275 req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
276 if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
277 DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
278 (saved_val & lfa_mask), (req_val & lfa_mask));
279 return LFA_FLOW_CTRL_MISMATCH;
280 }
281 /* Compare Link Speed */
282 saved_val = REG_RD(bp, params->lfa_base +
283 offsetof(struct shmem_lfa, req_line_speed));
284 req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
285 if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
286 DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
287 (saved_val & lfa_mask), (req_val & lfa_mask));
288 return LFA_LINK_SPEED_MISMATCH;
289 }
290
291 for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
292 cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
293 offsetof(struct shmem_lfa,
294 speed_cap_mask[cfg_idx]));
295
296 if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
297 DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
298 cur_speed_cap_mask,
299 params->speed_cap_mask[cfg_idx]);
300 return LFA_SPEED_CAP_MISMATCH;
301 }
302 }
303
304 cur_req_fc_auto_adv =
305 REG_RD(bp, params->lfa_base +
306 offsetof(struct shmem_lfa, additional_config)) &
307 REQ_FC_AUTO_ADV_MASK;
308
309 if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
310 DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
311 cur_req_fc_auto_adv, params->req_fc_auto_adv);
312 return LFA_FLOW_CTRL_MISMATCH;
313 }
314
315 eee_status = REG_RD(bp, params->shmem2_base +
316 offsetof(struct shmem2_region,
317 eee_status[params->port]));
318
319 if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
320 (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
321 ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
322 (params->eee_mode & EEE_MODE_ADV_LPI))) {
323 DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
324 eee_status);
325 return LFA_EEE_MISMATCH;
326 }
327
328 /* LFA conditions are met */
329 return 0;
330}
324/******************************************************************/ 331/******************************************************************/
325/* EPIO/GPIO section */ 332/* EPIO/GPIO section */
326/******************************************************************/ 333/******************************************************************/
@@ -1307,93 +1314,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1307} 1314}
1308 1315
1309/******************************************************************/ 1316/******************************************************************/
1310/* EEE section */
1311/******************************************************************/
1312static u8 bnx2x_eee_has_cap(struct link_params *params)
1313{
1314 struct bnx2x *bp = params->bp;
1315
1316 if (REG_RD(bp, params->shmem2_base) <=
1317 offsetof(struct shmem2_region, eee_status[params->port]))
1318 return 0;
1319
1320 return 1;
1321}
1322
1323static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
1324{
1325 switch (nvram_mode) {
1326 case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
1327 *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
1328 break;
1329 case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
1330 *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
1331 break;
1332 case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
1333 *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
1334 break;
1335 default:
1336 *idle_timer = 0;
1337 break;
1338 }
1339
1340 return 0;
1341}
1342
1343static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
1344{
1345 switch (idle_timer) {
1346 case EEE_MODE_NVRAM_BALANCED_TIME:
1347 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
1348 break;
1349 case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
1350 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
1351 break;
1352 case EEE_MODE_NVRAM_LATENCY_TIME:
1353 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
1354 break;
1355 default:
1356 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
1357 break;
1358 }
1359
1360 return 0;
1361}
1362
1363static u32 bnx2x_eee_calc_timer(struct link_params *params)
1364{
1365 u32 eee_mode, eee_idle;
1366 struct bnx2x *bp = params->bp;
1367
1368 if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
1369 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
1370 /* time value in eee_mode --> used directly*/
1371 eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
1372 } else {
1373 /* hsi value in eee_mode --> time */
1374 if (bnx2x_eee_nvram_to_time(params->eee_mode &
1375 EEE_MODE_NVRAM_MASK,
1376 &eee_idle))
1377 return 0;
1378 }
1379 } else {
1380 /* hsi values in nvram --> time*/
1381 eee_mode = ((REG_RD(bp, params->shmem_base +
1382 offsetof(struct shmem_region, dev_info.
1383 port_feature_config[params->port].
1384 eee_power_mode)) &
1385 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
1386 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
1387
1388 if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
1389 return 0;
1390 }
1391
1392 return eee_idle;
1393}
1394
1395
1396/******************************************************************/
1397/* PFC section */ 1317/* PFC section */
1398/******************************************************************/ 1318/******************************************************************/
1399static void bnx2x_update_pfc_xmac(struct link_params *params, 1319static void bnx2x_update_pfc_xmac(struct link_params *params,
@@ -1606,16 +1526,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params,
1606 NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); 1526 NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
1607} 1527}
1608 1528
1609static void bnx2x_umac_disable(struct link_params *params) 1529static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
1610{ 1530{
1611 u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 1531 u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
1532 u32 val;
1612 struct bnx2x *bp = params->bp; 1533 struct bnx2x *bp = params->bp;
1613 if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & 1534 if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
1614 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) 1535 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
1615 return; 1536 return;
1616 1537 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
1538 if (en)
1539 val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
1540 UMAC_COMMAND_CONFIG_REG_RX_ENA);
1541 else
1542 val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
1543 UMAC_COMMAND_CONFIG_REG_RX_ENA);
1617 /* Disable RX and TX */ 1544 /* Disable RX and TX */
1618 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0); 1545 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1619} 1546}
1620 1547
1621static void bnx2x_umac_enable(struct link_params *params, 1548static void bnx2x_umac_enable(struct link_params *params,
@@ -1671,6 +1598,16 @@ static void bnx2x_umac_enable(struct link_params *params,
1671 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); 1598 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1672 udelay(50); 1599 udelay(50);
1673 1600
1601 /* Configure UMAC for EEE */
1602 if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
1603 DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
1604 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
1605 UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
1606 REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
1607 } else {
1608 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
1609 }
1610
1674 /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ 1611 /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
1675 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, 1612 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
1676 ((params->mac_addr[2] << 24) | 1613 ((params->mac_addr[2] << 24) |
@@ -1766,11 +1703,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1766 1703
1767} 1704}
1768 1705
1769static void bnx2x_xmac_disable(struct link_params *params) 1706static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
1770{ 1707{
1771 u8 port = params->port; 1708 u8 port = params->port;
1772 struct bnx2x *bp = params->bp; 1709 struct bnx2x *bp = params->bp;
1773 u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 1710 u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1711 u32 val;
1774 1712
1775 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 1713 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1776 MISC_REGISTERS_RESET_REG_2_XMAC) { 1714 MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -1784,7 +1722,12 @@ static void bnx2x_xmac_disable(struct link_params *params)
1784 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, 1722 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1785 (pfc_ctrl | (1<<1))); 1723 (pfc_ctrl | (1<<1)));
1786 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); 1724 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
1787 REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); 1725 val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
1726 if (en)
1727 val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
1728 else
1729 val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
1730 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1788 } 1731 }
1789} 1732}
1790 1733
@@ -2087,391 +2030,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
2087 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); 2030 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
2088} 2031}
2089 2032
2090/* PFC BRB internal port configuration params */
2091struct bnx2x_pfc_brb_threshold_val {
2092 u32 pause_xoff;
2093 u32 pause_xon;
2094 u32 full_xoff;
2095 u32 full_xon;
2096};
2097
2098struct bnx2x_pfc_brb_e3b0_val {
2099 u32 per_class_guaranty_mode;
2100 u32 lb_guarantied_hyst;
2101 u32 full_lb_xoff_th;
2102 u32 full_lb_xon_threshold;
2103 u32 lb_guarantied;
2104 u32 mac_0_class_t_guarantied;
2105 u32 mac_0_class_t_guarantied_hyst;
2106 u32 mac_1_class_t_guarantied;
2107 u32 mac_1_class_t_guarantied_hyst;
2108};
2109
2110struct bnx2x_pfc_brb_th_val {
2111 struct bnx2x_pfc_brb_threshold_val pauseable_th;
2112 struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
2113 struct bnx2x_pfc_brb_threshold_val default_class0;
2114 struct bnx2x_pfc_brb_threshold_val default_class1;
2115
2116};
2117static int bnx2x_pfc_brb_get_config_params(
2118 struct link_params *params,
2119 struct bnx2x_pfc_brb_th_val *config_val)
2120{
2121 struct bnx2x *bp = params->bp;
2122 DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
2123
2124 config_val->default_class1.pause_xoff = 0;
2125 config_val->default_class1.pause_xon = 0;
2126 config_val->default_class1.full_xoff = 0;
2127 config_val->default_class1.full_xon = 0;
2128
2129 if (CHIP_IS_E2(bp)) {
2130 /* Class0 defaults */
2131 config_val->default_class0.pause_xoff =
2132 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
2133 config_val->default_class0.pause_xon =
2134 DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
2135 config_val->default_class0.full_xoff =
2136 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
2137 config_val->default_class0.full_xon =
2138 DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
2139 /* Pause able*/
2140 config_val->pauseable_th.pause_xoff =
2141 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2142 config_val->pauseable_th.pause_xon =
2143 PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
2144 config_val->pauseable_th.full_xoff =
2145 PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
2146 config_val->pauseable_th.full_xon =
2147 PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
2148 /* Non pause able*/
2149 config_val->non_pauseable_th.pause_xoff =
2150 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2151 config_val->non_pauseable_th.pause_xon =
2152 PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2153 config_val->non_pauseable_th.full_xoff =
2154 PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2155 config_val->non_pauseable_th.full_xon =
2156 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2157 } else if (CHIP_IS_E3A0(bp)) {
2158 /* Class0 defaults */
2159 config_val->default_class0.pause_xoff =
2160 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
2161 config_val->default_class0.pause_xon =
2162 DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
2163 config_val->default_class0.full_xoff =
2164 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
2165 config_val->default_class0.full_xon =
2166 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
2167 /* Pause able */
2168 config_val->pauseable_th.pause_xoff =
2169 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2170 config_val->pauseable_th.pause_xon =
2171 PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
2172 config_val->pauseable_th.full_xoff =
2173 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
2174 config_val->pauseable_th.full_xon =
2175 PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
2176 /* Non pause able*/
2177 config_val->non_pauseable_th.pause_xoff =
2178 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2179 config_val->non_pauseable_th.pause_xon =
2180 PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2181 config_val->non_pauseable_th.full_xoff =
2182 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2183 config_val->non_pauseable_th.full_xon =
2184 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2185 } else if (CHIP_IS_E3B0(bp)) {
2186 /* Class0 defaults */
2187 config_val->default_class0.pause_xoff =
2188 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
2189 config_val->default_class0.pause_xon =
2190 DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
2191 config_val->default_class0.full_xoff =
2192 DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
2193 config_val->default_class0.full_xon =
2194 DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
2195
2196 if (params->phy[INT_PHY].flags &
2197 FLAGS_4_PORT_MODE) {
2198 config_val->pauseable_th.pause_xoff =
2199 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2200 config_val->pauseable_th.pause_xon =
2201 PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
2202 config_val->pauseable_th.full_xoff =
2203 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2204 config_val->pauseable_th.full_xon =
2205 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
2206 /* Non pause able*/
2207 config_val->non_pauseable_th.pause_xoff =
2208 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2209 config_val->non_pauseable_th.pause_xon =
2210 PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2211 config_val->non_pauseable_th.full_xoff =
2212 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2213 config_val->non_pauseable_th.full_xon =
2214 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2215 } else {
2216 config_val->pauseable_th.pause_xoff =
2217 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2218 config_val->pauseable_th.pause_xon =
2219 PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
2220 config_val->pauseable_th.full_xoff =
2221 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2222 config_val->pauseable_th.full_xon =
2223 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
2224 /* Non pause able*/
2225 config_val->non_pauseable_th.pause_xoff =
2226 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2227 config_val->non_pauseable_th.pause_xon =
2228 PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2229 config_val->non_pauseable_th.full_xoff =
2230 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2231 config_val->non_pauseable_th.full_xon =
2232 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2233 }
2234 } else
2235 return -EINVAL;
2236
2237 return 0;
2238}
2239
2240static void bnx2x_pfc_brb_get_e3b0_config_params(
2241 struct link_params *params,
2242 struct bnx2x_pfc_brb_e3b0_val
2243 *e3b0_val,
2244 struct bnx2x_nig_brb_pfc_port_params *pfc_params,
2245 const u8 pfc_enabled)
2246{
2247 if (pfc_enabled && pfc_params) {
2248 e3b0_val->per_class_guaranty_mode = 1;
2249 e3b0_val->lb_guarantied_hyst = 80;
2250
2251 if (params->phy[INT_PHY].flags &
2252 FLAGS_4_PORT_MODE) {
2253 e3b0_val->full_lb_xoff_th =
2254 PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
2255 e3b0_val->full_lb_xon_threshold =
2256 PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
2257 e3b0_val->lb_guarantied =
2258 PFC_E3B0_4P_LB_GUART;
2259 e3b0_val->mac_0_class_t_guarantied =
2260 PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
2261 e3b0_val->mac_0_class_t_guarantied_hyst =
2262 PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
2263 e3b0_val->mac_1_class_t_guarantied =
2264 PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
2265 e3b0_val->mac_1_class_t_guarantied_hyst =
2266 PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
2267 } else {
2268 e3b0_val->full_lb_xoff_th =
2269 PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
2270 e3b0_val->full_lb_xon_threshold =
2271 PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
2272 e3b0_val->mac_0_class_t_guarantied_hyst =
2273 PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
2274 e3b0_val->mac_1_class_t_guarantied =
2275 PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
2276 e3b0_val->mac_1_class_t_guarantied_hyst =
2277 PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
2278
2279 if (pfc_params->cos0_pauseable !=
2280 pfc_params->cos1_pauseable) {
2281 /* Nonpauseable= Lossy + pauseable = Lossless*/
2282 e3b0_val->lb_guarantied =
2283 PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
2284 e3b0_val->mac_0_class_t_guarantied =
2285 PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
2286 } else if (pfc_params->cos0_pauseable) {
2287 /* Lossless +Lossless*/
2288 e3b0_val->lb_guarantied =
2289 PFC_E3B0_2P_PAUSE_LB_GUART;
2290 e3b0_val->mac_0_class_t_guarantied =
2291 PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
2292 } else {
2293 /* Lossy +Lossy*/
2294 e3b0_val->lb_guarantied =
2295 PFC_E3B0_2P_NON_PAUSE_LB_GUART;
2296 e3b0_val->mac_0_class_t_guarantied =
2297 PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
2298 }
2299 }
2300 } else {
2301 e3b0_val->per_class_guaranty_mode = 0;
2302 e3b0_val->lb_guarantied_hyst = 0;
2303 e3b0_val->full_lb_xoff_th =
2304 DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
2305 e3b0_val->full_lb_xon_threshold =
2306 DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
2307 e3b0_val->lb_guarantied =
2308 DEFAULT_E3B0_LB_GUART;
2309 e3b0_val->mac_0_class_t_guarantied =
2310 DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
2311 e3b0_val->mac_0_class_t_guarantied_hyst =
2312 DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
2313 e3b0_val->mac_1_class_t_guarantied =
2314 DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
2315 e3b0_val->mac_1_class_t_guarantied_hyst =
2316 DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
2317 }
2318}
2319static int bnx2x_update_pfc_brb(struct link_params *params,
2320 struct link_vars *vars,
2321 struct bnx2x_nig_brb_pfc_port_params
2322 *pfc_params)
2323{
2324 struct bnx2x *bp = params->bp;
2325 struct bnx2x_pfc_brb_th_val config_val = { {0} };
2326 struct bnx2x_pfc_brb_threshold_val *reg_th_config =
2327 &config_val.pauseable_th;
2328 struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
2329 const int set_pfc = params->feature_config_flags &
2330 FEATURE_CONFIG_PFC_ENABLED;
2331 const u8 pfc_enabled = (set_pfc && pfc_params);
2332 int bnx2x_status = 0;
2333 u8 port = params->port;
2334
2335 /* default - pause configuration */
2336 reg_th_config = &config_val.pauseable_th;
2337 bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
2338 if (bnx2x_status)
2339 return bnx2x_status;
2340
2341 if (pfc_enabled) {
2342 /* First COS */
2343 if (pfc_params->cos0_pauseable)
2344 reg_th_config = &config_val.pauseable_th;
2345 else
2346 reg_th_config = &config_val.non_pauseable_th;
2347 } else
2348 reg_th_config = &config_val.default_class0;
2349 /* The number of free blocks below which the pause signal to class 0
2350 * of MAC #n is asserted. n=0,1
2351 */
2352 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
2353 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
2354 reg_th_config->pause_xoff);
2355 /* The number of free blocks above which the pause signal to class 0
2356 * of MAC #n is de-asserted. n=0,1
2357 */
2358 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
2359 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
2360 /* The number of free blocks below which the full signal to class 0
2361 * of MAC #n is asserted. n=0,1
2362 */
2363 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
2364 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
2365 /* The number of free blocks above which the full signal to class 0
2366 * of MAC #n is de-asserted. n=0,1
2367 */
2368 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
2369 BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
2370
2371 if (pfc_enabled) {
2372 /* Second COS */
2373 if (pfc_params->cos1_pauseable)
2374 reg_th_config = &config_val.pauseable_th;
2375 else
2376 reg_th_config = &config_val.non_pauseable_th;
2377 } else
2378 reg_th_config = &config_val.default_class1;
2379 /* The number of free blocks below which the pause signal to
2380 * class 1 of MAC #n is asserted. n=0,1
2381 */
2382 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
2383 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
2384 reg_th_config->pause_xoff);
2385
2386 /* The number of free blocks above which the pause signal to
2387 * class 1 of MAC #n is de-asserted. n=0,1
2388 */
2389 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
2390 BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
2391 reg_th_config->pause_xon);
2392 /* The number of free blocks below which the full signal to
2393 * class 1 of MAC #n is asserted. n=0,1
2394 */
2395 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
2396 BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
2397 reg_th_config->full_xoff);
2398 /* The number of free blocks above which the full signal to
2399 * class 1 of MAC #n is de-asserted. n=0,1
2400 */
2401 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
2402 BRB1_REG_FULL_1_XON_THRESHOLD_0,
2403 reg_th_config->full_xon);
2404
2405 if (CHIP_IS_E3B0(bp)) {
2406 bnx2x_pfc_brb_get_e3b0_config_params(
2407 params,
2408 &e3b0_val,
2409 pfc_params,
2410 pfc_enabled);
2411
2412 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
2413 e3b0_val.per_class_guaranty_mode);
2414
2415 /* The hysteresis on the guarantied buffer space for the Lb
2416 * port before signaling XON.
2417 */
2418 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
2419 e3b0_val.lb_guarantied_hyst);
2420
2421 /* The number of free blocks below which the full signal to the
2422 * LB port is asserted.
2423 */
2424 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
2425 e3b0_val.full_lb_xoff_th);
2426 /* The number of free blocks above which the full signal to the
2427 * LB port is de-asserted.
2428 */
2429 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
2430 e3b0_val.full_lb_xon_threshold);
2431 /* The number of blocks guarantied for the MAC #n port. n=0,1
2432 */
2433
2434 /* The number of blocks guarantied for the LB port. */
2435 REG_WR(bp, BRB1_REG_LB_GUARANTIED,
2436 e3b0_val.lb_guarantied);
2437
2438 /* The number of blocks guarantied for the MAC #n port. */
2439 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
2440 2 * e3b0_val.mac_0_class_t_guarantied);
2441 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
2442 2 * e3b0_val.mac_1_class_t_guarantied);
2443 /* The number of blocks guarantied for class #t in MAC0. t=0,1
2444 */
2445 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
2446 e3b0_val.mac_0_class_t_guarantied);
2447 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
2448 e3b0_val.mac_0_class_t_guarantied);
2449 /* The hysteresis on the guarantied buffer space for class in
2450 * MAC0. t=0,1
2451 */
2452 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
2453 e3b0_val.mac_0_class_t_guarantied_hyst);
2454 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
2455 e3b0_val.mac_0_class_t_guarantied_hyst);
2456
2457 /* The number of blocks guarantied for class #t in MAC1.t=0,1
2458 */
2459 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
2460 e3b0_val.mac_1_class_t_guarantied);
2461 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
2462 e3b0_val.mac_1_class_t_guarantied);
2463 /* The hysteresis on the guarantied buffer space for class #t
2464 * in MAC1. t=0,1
2465 */
2466 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
2467 e3b0_val.mac_1_class_t_guarantied_hyst);
2468 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
2469 e3b0_val.mac_1_class_t_guarantied_hyst);
2470 }
2471
2472 return bnx2x_status;
2473}
2474
2475/****************************************************************************** 2033/******************************************************************************
2476* Description: 2034* Description:
2477* This function is needed because NIG ARB_CREDIT_WEIGHT_X are 2035* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
@@ -2529,16 +2087,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
2529 port_mb[params->port].link_status), link_status); 2087 port_mb[params->port].link_status), link_status);
2530} 2088}
2531 2089
2532static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
2533{
2534 struct bnx2x *bp = params->bp;
2535
2536 if (bnx2x_eee_has_cap(params))
2537 REG_WR(bp, params->shmem2_base +
2538 offsetof(struct shmem2_region,
2539 eee_status[params->port]), eee_status);
2540}
2541
2542static void bnx2x_update_pfc_nig(struct link_params *params, 2090static void bnx2x_update_pfc_nig(struct link_params *params,
2543 struct link_vars *vars, 2091 struct link_vars *vars,
2544 struct bnx2x_nig_brb_pfc_port_params *nig_params) 2092 struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2658,11 +2206,6 @@ int bnx2x_update_pfc(struct link_params *params,
2658 /* Update NIG params */ 2206 /* Update NIG params */
2659 bnx2x_update_pfc_nig(params, vars, pfc_params); 2207 bnx2x_update_pfc_nig(params, vars, pfc_params);
2660 2208
2661 /* Update BRB params */
2662 bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
2663 if (bnx2x_status)
2664 return bnx2x_status;
2665
2666 if (!vars->link_up) 2209 if (!vars->link_up)
2667 return bnx2x_status; 2210 return bnx2x_status;
2668 2211
@@ -2827,16 +2370,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
2827 2370
2828static int bnx2x_bmac_enable(struct link_params *params, 2371static int bnx2x_bmac_enable(struct link_params *params,
2829 struct link_vars *vars, 2372 struct link_vars *vars,
2830 u8 is_lb) 2373 u8 is_lb, u8 reset_bmac)
2831{ 2374{
2832 int rc = 0; 2375 int rc = 0;
2833 u8 port = params->port; 2376 u8 port = params->port;
2834 struct bnx2x *bp = params->bp; 2377 struct bnx2x *bp = params->bp;
2835 u32 val; 2378 u32 val;
2836 /* Reset and unreset the BigMac */ 2379 /* Reset and unreset the BigMac */
2837 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2380 if (reset_bmac) {
2838 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2381 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2839 usleep_range(1000, 2000); 2382 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2383 usleep_range(1000, 2000);
2384 }
2840 2385
2841 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 2386 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2842 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2387 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2868,37 +2413,28 @@ static int bnx2x_bmac_enable(struct link_params *params,
2868 return rc; 2413 return rc;
2869} 2414}
2870 2415
2871static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 2416static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
2872{ 2417{
2873 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 2418 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2874 NIG_REG_INGRESS_BMAC0_MEM; 2419 NIG_REG_INGRESS_BMAC0_MEM;
2875 u32 wb_data[2]; 2420 u32 wb_data[2];
2876 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 2421 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
2877 2422
2423 if (CHIP_IS_E2(bp))
2424 bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
2425 else
2426 bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
2878 /* Only if the bmac is out of reset */ 2427 /* Only if the bmac is out of reset */
2879 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 2428 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2880 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && 2429 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
2881 nig_bmac_enable) { 2430 nig_bmac_enable) {
2882 2431 /* Clear Rx Enable bit in BMAC_CONTROL register */
2883 if (CHIP_IS_E2(bp)) { 2432 REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
2884 /* Clear Rx Enable bit in BMAC_CONTROL register */ 2433 if (en)
2885 REG_RD_DMAE(bp, bmac_addr + 2434 wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
2886 BIGMAC2_REGISTER_BMAC_CONTROL, 2435 else
2887 wb_data, 2);
2888 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
2889 REG_WR_DMAE(bp, bmac_addr +
2890 BIGMAC2_REGISTER_BMAC_CONTROL,
2891 wb_data, 2);
2892 } else {
2893 /* Clear Rx Enable bit in BMAC_CONTROL register */
2894 REG_RD_DMAE(bp, bmac_addr +
2895 BIGMAC_REGISTER_BMAC_CONTROL,
2896 wb_data, 2);
2897 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 2436 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
2898 REG_WR_DMAE(bp, bmac_addr + 2437 REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
2899 BIGMAC_REGISTER_BMAC_CONTROL,
2900 wb_data, 2);
2901 }
2902 usleep_range(1000, 2000); 2438 usleep_range(1000, 2000);
2903 } 2439 }
2904} 2440}
@@ -3233,6 +2769,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3233 EMAC_MDIO_STATUS_10MB); 2769 EMAC_MDIO_STATUS_10MB);
3234 return rc; 2770 return rc;
3235} 2771}
2772
2773/******************************************************************/
2774/* EEE section */
2775/******************************************************************/
2776static u8 bnx2x_eee_has_cap(struct link_params *params)
2777{
2778 struct bnx2x *bp = params->bp;
2779
2780 if (REG_RD(bp, params->shmem2_base) <=
2781 offsetof(struct shmem2_region, eee_status[params->port]))
2782 return 0;
2783
2784 return 1;
2785}
2786
2787static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
2788{
2789 switch (nvram_mode) {
2790 case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
2791 *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
2792 break;
2793 case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
2794 *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
2795 break;
2796 case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
2797 *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
2798 break;
2799 default:
2800 *idle_timer = 0;
2801 break;
2802 }
2803
2804 return 0;
2805}
2806
2807static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
2808{
2809 switch (idle_timer) {
2810 case EEE_MODE_NVRAM_BALANCED_TIME:
2811 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
2812 break;
2813 case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
2814 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
2815 break;
2816 case EEE_MODE_NVRAM_LATENCY_TIME:
2817 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
2818 break;
2819 default:
2820 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
2821 break;
2822 }
2823
2824 return 0;
2825}
2826
2827static u32 bnx2x_eee_calc_timer(struct link_params *params)
2828{
2829 u32 eee_mode, eee_idle;
2830 struct bnx2x *bp = params->bp;
2831
2832 if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
2833 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
2834 /* time value in eee_mode --> used directly*/
2835 eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
2836 } else {
2837 /* hsi value in eee_mode --> time */
2838 if (bnx2x_eee_nvram_to_time(params->eee_mode &
2839 EEE_MODE_NVRAM_MASK,
2840 &eee_idle))
2841 return 0;
2842 }
2843 } else {
2844 /* hsi values in nvram --> time*/
2845 eee_mode = ((REG_RD(bp, params->shmem_base +
2846 offsetof(struct shmem_region, dev_info.
2847 port_feature_config[params->port].
2848 eee_power_mode)) &
2849 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
2850 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
2851
2852 if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
2853 return 0;
2854 }
2855
2856 return eee_idle;
2857}
2858
2859static int bnx2x_eee_set_timers(struct link_params *params,
2860 struct link_vars *vars)
2861{
2862 u32 eee_idle = 0, eee_mode;
2863 struct bnx2x *bp = params->bp;
2864
2865 eee_idle = bnx2x_eee_calc_timer(params);
2866
2867 if (eee_idle) {
2868 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
2869 eee_idle);
2870 } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
2871 (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
2872 (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
2873 DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
2874 return -EINVAL;
2875 }
2876
2877 vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
2878 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
2879 /* eee_idle in 1u --> eee_status in 16u */
2880 eee_idle >>= 4;
2881 vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
2882 SHMEM_EEE_TIME_OUTPUT_BIT;
2883 } else {
2884 if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
2885 return -EINVAL;
2886 vars->eee_status |= eee_mode;
2887 }
2888
2889 return 0;
2890}
2891
2892static int bnx2x_eee_initial_config(struct link_params *params,
2893 struct link_vars *vars, u8 mode)
2894{
2895 vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
2896
2897 /* Propogate params' bits --> vars (for migration exposure) */
2898 if (params->eee_mode & EEE_MODE_ENABLE_LPI)
2899 vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
2900 else
2901 vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
2902
2903 if (params->eee_mode & EEE_MODE_ADV_LPI)
2904 vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
2905 else
2906 vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
2907
2908 return bnx2x_eee_set_timers(params, vars);
2909}
2910
2911static int bnx2x_eee_disable(struct bnx2x_phy *phy,
2912 struct link_params *params,
2913 struct link_vars *vars)
2914{
2915 struct bnx2x *bp = params->bp;
2916
2917 /* Make Certain LPI is disabled */
2918 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
2919
2920 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
2921
2922 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
2923
2924 return 0;
2925}
2926
2927static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
2928 struct link_params *params,
2929 struct link_vars *vars, u8 modes)
2930{
2931 struct bnx2x *bp = params->bp;
2932 u16 val = 0;
2933
2934 /* Mask events preventing LPI generation */
2935 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
2936
2937 if (modes & SHMEM_EEE_10G_ADV) {
2938 DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
2939 val |= 0x8;
2940 }
2941 if (modes & SHMEM_EEE_1G_ADV) {
2942 DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
2943 val |= 0x4;
2944 }
2945
2946 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
2947
2948 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
2949 vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
2950
2951 return 0;
2952}
2953
2954static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
2955{
2956 struct bnx2x *bp = params->bp;
2957
2958 if (bnx2x_eee_has_cap(params))
2959 REG_WR(bp, params->shmem2_base +
2960 offsetof(struct shmem2_region,
2961 eee_status[params->port]), eee_status);
2962}
2963
2964static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
2965 struct link_params *params,
2966 struct link_vars *vars)
2967{
2968 struct bnx2x *bp = params->bp;
2969 u16 adv = 0, lp = 0;
2970 u32 lp_adv = 0;
2971 u8 neg = 0;
2972
2973 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
2974 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
2975
2976 if (lp & 0x2) {
2977 lp_adv |= SHMEM_EEE_100M_ADV;
2978 if (adv & 0x2) {
2979 if (vars->line_speed == SPEED_100)
2980 neg = 1;
2981 DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
2982 }
2983 }
2984 if (lp & 0x14) {
2985 lp_adv |= SHMEM_EEE_1G_ADV;
2986 if (adv & 0x14) {
2987 if (vars->line_speed == SPEED_1000)
2988 neg = 1;
2989 DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
2990 }
2991 }
2992 if (lp & 0x68) {
2993 lp_adv |= SHMEM_EEE_10G_ADV;
2994 if (adv & 0x68) {
2995 if (vars->line_speed == SPEED_10000)
2996 neg = 1;
2997 DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
2998 }
2999 }
3000
3001 vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
3002 vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
3003
3004 if (neg) {
3005 DP(NETIF_MSG_LINK, "EEE is active\n");
3006 vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
3007 }
3008
3009}
3010
3236/******************************************************************/ 3011/******************************************************************/
3237/* BSC access functions from E3 */ 3012/* BSC access functions from E3 */
3238/******************************************************************/ 3013/******************************************************************/
@@ -3754,6 +3529,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3754 * init configuration, and set/clear SGMII flag. Internal 3529 * init configuration, and set/clear SGMII flag. Internal
3755 * phy init is done purely in phy_init stage. 3530 * phy init is done purely in phy_init stage.
3756 */ 3531 */
3532
3533static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3534 struct link_params *params)
3535{
3536 struct bnx2x *bp = params->bp;
3537
3538 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
3539 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3540 MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
3541 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3542 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
3543}
3544
3757static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3545static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3758 struct link_params *params, 3546 struct link_params *params,
3759 struct link_vars *vars) { 3547 struct link_vars *vars) {
@@ -4013,13 +3801,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
4013 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3801 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4014 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); 3802 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
4015 3803
4016 /* Enable LPI pass through */ 3804 bnx2x_warpcore_set_lpi_passthrough(phy, params);
4017 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
4018 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4019 MDIO_WC_REG_EEE_COMBO_CONTROL0,
4020 0x7c);
4021 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4022 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
4023 3805
4024 /* 10G XFI Full Duplex */ 3806 /* 10G XFI Full Duplex */
4025 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3807 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4116,6 +3898,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
4116 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3898 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4117 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); 3899 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
4118 3900
3901 bnx2x_warpcore_set_lpi_passthrough(phy, params);
3902
4119 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { 3903 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
4120 /* SGMII Autoneg */ 3904 /* SGMII Autoneg */
4121 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3905 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4409,7 +4193,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4409 "serdes_net_if = 0x%x\n", 4193 "serdes_net_if = 0x%x\n",
4410 vars->line_speed, serdes_net_if); 4194 vars->line_speed, serdes_net_if);
4411 bnx2x_set_aer_mmd(params, phy); 4195 bnx2x_set_aer_mmd(params, phy);
4412 4196 bnx2x_warpcore_reset_lane(bp, phy, 1);
4413 vars->phy_flags |= PHY_XGXS_FLAG; 4197 vars->phy_flags |= PHY_XGXS_FLAG;
4414 if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || 4198 if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
4415 (phy->req_line_speed && 4199 (phy->req_line_speed &&
@@ -4718,6 +4502,10 @@ void bnx2x_link_status_update(struct link_params *params,
4718 vars->link_status = REG_RD(bp, params->shmem_base + 4502 vars->link_status = REG_RD(bp, params->shmem_base +
4719 offsetof(struct shmem_region, 4503 offsetof(struct shmem_region,
4720 port_mb[port].link_status)); 4504 port_mb[port].link_status));
4505 if (bnx2x_eee_has_cap(params))
4506 vars->eee_status = REG_RD(bp, params->shmem2_base +
4507 offsetof(struct shmem2_region,
4508 eee_status[params->port]));
4721 4509
4722 vars->phy_flags = PHY_XGXS_FLAG; 4510 vars->phy_flags = PHY_XGXS_FLAG;
4723 bnx2x_sync_link(params, vars); 4511 bnx2x_sync_link(params, vars);
@@ -6530,25 +6318,21 @@ static int bnx2x_update_link_down(struct link_params *params,
6530 usleep_range(10000, 20000); 6318 usleep_range(10000, 20000);
6531 /* Reset BigMac/Xmac */ 6319 /* Reset BigMac/Xmac */
6532 if (CHIP_IS_E1x(bp) || 6320 if (CHIP_IS_E1x(bp) ||
6533 CHIP_IS_E2(bp)) { 6321 CHIP_IS_E2(bp))
6534 bnx2x_bmac_rx_disable(bp, params->port); 6322 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
6535 REG_WR(bp, GRCBASE_MISC + 6323
6536 MISC_REGISTERS_RESET_REG_2_CLEAR,
6537 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6538 }
6539 if (CHIP_IS_E3(bp)) { 6324 if (CHIP_IS_E3(bp)) {
6540 /* Prevent LPI Generation by chip */ 6325 /* Prevent LPI Generation by chip */
6541 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 6326 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
6542 0); 6327 0);
6543 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
6544 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), 6328 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
6545 0); 6329 0);
6546 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | 6330 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
6547 SHMEM_EEE_ACTIVE_BIT); 6331 SHMEM_EEE_ACTIVE_BIT);
6548 6332
6549 bnx2x_update_mng_eee(params, vars->eee_status); 6333 bnx2x_update_mng_eee(params, vars->eee_status);
6550 bnx2x_xmac_disable(params); 6334 bnx2x_set_xmac_rxtx(params, 0);
6551 bnx2x_umac_disable(params); 6335 bnx2x_set_umac_rxtx(params, 0);
6552 } 6336 }
6553 6337
6554 return 0; 6338 return 0;
@@ -6600,7 +6384,7 @@ static int bnx2x_update_link_up(struct link_params *params,
6600 if ((CHIP_IS_E1x(bp) || 6384 if ((CHIP_IS_E1x(bp) ||
6601 CHIP_IS_E2(bp))) { 6385 CHIP_IS_E2(bp))) {
6602 if (link_10g) { 6386 if (link_10g) {
6603 if (bnx2x_bmac_enable(params, vars, 0) == 6387 if (bnx2x_bmac_enable(params, vars, 0, 1) ==
6604 -ESRCH) { 6388 -ESRCH) {
6605 DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); 6389 DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
6606 vars->link_up = 0; 6390 vars->link_up = 0;
@@ -7207,6 +6991,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params,
7207 msleep(500); 6991 msleep(500);
7208} 6992}
7209 6993
6994static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
6995 struct link_params *params,
6996 u32 action)
6997{
6998 struct bnx2x *bp = params->bp;
6999 switch (action) {
7000 case PHY_INIT:
7001 /* Enable LASI */
7002 bnx2x_cl45_write(bp, phy,
7003 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
7004 bnx2x_cl45_write(bp, phy,
7005 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
7006 break;
7007 }
7008}
7009
7210static int bnx2x_8073_config_init(struct bnx2x_phy *phy, 7010static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7211 struct link_params *params, 7011 struct link_params *params,
7212 struct link_vars *vars) 7012 struct link_vars *vars)
@@ -7227,12 +7027,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7227 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 7027 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
7228 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 7028 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
7229 7029
7230 /* Enable LASI */ 7030 bnx2x_8073_specific_func(phy, params, PHY_INIT);
7231 bnx2x_cl45_write(bp, phy,
7232 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
7233 bnx2x_cl45_write(bp, phy,
7234 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
7235
7236 bnx2x_8073_set_pause_cl37(params, phy, vars); 7031 bnx2x_8073_set_pause_cl37(params, phy, vars);
7237 7032
7238 bnx2x_cl45_read(bp, phy, 7033 bnx2x_cl45_read(bp, phy,
@@ -8267,7 +8062,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
8267 u32 action) 8062 u32 action)
8268{ 8063{
8269 struct bnx2x *bp = params->bp; 8064 struct bnx2x *bp = params->bp;
8270 8065 u16 val;
8271 switch (action) { 8066 switch (action) {
8272 case DISABLE_TX: 8067 case DISABLE_TX:
8273 bnx2x_sfp_set_transmitter(params, phy, 0); 8068 bnx2x_sfp_set_transmitter(params, phy, 0);
@@ -8276,6 +8071,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
8276 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) 8071 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
8277 bnx2x_sfp_set_transmitter(params, phy, 1); 8072 bnx2x_sfp_set_transmitter(params, phy, 1);
8278 break; 8073 break;
8074 case PHY_INIT:
8075 bnx2x_cl45_write(bp, phy,
8076 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8077 (1<<2) | (1<<5));
8078 bnx2x_cl45_write(bp, phy,
8079 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
8080 0);
8081 bnx2x_cl45_write(bp, phy,
8082 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
8083 /* Make MOD_ABS give interrupt on change */
8084 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
8085 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8086 &val);
8087 val |= (1<<12);
8088 if (phy->flags & FLAGS_NOC)
8089 val |= (3<<5);
8090 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
8091 * status which reflect SFP+ module over-current
8092 */
8093 if (!(phy->flags & FLAGS_NOC))
8094 val &= 0xff8f; /* Reset bits 4-6 */
8095 bnx2x_cl45_write(bp, phy,
8096 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8097 val);
8098
8099 /* Set 2-wire transfer rate of SFP+ module EEPROM
8100 * to 100Khz since some DACs(direct attached cables) do
8101 * not work at 400Khz.
8102 */
8103 bnx2x_cl45_write(bp, phy,
8104 MDIO_PMA_DEVAD,
8105 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
8106 0xa001);
8107 break;
8279 default: 8108 default:
8280 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", 8109 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
8281 action); 8110 action);
@@ -9058,28 +8887,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9058 struct link_vars *vars) 8887 struct link_vars *vars)
9059{ 8888{
9060 u32 tx_en_mode; 8889 u32 tx_en_mode;
9061 u16 tmp1, val, mod_abs, tmp2; 8890 u16 tmp1, mod_abs, tmp2;
9062 u16 rx_alarm_ctrl_val;
9063 u16 lasi_ctrl_val;
9064 struct bnx2x *bp = params->bp; 8891 struct bnx2x *bp = params->bp;
9065 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ 8892 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
9066 8893
9067 bnx2x_wait_reset_complete(bp, phy, params); 8894 bnx2x_wait_reset_complete(bp, phy, params);
9068 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
9069 /* Should be 0x6 to enable XS on Tx side. */
9070 lasi_ctrl_val = 0x0006;
9071 8895
9072 DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); 8896 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
9073 /* Enable LASI */
9074 bnx2x_cl45_write(bp, phy,
9075 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
9076 rx_alarm_ctrl_val);
9077 bnx2x_cl45_write(bp, phy,
9078 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
9079 0);
9080 bnx2x_cl45_write(bp, phy,
9081 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
9082 8897
8898 bnx2x_8727_specific_func(phy, params, PHY_INIT);
9083 /* Initially configure MOD_ABS to interrupt when module is 8899 /* Initially configure MOD_ABS to interrupt when module is
9084 * presence( bit 8) 8900 * presence( bit 8)
9085 */ 8901 */
@@ -9095,25 +8911,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9095 bnx2x_cl45_write(bp, phy, 8911 bnx2x_cl45_write(bp, phy,
9096 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 8912 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9097 8913
9098
9099 /* Enable/Disable PHY transmitter output */ 8914 /* Enable/Disable PHY transmitter output */
9100 bnx2x_set_disable_pmd_transmit(params, phy, 0); 8915 bnx2x_set_disable_pmd_transmit(params, phy, 0);
9101 8916
9102 /* Make MOD_ABS give interrupt on change */
9103 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
9104 &val);
9105 val |= (1<<12);
9106 if (phy->flags & FLAGS_NOC)
9107 val |= (3<<5);
9108
9109 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9110 * status which reflect SFP+ module over-current
9111 */
9112 if (!(phy->flags & FLAGS_NOC))
9113 val &= 0xff8f; /* Reset bits 4-6 */
9114 bnx2x_cl45_write(bp, phy,
9115 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
9116
9117 bnx2x_8727_power_module(bp, phy, 1); 8917 bnx2x_8727_power_module(bp, phy, 1);
9118 8918
9119 bnx2x_cl45_read(bp, phy, 8919 bnx2x_cl45_read(bp, phy,
@@ -9123,13 +8923,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9123 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); 8923 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
9124 8924
9125 bnx2x_8727_config_speed(phy, params); 8925 bnx2x_8727_config_speed(phy, params);
9126 /* Set 2-wire transfer rate of SFP+ module EEPROM 8926
9127 * to 100Khz since some DACs(direct attached cables) do
9128 * not work at 400Khz.
9129 */
9130 bnx2x_cl45_write(bp, phy,
9131 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
9132 0xa001);
9133 8927
9134 /* Set TX PreEmphasis if needed */ 8928 /* Set TX PreEmphasis if needed */
9135 if ((params->feature_config_flags & 8929 if ((params->feature_config_flags &
@@ -9558,6 +9352,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9558 0xFFFB, 0xFFFD); 9352 0xFFFB, 0xFFFD);
9559} 9353}
9560 9354
9355static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9356 struct link_params *params,
9357 u32 action)
9358{
9359 struct bnx2x *bp = params->bp;
9360 switch (action) {
9361 case PHY_INIT:
9362 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9363 /* Save spirom version */
9364 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9365 }
9366 /* This phy uses the NIG latch mechanism since link indication
9367 * arrives through its LED4 and not via its LASI signal, so we
9368 * get steady signal instead of clear on read
9369 */
9370 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
9371 1 << NIG_LATCH_BC_ENABLE_MI_INT);
9372
9373 bnx2x_848xx_set_led(bp, phy);
9374 break;
9375 }
9376}
9377
9561static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, 9378static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9562 struct link_params *params, 9379 struct link_params *params,
9563 struct link_vars *vars) 9380 struct link_vars *vars)
@@ -9565,22 +9382,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9565 struct bnx2x *bp = params->bp; 9382 struct bnx2x *bp = params->bp;
9566 u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; 9383 u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
9567 9384
9568 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9385 bnx2x_848xx_specific_func(phy, params, PHY_INIT);
9569 /* Save spirom version */
9570 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9571 }
9572 /* This phy uses the NIG latch mechanism since link indication
9573 * arrives through its LED4 and not via its LASI signal, so we
9574 * get steady signal instead of clear on read
9575 */
9576 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
9577 1 << NIG_LATCH_BC_ENABLE_MI_INT);
9578
9579 bnx2x_cl45_write(bp, phy, 9386 bnx2x_cl45_write(bp, phy,
9580 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); 9387 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
9581 9388
9582 bnx2x_848xx_set_led(bp, phy);
9583
9584 /* set 1000 speed advertisement */ 9389 /* set 1000 speed advertisement */
9585 bnx2x_cl45_read(bp, phy, 9390 bnx2x_cl45_read(bp, phy,
9586 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9391 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
@@ -9887,39 +9692,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
9887 return 0; 9692 return 0;
9888} 9693}
9889 9694
9890static int bnx2x_8483x_eee_timers(struct link_params *params,
9891 struct link_vars *vars)
9892{
9893 u32 eee_idle = 0, eee_mode;
9894 struct bnx2x *bp = params->bp;
9895
9896 eee_idle = bnx2x_eee_calc_timer(params);
9897
9898 if (eee_idle) {
9899 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
9900 eee_idle);
9901 } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
9902 (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
9903 (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
9904 DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
9905 return -EINVAL;
9906 }
9907
9908 vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
9909 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
9910 /* eee_idle in 1u --> eee_status in 16u */
9911 eee_idle >>= 4;
9912 vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
9913 SHMEM_EEE_TIME_OUTPUT_BIT;
9914 } else {
9915 if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
9916 return -EINVAL;
9917 vars->eee_status |= eee_mode;
9918 }
9919
9920 return 0;
9921}
9922
9923static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, 9695static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9924 struct link_params *params, 9696 struct link_params *params,
9925 struct link_vars *vars) 9697 struct link_vars *vars)
@@ -9930,10 +9702,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9930 9702
9931 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 9703 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
9932 9704
9933 /* Make Certain LPI is disabled */
9934 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
9935 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
9936
9937 /* Prevent Phy from working in EEE and advertising it */ 9705 /* Prevent Phy from working in EEE and advertising it */
9938 rc = bnx2x_84833_cmd_hdlr(phy, params, 9706 rc = bnx2x_84833_cmd_hdlr(phy, params,
9939 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); 9707 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
@@ -9942,10 +9710,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9942 return rc; 9710 return rc;
9943 } 9711 }
9944 9712
9945 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0); 9713 return bnx2x_eee_disable(phy, params, vars);
9946 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9947
9948 return 0;
9949} 9714}
9950 9715
9951static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, 9716static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
@@ -9956,8 +9721,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
9956 struct bnx2x *bp = params->bp; 9721 struct bnx2x *bp = params->bp;
9957 u16 cmd_args = 1; 9722 u16 cmd_args = 1;
9958 9723
9959 DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
9960
9961 rc = bnx2x_84833_cmd_hdlr(phy, params, 9724 rc = bnx2x_84833_cmd_hdlr(phy, params,
9962 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); 9725 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
9963 if (rc) { 9726 if (rc) {
@@ -9965,15 +9728,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
9965 return rc; 9728 return rc;
9966 } 9729 }
9967 9730
9968 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8); 9731 return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
9969
9970 /* Mask events preventing LPI generation */
9971 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
9972
9973 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9974 vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
9975
9976 return 0;
9977} 9732}
9978 9733
9979#define PHY84833_CONSTANT_LATENCY 1193 9734#define PHY84833_CONSTANT_LATENCY 1193
@@ -10105,22 +9860,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10105 MDIO_84833_TOP_CFG_FW_REV, &val); 9860 MDIO_84833_TOP_CFG_FW_REV, &val);
10106 9861
10107 /* Configure EEE support */ 9862 /* Configure EEE support */
10108 if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) { 9863 if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
10109 phy->flags |= FLAGS_EEE_10GBT; 9864 (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
10110 vars->eee_status |= SHMEM_EEE_10G_ADV << 9865 bnx2x_eee_has_cap(params)) {
10111 SHMEM_EEE_SUPPORTED_SHIFT; 9866 rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
10112 /* Propogate params' bits --> vars (for migration exposure) */
10113 if (params->eee_mode & EEE_MODE_ENABLE_LPI)
10114 vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
10115 else
10116 vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
10117
10118 if (params->eee_mode & EEE_MODE_ADV_LPI)
10119 vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
10120 else
10121 vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
10122
10123 rc = bnx2x_8483x_eee_timers(params, vars);
10124 if (rc) { 9867 if (rc) {
10125 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); 9868 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
10126 bnx2x_8483x_disable_eee(phy, params, vars); 9869 bnx2x_8483x_disable_eee(phy, params, vars);
@@ -10139,7 +9882,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10139 return rc; 9882 return rc;
10140 } 9883 }
10141 } else { 9884 } else {
10142 phy->flags &= ~FLAGS_EEE_10GBT;
10143 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 9885 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10144 } 9886 }
10145 9887
@@ -10278,29 +10020,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10278 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 10020 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
10279 10021
10280 /* Determine if EEE was negotiated */ 10022 /* Determine if EEE was negotiated */
10281 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 10023 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10282 u32 eee_shmem = 0; 10024 bnx2x_eee_an_resolve(phy, params, vars);
10283
10284 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10285 MDIO_AN_REG_EEE_ADV, &val1);
10286 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10287 MDIO_AN_REG_LP_EEE_ADV, &val2);
10288 if ((val1 & val2) & 0x8) {
10289 DP(NETIF_MSG_LINK, "EEE negotiated\n");
10290 vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
10291 }
10292
10293 if (val2 & 0x12)
10294 eee_shmem |= SHMEM_EEE_100M_ADV;
10295 if (val2 & 0x4)
10296 eee_shmem |= SHMEM_EEE_1G_ADV;
10297 if (val2 & 0x68)
10298 eee_shmem |= SHMEM_EEE_10G_ADV;
10299
10300 vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
10301 vars->eee_status |= (eee_shmem <<
10302 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
10303 }
10304 } 10025 }
10305 10026
10306 return link_up; 10027 return link_up;
@@ -10569,6 +10290,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10569/******************************************************************/ 10290/******************************************************************/
10570/* 54618SE PHY SECTION */ 10291/* 54618SE PHY SECTION */
10571/******************************************************************/ 10292/******************************************************************/
10293static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
10294 struct link_params *params,
10295 u32 action)
10296{
10297 struct bnx2x *bp = params->bp;
10298 u16 temp;
10299 switch (action) {
10300 case PHY_INIT:
10301 /* Configure LED4: set to INTR (0x6). */
10302 /* Accessing shadow register 0xe. */
10303 bnx2x_cl22_write(bp, phy,
10304 MDIO_REG_GPHY_SHADOW,
10305 MDIO_REG_GPHY_SHADOW_LED_SEL2);
10306 bnx2x_cl22_read(bp, phy,
10307 MDIO_REG_GPHY_SHADOW,
10308 &temp);
10309 temp &= ~(0xf << 4);
10310 temp |= (0x6 << 4);
10311 bnx2x_cl22_write(bp, phy,
10312 MDIO_REG_GPHY_SHADOW,
10313 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10314 /* Configure INTR based on link status change. */
10315 bnx2x_cl22_write(bp, phy,
10316 MDIO_REG_INTR_MASK,
10317 ~MDIO_REG_INTR_MASK_LINK_STATUS);
10318 break;
10319 }
10320}
10321
10572static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 10322static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10573 struct link_params *params, 10323 struct link_params *params,
10574 struct link_vars *vars) 10324 struct link_vars *vars)
@@ -10606,24 +10356,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10606 /* Wait for GPHY to reset */ 10356 /* Wait for GPHY to reset */
10607 msleep(50); 10357 msleep(50);
10608 10358
10609 /* Configure LED4: set to INTR (0x6). */
10610 /* Accessing shadow register 0xe. */
10611 bnx2x_cl22_write(bp, phy,
10612 MDIO_REG_GPHY_SHADOW,
10613 MDIO_REG_GPHY_SHADOW_LED_SEL2);
10614 bnx2x_cl22_read(bp, phy,
10615 MDIO_REG_GPHY_SHADOW,
10616 &temp);
10617 temp &= ~(0xf << 4);
10618 temp |= (0x6 << 4);
10619 bnx2x_cl22_write(bp, phy,
10620 MDIO_REG_GPHY_SHADOW,
10621 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10622 /* Configure INTR based on link status change. */
10623 bnx2x_cl22_write(bp, phy,
10624 MDIO_REG_INTR_MASK,
10625 ~MDIO_REG_INTR_MASK_LINK_STATUS);
10626 10359
10360 bnx2x_54618se_specific_func(phy, params, PHY_INIT);
10627 /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ 10361 /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
10628 bnx2x_cl22_write(bp, phy, 10362 bnx2x_cl22_write(bp, phy,
10629 MDIO_REG_GPHY_SHADOW, 10363 MDIO_REG_GPHY_SHADOW,
@@ -10728,28 +10462,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10728 DP(NETIF_MSG_LINK, "Setting 10M force\n"); 10462 DP(NETIF_MSG_LINK, "Setting 10M force\n");
10729 } 10463 }
10730 10464
10731 /* Check if we should turn on Auto-GrEEEn */ 10465 if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
10732 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); 10466 int rc;
10733 if (temp == MDIO_REG_GPHY_ID_54618SE) { 10467
10734 if (params->feature_config_flags & 10468 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
10735 FEATURE_CONFIG_AUTOGREEEN_ENABLED) { 10469 MDIO_REG_GPHY_EXP_ACCESS_TOP |
10736 temp = 6; 10470 MDIO_REG_GPHY_EXP_TOP_2K_BUF);
10737 DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); 10471 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
10472 temp &= 0xfffe;
10473 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
10474
10475 rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
10476 if (rc) {
10477 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
10478 bnx2x_eee_disable(phy, params, vars);
10479 } else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
10480 (phy->req_duplex == DUPLEX_FULL) &&
10481 (bnx2x_eee_calc_timer(params) ||
10482 !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
10483 /* Need to advertise EEE only when requested,
10484 * and either no LPI assertion was requested,
10485 * or it was requested and a valid timer was set.
10486 * Also notice full duplex is required for EEE.
10487 */
10488 bnx2x_eee_advertise(phy, params, vars,
10489 SHMEM_EEE_1G_ADV);
10738 } else { 10490 } else {
10739 temp = 0; 10491 DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
10740 DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); 10492 bnx2x_eee_disable(phy, params, vars);
10493 }
10494 } else {
10495 vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
10496 SHMEM_EEE_SUPPORTED_SHIFT;
10497
10498 if (phy->flags & FLAGS_EEE) {
10499 /* Handle legacy auto-grEEEn */
10500 if (params->feature_config_flags &
10501 FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
10502 temp = 6;
10503 DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
10504 } else {
10505 temp = 0;
10506 DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
10507 }
10508 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10509 MDIO_AN_REG_EEE_ADV, temp);
10741 } 10510 }
10742 bnx2x_cl22_write(bp, phy,
10743 MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
10744 bnx2x_cl22_write(bp, phy,
10745 MDIO_REG_GPHY_CL45_DATA_REG,
10746 MDIO_REG_GPHY_EEE_ADV);
10747 bnx2x_cl22_write(bp, phy,
10748 MDIO_REG_GPHY_CL45_ADDR_REG,
10749 (0x1 << 14) | MDIO_AN_DEVAD);
10750 bnx2x_cl22_write(bp, phy,
10751 MDIO_REG_GPHY_CL45_DATA_REG,
10752 temp);
10753 } 10511 }
10754 10512
10755 bnx2x_cl22_write(bp, phy, 10513 bnx2x_cl22_write(bp, phy,
@@ -10896,29 +10654,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10896 DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", 10654 DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
10897 vars->line_speed); 10655 vars->line_speed);
10898 10656
10899 /* Report whether EEE is resolved. */
10900 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
10901 if (val == MDIO_REG_GPHY_ID_54618SE) {
10902 if (vars->link_status &
10903 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
10904 val = 0;
10905 else {
10906 bnx2x_cl22_write(bp, phy,
10907 MDIO_REG_GPHY_CL45_ADDR_REG,
10908 MDIO_AN_DEVAD);
10909 bnx2x_cl22_write(bp, phy,
10910 MDIO_REG_GPHY_CL45_DATA_REG,
10911 MDIO_REG_GPHY_EEE_RESOLVED);
10912 bnx2x_cl22_write(bp, phy,
10913 MDIO_REG_GPHY_CL45_ADDR_REG,
10914 (0x1 << 14) | MDIO_AN_DEVAD);
10915 bnx2x_cl22_read(bp, phy,
10916 MDIO_REG_GPHY_CL45_DATA_REG,
10917 &val);
10918 }
10919 DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
10920 }
10921
10922 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10657 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10923 10658
10924 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 10659 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
@@ -10948,6 +10683,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10948 if (val & (1<<11)) 10683 if (val & (1<<11))
10949 vars->link_status |= 10684 vars->link_status |=
10950 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; 10685 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
10686
10687 if ((phy->flags & FLAGS_EEE) &&
10688 bnx2x_eee_has_cap(params))
10689 bnx2x_eee_an_resolve(phy, params, vars);
10951 } 10690 }
10952 } 10691 }
10953 return link_up; 10692 return link_up;
@@ -11353,7 +11092,7 @@ static struct bnx2x_phy phy_8073 = {
11353 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, 11092 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
11354 .hw_reset = (hw_reset_t)NULL, 11093 .hw_reset = (hw_reset_t)NULL,
11355 .set_link_led = (set_link_led_t)NULL, 11094 .set_link_led = (set_link_led_t)NULL,
11356 .phy_specific_func = (phy_specific_func_t)NULL 11095 .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
11357}; 11096};
11358static struct bnx2x_phy phy_8705 = { 11097static struct bnx2x_phy phy_8705 = {
11359 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, 11098 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11546,7 +11285,7 @@ static struct bnx2x_phy phy_84823 = {
11546 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11285 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
11547 .hw_reset = (hw_reset_t)NULL, 11286 .hw_reset = (hw_reset_t)NULL,
11548 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11287 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11549 .phy_specific_func = (phy_specific_func_t)NULL 11288 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
11550}; 11289};
11551 11290
11552static struct bnx2x_phy phy_84833 = { 11291static struct bnx2x_phy phy_84833 = {
@@ -11555,8 +11294,7 @@ static struct bnx2x_phy phy_84833 = {
11555 .def_md_devad = 0, 11294 .def_md_devad = 0,
11556 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 11295 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11557 FLAGS_REARM_LATCH_SIGNAL | 11296 FLAGS_REARM_LATCH_SIGNAL |
11558 FLAGS_TX_ERROR_CHECK | 11297 FLAGS_TX_ERROR_CHECK),
11559 FLAGS_EEE_10GBT),
11560 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11298 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11561 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11299 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11562 .mdio_ctrl = 0, 11300 .mdio_ctrl = 0,
@@ -11582,7 +11320,7 @@ static struct bnx2x_phy phy_84833 = {
11582 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11320 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
11583 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 11321 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11584 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11322 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11585 .phy_specific_func = (phy_specific_func_t)NULL 11323 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
11586}; 11324};
11587 11325
11588static struct bnx2x_phy phy_54618se = { 11326static struct bnx2x_phy phy_54618se = {
@@ -11616,7 +11354,7 @@ static struct bnx2x_phy phy_54618se = {
11616 .format_fw_ver = (format_fw_ver_t)NULL, 11354 .format_fw_ver = (format_fw_ver_t)NULL,
11617 .hw_reset = (hw_reset_t)NULL, 11355 .hw_reset = (hw_reset_t)NULL,
11618 .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, 11356 .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
11619 .phy_specific_func = (phy_specific_func_t)NULL 11357 .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
11620}; 11358};
11621/*****************************************************************/ 11359/*****************************************************************/
11622/* */ 11360/* */
@@ -11862,6 +11600,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11862 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: 11600 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
11863 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: 11601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
11864 *phy = phy_54618se; 11602 *phy = phy_54618se;
11603 if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
11604 phy->flags |= FLAGS_EEE;
11865 break; 11605 break;
11866 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 11606 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
11867 *phy = phy_7101; 11607 *phy = phy_7101;
@@ -12141,7 +11881,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
12141 bnx2x_xgxs_deassert(params); 11881 bnx2x_xgxs_deassert(params);
12142 11882
12143 /* set bmac loopback */ 11883 /* set bmac loopback */
12144 bnx2x_bmac_enable(params, vars, 1); 11884 bnx2x_bmac_enable(params, vars, 1, 1);
12145 11885
12146 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 11886 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12147} 11887}
@@ -12233,7 +11973,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
12233 if (USES_WARPCORE(bp)) 11973 if (USES_WARPCORE(bp))
12234 bnx2x_xmac_enable(params, vars, 0); 11974 bnx2x_xmac_enable(params, vars, 0);
12235 else 11975 else
12236 bnx2x_bmac_enable(params, vars, 0); 11976 bnx2x_bmac_enable(params, vars, 0, 1);
12237 } 11977 }
12238 11978
12239 if (params->loopback_mode == LOOPBACK_XGXS) { 11979 if (params->loopback_mode == LOOPBACK_XGXS) {
@@ -12258,8 +11998,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
12258 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 11998 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
12259} 11999}
12260 12000
12001static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
12002{
12003 struct bnx2x *bp = params->bp;
12004 u8 val = en * 0x1F;
12005
12006 /* Open the gate between the NIG to the BRB */
12007 if (!CHIP_IS_E1x(bp))
12008 val |= en * 0x20;
12009 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
12010
12011 if (!CHIP_IS_E1(bp)) {
12012 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
12013 en*0x3);
12014 }
12015
12016 REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
12017 NIG_REG_LLH0_BRB1_NOT_MCP), en);
12018}
12019static int bnx2x_avoid_link_flap(struct link_params *params,
12020 struct link_vars *vars)
12021{
12022 u32 phy_idx;
12023 u32 dont_clear_stat, lfa_sts;
12024 struct bnx2x *bp = params->bp;
12025
12026 /* Sync the link parameters */
12027 bnx2x_link_status_update(params, vars);
12028
12029 /*
12030 * The module verification was already done by previous link owner,
12031 * so this call is meant only to get warning message
12032 */
12033
12034 for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
12035 struct bnx2x_phy *phy = &params->phy[phy_idx];
12036 if (phy->phy_specific_func) {
12037 DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
12038 phy->phy_specific_func(phy, params, PHY_INIT);
12039 }
12040 if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
12041 (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
12042 (phy->media_type == ETH_PHY_DA_TWINAX))
12043 bnx2x_verify_sfp_module(phy, params);
12044 }
12045 lfa_sts = REG_RD(bp, params->lfa_base +
12046 offsetof(struct shmem_lfa,
12047 lfa_sts));
12048
12049 dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
12050
12051 /* Re-enable the NIG/MAC */
12052 if (CHIP_IS_E3(bp)) {
12053 if (!dont_clear_stat) {
12054 REG_WR(bp, GRCBASE_MISC +
12055 MISC_REGISTERS_RESET_REG_2_CLEAR,
12056 (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
12057 params->port));
12058 REG_WR(bp, GRCBASE_MISC +
12059 MISC_REGISTERS_RESET_REG_2_SET,
12060 (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
12061 params->port));
12062 }
12063 if (vars->line_speed < SPEED_10000)
12064 bnx2x_umac_enable(params, vars, 0);
12065 else
12066 bnx2x_xmac_enable(params, vars, 0);
12067 } else {
12068 if (vars->line_speed < SPEED_10000)
12069 bnx2x_emac_enable(params, vars, 0);
12070 else
12071 bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
12072 }
12073
12074 /* Increment LFA count */
12075 lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
12076 (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
12077 LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
12078 << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
12079 /* Clear link flap reason */
12080 lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
12081
12082 REG_WR(bp, params->lfa_base +
12083 offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
12084
12085 /* Disable NIG DRAIN */
12086 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12087
12088 /* Enable interrupts */
12089 bnx2x_link_int_enable(params);
12090 return 0;
12091}
12092
12093static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
12094 struct link_vars *vars,
12095 int lfa_status)
12096{
12097 u32 lfa_sts, cfg_idx, tmp_val;
12098 struct bnx2x *bp = params->bp;
12099
12100 bnx2x_link_reset(params, vars, 1);
12101
12102 if (!params->lfa_base)
12103 return;
12104 /* Store the new link parameters */
12105 REG_WR(bp, params->lfa_base +
12106 offsetof(struct shmem_lfa, req_duplex),
12107 params->req_duplex[0] | (params->req_duplex[1] << 16));
12108
12109 REG_WR(bp, params->lfa_base +
12110 offsetof(struct shmem_lfa, req_flow_ctrl),
12111 params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
12112
12113 REG_WR(bp, params->lfa_base +
12114 offsetof(struct shmem_lfa, req_line_speed),
12115 params->req_line_speed[0] | (params->req_line_speed[1] << 16));
12116
12117 for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
12118 REG_WR(bp, params->lfa_base +
12119 offsetof(struct shmem_lfa,
12120 speed_cap_mask[cfg_idx]),
12121 params->speed_cap_mask[cfg_idx]);
12122 }
12123
12124 tmp_val = REG_RD(bp, params->lfa_base +
12125 offsetof(struct shmem_lfa, additional_config));
12126 tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
12127 tmp_val |= params->req_fc_auto_adv;
12128
12129 REG_WR(bp, params->lfa_base +
12130 offsetof(struct shmem_lfa, additional_config), tmp_val);
12131
12132 lfa_sts = REG_RD(bp, params->lfa_base +
12133 offsetof(struct shmem_lfa, lfa_sts));
12134
12135 /* Clear the "Don't Clear Statistics" bit, and set reason */
12136 lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
12137
12138 /* Set link flap reason */
12139 lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
12140 lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
12141 LFA_LINK_FLAP_REASON_OFFSET);
12142
12143 /* Increment link flap counter */
12144 lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
12145 (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
12146 LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
12147 << LINK_FLAP_COUNT_OFFSET));
12148 REG_WR(bp, params->lfa_base +
12149 offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
12150 /* Proceed with regular link initialization */
12151}
12152
12261int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) 12153int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12262{ 12154{
12155 int lfa_status;
12263 struct bnx2x *bp = params->bp; 12156 struct bnx2x *bp = params->bp;
12264 DP(NETIF_MSG_LINK, "Phy Initialization started\n"); 12157 DP(NETIF_MSG_LINK, "Phy Initialization started\n");
12265 DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", 12158 DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
@@ -12274,6 +12167,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12274 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 12167 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12275 vars->mac_type = MAC_TYPE_NONE; 12168 vars->mac_type = MAC_TYPE_NONE;
12276 vars->phy_flags = 0; 12169 vars->phy_flags = 0;
12170 /* Driver opens NIG-BRB filters */
12171 bnx2x_set_rx_filter(params, 1);
12172 /* Check if link flap can be avoided */
12173 lfa_status = bnx2x_check_lfa(params);
12174
12175 if (lfa_status == 0) {
12176 DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
12177 return bnx2x_avoid_link_flap(params, vars);
12178 }
12179
12180 DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n",
12181 lfa_status);
12182 bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
12277 12183
12278 /* Disable attentions */ 12184 /* Disable attentions */
12279 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 12185 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -12356,13 +12262,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12356 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); 12262 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
12357 } 12263 }
12358 12264
12359 /* Stop BigMac rx */ 12265 if (!CHIP_IS_E3(bp)) {
12360 if (!CHIP_IS_E3(bp)) 12266 bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
12361 bnx2x_bmac_rx_disable(bp, port); 12267 } else {
12362 else { 12268 bnx2x_set_xmac_rxtx(params, 0);
12363 bnx2x_xmac_disable(params); 12269 bnx2x_set_umac_rxtx(params, 0);
12364 bnx2x_umac_disable(params); 12270 }
12365 }
12366 /* Disable emac */ 12271 /* Disable emac */
12367 if (!CHIP_IS_E3(bp)) 12272 if (!CHIP_IS_E3(bp))
12368 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 12273 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -12420,6 +12325,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12420 vars->phy_flags = 0; 12325 vars->phy_flags = 0;
12421 return 0; 12326 return 0;
12422} 12327}
12328int bnx2x_lfa_reset(struct link_params *params,
12329 struct link_vars *vars)
12330{
12331 struct bnx2x *bp = params->bp;
12332 vars->link_up = 0;
12333 vars->phy_flags = 0;
12334 if (!params->lfa_base)
12335 return bnx2x_link_reset(params, vars, 1);
12336 /*
12337 * Activate NIG drain so that during this time the device won't send
12338 * anything while it is unable to response.
12339 */
12340 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
12341
12342 /*
12343 * Close gracefully the gate from BMAC to NIG such that no half packets
12344 * are passed.
12345 */
12346 if (!CHIP_IS_E3(bp))
12347 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
12348
12349 if (CHIP_IS_E3(bp)) {
12350 bnx2x_set_xmac_rxtx(params, 0);
12351 bnx2x_set_umac_rxtx(params, 0);
12352 }
12353 /* Wait 10ms for the pipe to clean up*/
12354 usleep_range(10000, 20000);
12355
12356 /* Clean the NIG-BRB using the network filters in a way that will
12357 * not cut a packet in the middle.
12358 */
12359 bnx2x_set_rx_filter(params, 0);
12360
12361 /*
12362 * Re-open the gate between the BMAC and the NIG, after verifying the
12363 * gate to the BRB is closed, otherwise packets may arrive to the
12364 * firmware before driver had initialized it. The target is to achieve
12365 * minimum management protocol down time.
12366 */
12367 if (!CHIP_IS_E3(bp))
12368 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
12369
12370 if (CHIP_IS_E3(bp)) {
12371 bnx2x_set_xmac_rxtx(params, 1);
12372 bnx2x_set_umac_rxtx(params, 1);
12373 }
12374 /* Disable NIG drain */
12375 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12376 return 0;
12377}
12423 12378
12424/****************************************************************************/ 12379/****************************************************************************/
12425/* Common function */ 12380/* Common function */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 51cac8130051..9165b89a4b19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -155,7 +155,7 @@ struct bnx2x_phy {
155#define FLAGS_DUMMY_READ (1<<9) 155#define FLAGS_DUMMY_READ (1<<9)
156#define FLAGS_MDC_MDIO_WA_B0 (1<<10) 156#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
157#define FLAGS_TX_ERROR_CHECK (1<<12) 157#define FLAGS_TX_ERROR_CHECK (1<<12)
158#define FLAGS_EEE_10GBT (1<<13) 158#define FLAGS_EEE (1<<13)
159 159
160 /* preemphasis values for the rx side */ 160 /* preemphasis values for the rx side */
161 u16 rx_preemphasis[4]; 161 u16 rx_preemphasis[4];
@@ -216,6 +216,7 @@ struct bnx2x_phy {
216 phy_specific_func_t phy_specific_func; 216 phy_specific_func_t phy_specific_func;
217#define DISABLE_TX 1 217#define DISABLE_TX 1
218#define ENABLE_TX 2 218#define ENABLE_TX 2
219#define PHY_INIT 3
219}; 220};
220 221
221/* Inputs parameters to the CLC */ 222/* Inputs parameters to the CLC */
@@ -304,6 +305,8 @@ struct link_params {
304 struct bnx2x *bp; 305 struct bnx2x *bp;
305 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when 306 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
306 req_flow_ctrl is set to AUTO */ 307 req_flow_ctrl is set to AUTO */
308 u16 rsrv1;
309 u32 lfa_base;
307}; 310};
308 311
309/* Output parameters */ 312/* Output parameters */
@@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
356 to 0 */ 359 to 0 */
357int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, 360int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
358 u8 reset_ext_phy); 361 u8 reset_ext_phy);
359 362int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
360/* bnx2x_link_update should be called upon link interrupt */ 363/* bnx2x_link_update should be called upon link interrupt */
361int bnx2x_link_update(struct link_params *params, struct link_vars *vars); 364int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
362 365
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e11485ca037d..f7ed122f4071 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2166,7 +2166,6 @@ void bnx2x_link_set(struct bnx2x *bp)
2166{ 2166{
2167 if (!BP_NOMCP(bp)) { 2167 if (!BP_NOMCP(bp)) {
2168 bnx2x_acquire_phy_lock(bp); 2168 bnx2x_acquire_phy_lock(bp);
2169 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2170 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2169 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2171 bnx2x_release_phy_lock(bp); 2170 bnx2x_release_phy_lock(bp);
2172 2171
@@ -2179,12 +2178,19 @@ static void bnx2x__link_reset(struct bnx2x *bp)
2179{ 2178{
2180 if (!BP_NOMCP(bp)) { 2179 if (!BP_NOMCP(bp)) {
2181 bnx2x_acquire_phy_lock(bp); 2180 bnx2x_acquire_phy_lock(bp);
2182 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2181 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2183 bnx2x_release_phy_lock(bp); 2182 bnx2x_release_phy_lock(bp);
2184 } else 2183 } else
2185 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 2184 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2186} 2185}
2187 2186
2187void bnx2x_force_link_reset(struct bnx2x *bp)
2188{
2189 bnx2x_acquire_phy_lock(bp);
2190 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2191 bnx2x_release_phy_lock(bp);
2192}
2193
2188u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) 2194u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2189{ 2195{
2190 u8 rc = 0; 2196 u8 rc = 0;
@@ -6751,7 +6757,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6751 u32 low, high; 6757 u32 low, high;
6752 u32 val; 6758 u32 val;
6753 6759
6754 bnx2x__link_reset(bp);
6755 6760
6756 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 6761 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
6757 6762
@@ -8244,12 +8249,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8244 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 8249 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8245 * 8250 *
8246 * @bp: driver handle 8251 * @bp: driver handle
8252 * @keep_link: true iff link should be kept up
8247 */ 8253 */
8248void bnx2x_send_unload_done(struct bnx2x *bp) 8254void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8249{ 8255{
8256 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8257
8250 /* Report UNLOAD_DONE to MCP */ 8258 /* Report UNLOAD_DONE to MCP */
8251 if (!BP_NOMCP(bp)) 8259 if (!BP_NOMCP(bp))
8252 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 8260 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8253} 8261}
8254 8262
8255static int bnx2x_func_wait_started(struct bnx2x *bp) 8263static int bnx2x_func_wait_started(struct bnx2x *bp)
@@ -8318,7 +8326,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
8318 return 0; 8326 return 0;
8319} 8327}
8320 8328
8321void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 8329void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8322{ 8330{
8323 int port = BP_PORT(bp); 8331 int port = BP_PORT(bp);
8324 int i, rc = 0; 8332 int i, rc = 0;
@@ -8440,7 +8448,7 @@ unload_error:
8440 8448
8441 8449
8442 /* Report UNLOAD_DONE to MCP */ 8450 /* Report UNLOAD_DONE to MCP */
8443 bnx2x_send_unload_done(bp); 8451 bnx2x_send_unload_done(bp, keep_link);
8444} 8452}
8445 8453
8446void bnx2x_disable_close_the_gate(struct bnx2x *bp) 8454void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -8852,7 +8860,8 @@ int bnx2x_leader_reset(struct bnx2x *bp)
8852 * driver is owner of the HW 8860 * driver is owner of the HW
8853 */ 8861 */
8854 if (!global && !BP_NOMCP(bp)) { 8862 if (!global && !BP_NOMCP(bp)) {
8855 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); 8863 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
8864 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
8856 if (!load_code) { 8865 if (!load_code) {
8857 BNX2X_ERR("MCP response failure, aborting\n"); 8866 BNX2X_ERR("MCP response failure, aborting\n");
8858 rc = -EAGAIN; 8867 rc = -EAGAIN;
@@ -8958,7 +8967,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
8958 8967
8959 /* Stop the driver */ 8968 /* Stop the driver */
8960 /* If interface has been removed - break */ 8969 /* If interface has been removed - break */
8961 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) 8970 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
8962 return; 8971 return;
8963 8972
8964 bp->recovery_state = BNX2X_RECOVERY_WAIT; 8973 bp->recovery_state = BNX2X_RECOVERY_WAIT;
@@ -9124,7 +9133,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
9124 bp->sp_rtnl_state = 0; 9133 bp->sp_rtnl_state = 0;
9125 smp_mb(); 9134 smp_mb();
9126 9135
9127 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 9136 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9128 bnx2x_nic_load(bp, LOAD_NORMAL); 9137 bnx2x_nic_load(bp, LOAD_NORMAL);
9129 9138
9130 goto sp_rtnl_exit; 9139 goto sp_rtnl_exit;
@@ -9310,7 +9319,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
9310 9319
9311static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) 9320static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
9312{ 9321{
9313 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 9322 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9323 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9314 if (!rc) { 9324 if (!rc) {
9315 BNX2X_ERR("MCP response failure, aborting\n"); 9325 BNX2X_ERR("MCP response failure, aborting\n");
9316 return -EBUSY; 9326 return -EBUSY;
@@ -11000,7 +11010,7 @@ static int bnx2x_close(struct net_device *dev)
11000 struct bnx2x *bp = netdev_priv(dev); 11010 struct bnx2x *bp = netdev_priv(dev);
11001 11011
11002 /* Unload the driver, release IRQs */ 11012 /* Unload the driver, release IRQs */
11003 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 11013 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11004 11014
11005 /* Power off */ 11015 /* Power off */
11006 bnx2x_set_power_state(bp, PCI_D3hot); 11016 bnx2x_set_power_state(bp, PCI_D3hot);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 28a0bcfe61ff..1b1999d34c71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -4949,6 +4949,10 @@
4949#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) 4949#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
4950#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) 4950#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
4951#define UMAC_REG_COMMAND_CONFIG 0x8 4951#define UMAC_REG_COMMAND_CONFIG 0x8
4952/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE
4953 * state from LPI state when it receives packet for transmission. The
4954 * decrement unit is 1 micro-second. */
4955#define UMAC_REG_EEE_WAKE_TIMER 0x6c
4952/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers 4956/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
4953 * to bit 17 of the MAC address etc. */ 4957 * to bit 17 of the MAC address etc. */
4954#define UMAC_REG_MAC_ADDR0 0xc 4958#define UMAC_REG_MAC_ADDR0 0xc
@@ -4958,6 +4962,8 @@
4958/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive 4962/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
4959 * logic to check frames. */ 4963 * logic to check frames. */
4960#define UMAC_REG_MAXFR 0x14 4964#define UMAC_REG_MAXFR 0x14
4965#define UMAC_REG_UMAC_EEE_CTRL 0x64
4966#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3)
4961/* [RW 8] The event id for aggregated interrupt 0 */ 4967/* [RW 8] The event id for aggregated interrupt 0 */
4962#define USDM_REG_AGG_INT_EVENT_0 0xc4038 4968#define USDM_REG_AGG_INT_EVENT_0 0xc4038
4963#define USDM_REG_AGG_INT_EVENT_1 0xc403c 4969#define USDM_REG_AGG_INT_EVENT_1 0xc403c
@@ -6992,6 +6998,7 @@ Theotherbitsarereservedandshouldbezero*/
6992/* BCM84833 only */ 6998/* BCM84833 only */
6993#define MDIO_84833_TOP_CFG_FW_REV 0x400f 6999#define MDIO_84833_TOP_CFG_FW_REV 0x400f
6994#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 7000#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
7001#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
6995#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a 7002#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
6996#define MDIO_84833_SUPER_ISOLATE 0x8000 7003#define MDIO_84833_SUPER_ISOLATE 0x8000
6997/* These are mailbox register set used by 84833. */ 7004/* These are mailbox register set used by 84833. */
@@ -7160,10 +7167,11 @@ Theotherbitsarereservedandshouldbezero*/
7160#define MDIO_REG_GPHY_ID_54618SE 0x5cd5 7167#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
7161#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd 7168#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
7162#define MDIO_REG_GPHY_CL45_DATA_REG 0xe 7169#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
7163#define MDIO_REG_GPHY_EEE_ADV 0x3c
7164#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
7165#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
7166#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e 7170#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
7171#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
7172#define MDIO_REG_GPHY_EXP_ACCESS 0x17
7173#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
7174#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
7167#define MDIO_REG_GPHY_AUX_STATUS 0x19 7175#define MDIO_REG_GPHY_AUX_STATUS 0x19
7168#define MDIO_REG_INTR_STATUS 0x1a 7176#define MDIO_REG_INTR_STATUS 0x1a
7169#define MDIO_REG_INTR_MASK 0x1b 7177#define MDIO_REG_INTR_MASK 0x1b
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 62f754bd0dfe..71971a161bd1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
229 */ 229 */
230 list_add_tail(&spacer.link, &o->pending_comp); 230 list_add_tail(&spacer.link, &o->pending_comp);
231 mb(); 231 mb();
232 list_del(&elem->link); 232 list_move_tail(&elem->link, &o->pending_comp);
233 list_add_tail(&elem->link, &o->pending_comp);
234 list_del(&spacer.link); 233 list_del(&spacer.link);
235 } else 234 } else
236 break; 235 break;
@@ -5620,7 +5619,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5620 memset(rdata, 0, sizeof(*rdata)); 5619 memset(rdata, 0, sizeof(*rdata));
5621 5620
5622 /* Fill the ramrod data with provided parameters */ 5621 /* Fill the ramrod data with provided parameters */
5623 rdata->function_mode = cpu_to_le16(start_params->mf_mode); 5622 rdata->function_mode = (u8)start_params->mf_mode;
5624 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 5623 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5625 rdata->path_id = BP_PATH(bp); 5624 rdata->path_id = BP_PATH(bp);
5626 rdata->network_cos_mode = start_params->network_cos_mode; 5625 rdata->network_cos_mode = start_params->network_cos_mode;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index a1d0446b39b3..348ed02d3c69 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref)
39#endif 39#endif
40} 40}
41 41
42static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) 42static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
43{ 43{
44 u16 res = sizeof(struct host_port_stats) >> 2; 44 u16 res = 0;
45 45
46 /* if PFC stats are not supported by the MFW, don't DMA them */ 46 /* 'newest' convention - shmem2 cotains the size of the port stats */
47 if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) 47 if (SHMEM2_HAS(bp, sizeof_port_stats)) {
48 res -= (sizeof(u32)*4) >> 2; 48 u32 size = SHMEM2_RD(bp, sizeof_port_stats);
49 if (size)
50 res = size;
49 51
52 /* prevent newer BC from causing buffer overflow */
53 if (res > sizeof(struct host_port_stats))
54 res = sizeof(struct host_port_stats);
55 }
56
57 /* Older convention - all BCs support the port stats' fields up until
58 * the 'not_used' field
59 */
60 if (!res) {
61 res = offsetof(struct host_port_stats, not_used) + 4;
62
63 /* if PFC stats are supported by the MFW, DMA them as well */
64 if (bp->flags & BC_SUPPORTS_PFC_STATS) {
65 res += offsetof(struct host_port_stats,
66 pfc_frames_rx_lo) -
67 offsetof(struct host_port_stats,
68 pfc_frames_tx_hi) + 4 ;
69 }
70 }
71
72 res >>= 2;
73
74 WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
50 return res; 75 return res;
51} 76}
52 77
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3b4fc61f24cf..cc8434fd606e 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev)
823 } 823 }
824} 824}
825 825
826static void __cnic_free_uio(struct cnic_uio_dev *udev) 826static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
827{ 827{
828 uio_unregister_device(&udev->cnic_uinfo);
829
830 if (udev->l2_buf) { 828 if (udev->l2_buf) {
831 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, 829 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
832 udev->l2_buf, udev->l2_buf_map); 830 udev->l2_buf, udev->l2_buf_map);
@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev)
839 udev->l2_ring = NULL; 837 udev->l2_ring = NULL;
840 } 838 }
841 839
840}
841
842static void __cnic_free_uio(struct cnic_uio_dev *udev)
843{
844 uio_unregister_device(&udev->cnic_uinfo);
845
846 __cnic_free_uio_rings(udev);
847
842 pci_dev_put(udev->pdev); 848 pci_dev_put(udev->pdev);
843 kfree(udev); 849 kfree(udev);
844} 850}
@@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev)
862 if (udev) { 868 if (udev) {
863 udev->dev = NULL; 869 udev->dev = NULL;
864 cp->udev = NULL; 870 cp->udev = NULL;
871 if (udev->uio_dev == -1)
872 __cnic_free_uio_rings(udev);
865 } 873 }
866 874
867 cnic_free_context(dev); 875 cnic_free_context(dev);
@@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
996 return 0; 1004 return 0;
997} 1005}
998 1006
1007static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1008{
1009 struct cnic_local *cp = udev->dev->cnic_priv;
1010
1011 if (udev->l2_ring)
1012 return 0;
1013
1014 udev->l2_ring_size = pages * BCM_PAGE_SIZE;
1015 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1016 &udev->l2_ring_map,
1017 GFP_KERNEL | __GFP_COMP);
1018 if (!udev->l2_ring)
1019 return -ENOMEM;
1020
1021 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1022 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
1023 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1024 &udev->l2_buf_map,
1025 GFP_KERNEL | __GFP_COMP);
1026 if (!udev->l2_buf) {
1027 __cnic_free_uio_rings(udev);
1028 return -ENOMEM;
1029 }
1030
1031 return 0;
1032
1033}
1034
999static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) 1035static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1000{ 1036{
1001 struct cnic_local *cp = dev->cnic_priv; 1037 struct cnic_local *cp = dev->cnic_priv;
@@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1005 list_for_each_entry(udev, &cnic_udev_list, list) { 1041 list_for_each_entry(udev, &cnic_udev_list, list) {
1006 if (udev->pdev == dev->pcidev) { 1042 if (udev->pdev == dev->pcidev) {
1007 udev->dev = dev; 1043 udev->dev = dev;
1044 if (__cnic_alloc_uio_rings(udev, pages)) {
1045 udev->dev = NULL;
1046 read_unlock(&cnic_dev_lock);
1047 return -ENOMEM;
1048 }
1008 cp->udev = udev; 1049 cp->udev = udev;
1009 read_unlock(&cnic_dev_lock); 1050 read_unlock(&cnic_dev_lock);
1010 return 0; 1051 return 0;
@@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1020 1061
1021 udev->dev = dev; 1062 udev->dev = dev;
1022 udev->pdev = dev->pcidev; 1063 udev->pdev = dev->pcidev;
1023 udev->l2_ring_size = pages * BCM_PAGE_SIZE;
1024 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1025 &udev->l2_ring_map,
1026 GFP_KERNEL | __GFP_COMP);
1027 if (!udev->l2_ring)
1028 goto err_udev;
1029 1064
1030 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 1065 if (__cnic_alloc_uio_rings(udev, pages))
1031 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); 1066 goto err_udev;
1032 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1033 &udev->l2_buf_map,
1034 GFP_KERNEL | __GFP_COMP);
1035 if (!udev->l2_buf)
1036 goto err_dma;
1037 1067
1038 write_lock(&cnic_dev_lock); 1068 write_lock(&cnic_dev_lock);
1039 list_add(&udev->list, &cnic_udev_list); 1069 list_add(&udev->list, &cnic_udev_list);
@@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1044 cp->udev = udev; 1074 cp->udev = udev;
1045 1075
1046 return 0; 1076 return 0;
1047 err_dma: 1077
1048 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
1049 udev->l2_ring, udev->l2_ring_map);
1050 err_udev: 1078 err_udev:
1051 kfree(udev); 1079 kfree(udev);
1052 return -ENOMEM; 1080 return -ENOMEM;
@@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1260 if (ret) 1288 if (ret)
1261 goto error; 1289 goto error;
1262 1290
1263 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 1291 if (CNIC_SUPPORTS_FCOE(cp)) {
1264 ret = cnic_alloc_kcq(dev, &cp->kcq2, true); 1292 ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1265 if (ret) 1293 if (ret)
1266 goto error; 1294 goto error;
@@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1275 if (ret) 1303 if (ret)
1276 goto error; 1304 goto error;
1277 1305
1306 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1307 return 0;
1308
1278 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1309 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1279 1310
1280 cp->l2_rx_ring_size = 15; 1311 cp->l2_rx_ring_size = 15;
@@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3050 IGU_INT_DISABLE, 0); 3081 IGU_INT_DISABLE, 0);
3051} 3082}
3052 3083
3084static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3085{
3086 struct cnic_local *cp = dev->cnic_priv;
3087
3088 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3089 IGU_INT_ENABLE, 1);
3090}
3091
3092static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3093{
3094 struct cnic_local *cp = dev->cnic_priv;
3095
3096 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3097 IGU_INT_ENABLE, 1);
3098}
3099
3053static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 3100static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3054{ 3101{
3055 u32 last_status = *info->status_idx_ptr; 3102 u32 last_status = *info->status_idx_ptr;
@@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data)
3086 CNIC_WR16(dev, cp->kcq1.io_addr, 3133 CNIC_WR16(dev, cp->kcq1.io_addr,
3087 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3134 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3088 3135
3089 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 3136 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
3090 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 3137 cp->arm_int(dev, status_idx);
3091 status_idx, IGU_INT_ENABLE, 1);
3092 break; 3138 break;
3093 } 3139 }
3094 3140
@@ -4845,6 +4891,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4845 buf_map = udev->l2_buf_map; 4891 buf_map = udev->l2_buf_map;
4846 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4892 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4847 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4893 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4894 struct eth_tx_parse_bd_e1x *pbd_e1x =
4895 &((txbd + 1)->parse_bd_e1x);
4896 struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4848 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4897 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4849 4898
4850 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4899 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
@@ -4854,10 +4903,15 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4854 start_bd->nbytes = cpu_to_le16(0x10); 4903 start_bd->nbytes = cpu_to_le16(0x10);
4855 start_bd->nbd = cpu_to_le16(3); 4904 start_bd->nbd = cpu_to_le16(3);
4856 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 4905 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4857 start_bd->general_data = (UNICAST_ADDRESS << 4906 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4858 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4859 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4907 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4860 4908
4909 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
4910 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4911 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4912 else
4913 pbd_e1x->global_data = (UNICAST_ADDRESS <<
4914 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4861 } 4915 }
4862 4916
4863 val = (u64) ring_map >> 32; 4917 val = (u64) ring_map >> 32;
@@ -5308,7 +5362,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
5308 /* Need to wait for the ring shutdown event to complete 5362 /* Need to wait for the ring shutdown event to complete
5309 * before clearing the CNIC_UP flag. 5363 * before clearing the CNIC_UP flag.
5310 */ 5364 */
5311 while (cp->udev->uio_dev != -1 && i < 15) { 5365 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5312 msleep(100); 5366 msleep(100);
5313 i++; 5367 i++;
5314 } 5368 }
@@ -5473,8 +5527,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5473 5527
5474 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5528 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5475 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5529 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5476 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 5530 if (CNIC_SUPPORTS_FCOE(cp))
5477 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5478 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5531 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5479 5532
5480 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) 5533 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
@@ -5492,10 +5545,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5492 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5545 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5493 cp->enable_int = cnic_enable_bnx2x_int; 5546 cp->enable_int = cnic_enable_bnx2x_int;
5494 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5547 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5495 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 5548 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5496 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5549 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5497 else 5550 cp->arm_int = cnic_arm_bnx2x_e2_msix;
5551 } else {
5498 cp->ack_int = cnic_ack_bnx2x_msix; 5552 cp->ack_int = cnic_ack_bnx2x_msix;
5553 cp->arm_int = cnic_arm_bnx2x_msix;
5554 }
5499 cp->close_conn = cnic_close_bnx2x_conn; 5555 cp->close_conn = cnic_close_bnx2x_conn;
5500 return cdev; 5556 return cdev;
5501} 5557}
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 30328097f516..148604c3fa0c 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -334,6 +334,7 @@ struct cnic_local {
334 void (*enable_int)(struct cnic_dev *); 334 void (*enable_int)(struct cnic_dev *);
335 void (*disable_int_sync)(struct cnic_dev *); 335 void (*disable_int_sync)(struct cnic_dev *);
336 void (*ack_int)(struct cnic_dev *); 336 void (*ack_int)(struct cnic_dev *);
337 void (*arm_int)(struct cnic_dev *, u32 index);
337 void (*close_conn)(struct cnic_sock *, u32 opcode); 338 void (*close_conn)(struct cnic_sock *, u32 opcode);
338}; 339};
339 340
@@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next {
474 MAX_STAT_COUNTER_ID_E1)) 475 MAX_STAT_COUNTER_ID_E1))
475#endif 476#endif
476 477
478#define CNIC_SUPPORTS_FCOE(cp) \
479 (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
480 !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
481
477#define CNIC_RAMROD_TMO (HZ / 4) 482#define CNIC_RAMROD_TMO (HZ / 4)
478 483
479#endif 484#endif
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 382c98b0cc0c..ede3db35d757 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -896,7 +896,7 @@ struct tstorm_tcp_tcp_ag_context_section {
896 u32 snd_nxt; 896 u32 snd_nxt;
897 u32 rtt_seq; 897 u32 rtt_seq;
898 u32 rtt_time; 898 u32 rtt_time;
899 u32 __reserved66; 899 u32 wnd_right_edge_local;
900 u32 wnd_right_edge; 900 u32 wnd_right_edge;
901 u32 tcp_agg_vars1; 901 u32 tcp_agg_vars1;
902#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) 902#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 5cb88881bba1..865095aad1f6 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
14 14
15#include "bnx2x/bnx2x_mfw_req.h" 15#include "bnx2x/bnx2x_mfw_req.h"
16 16
17#define CNIC_MODULE_VERSION "2.5.12" 17#define CNIC_MODULE_VERSION "2.5.14"
18#define CNIC_MODULE_RELDATE "June 29, 2012" 18#define CNIC_MODULE_RELDATE "Sep 30, 2012"
19 19
20#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
21#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 388d32213937..46280ba4c5d4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,10 +44,8 @@
44#include <linux/prefetch.h> 44#include <linux/prefetch.h>
45#include <linux/dma-mapping.h> 45#include <linux/dma-mapping.h>
46#include <linux/firmware.h> 46#include <linux/firmware.h>
47#if IS_ENABLED(CONFIG_HWMON)
48#include <linux/hwmon.h> 47#include <linux/hwmon.h>
49#include <linux/hwmon-sysfs.h> 48#include <linux/hwmon-sysfs.h>
50#endif
51 49
52#include <net/checksum.h> 50#include <net/checksum.h>
53#include <net/ip.h> 51#include <net/ip.h>
@@ -92,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
92 90
93#define DRV_MODULE_NAME "tg3" 91#define DRV_MODULE_NAME "tg3"
94#define TG3_MAJ_NUM 3 92#define TG3_MAJ_NUM 3
95#define TG3_MIN_NUM 124 93#define TG3_MIN_NUM 125
96#define DRV_MODULE_VERSION \ 94#define DRV_MODULE_VERSION \
97 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 95 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
98#define DRV_MODULE_RELDATE "March 21, 2012" 96#define DRV_MODULE_RELDATE "September 26, 2012"
99 97
100#define RESET_KIND_SHUTDOWN 0 98#define RESET_KIND_SHUTDOWN 0
101#define RESET_KIND_INIT 1 99#define RESET_KIND_INIT 1
@@ -6263,7 +6261,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6263 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 6261 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6264 6262
6265 tp->rx_refill = false; 6263 tp->rx_refill = false;
6266 for (i = 1; i < tp->irq_cnt; i++) 6264 for (i = 1; i <= tp->rxq_cnt; i++)
6267 err |= tg3_rx_prodring_xfer(tp, dpr, 6265 err |= tg3_rx_prodring_xfer(tp, dpr,
6268 &tp->napi[i].prodring); 6266 &tp->napi[i].prodring);
6269 6267
@@ -7592,15 +7590,11 @@ static int tg3_init_rings(struct tg3 *tp)
7592 return 0; 7590 return 0;
7593} 7591}
7594 7592
7595/* 7593static void tg3_mem_tx_release(struct tg3 *tp)
7596 * Must not be invoked with interrupt sources disabled and
7597 * the hardware shutdown down.
7598 */
7599static void tg3_free_consistent(struct tg3 *tp)
7600{ 7594{
7601 int i; 7595 int i;
7602 7596
7603 for (i = 0; i < tp->irq_cnt; i++) { 7597 for (i = 0; i < tp->irq_max; i++) {
7604 struct tg3_napi *tnapi = &tp->napi[i]; 7598 struct tg3_napi *tnapi = &tp->napi[i];
7605 7599
7606 if (tnapi->tx_ring) { 7600 if (tnapi->tx_ring) {
@@ -7611,17 +7605,114 @@ static void tg3_free_consistent(struct tg3 *tp)
7611 7605
7612 kfree(tnapi->tx_buffers); 7606 kfree(tnapi->tx_buffers);
7613 tnapi->tx_buffers = NULL; 7607 tnapi->tx_buffers = NULL;
7608 }
7609}
7614 7610
7615 if (tnapi->rx_rcb) { 7611static int tg3_mem_tx_acquire(struct tg3 *tp)
7616 dma_free_coherent(&tp->pdev->dev, 7612{
7617 TG3_RX_RCB_RING_BYTES(tp), 7613 int i;
7618 tnapi->rx_rcb, 7614 struct tg3_napi *tnapi = &tp->napi[0];
7619 tnapi->rx_rcb_mapping); 7615
7620 tnapi->rx_rcb = NULL; 7616 /* If multivector TSS is enabled, vector 0 does not handle
7621 } 7617 * tx interrupts. Don't allocate any resources for it.
7618 */
7619 if (tg3_flag(tp, ENABLE_TSS))
7620 tnapi++;
7621
7622 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7623 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7624 TG3_TX_RING_SIZE, GFP_KERNEL);
7625 if (!tnapi->tx_buffers)
7626 goto err_out;
7627
7628 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7629 TG3_TX_RING_BYTES,
7630 &tnapi->tx_desc_mapping,
7631 GFP_KERNEL);
7632 if (!tnapi->tx_ring)
7633 goto err_out;
7634 }
7635
7636 return 0;
7637
7638err_out:
7639 tg3_mem_tx_release(tp);
7640 return -ENOMEM;
7641}
7642
7643static void tg3_mem_rx_release(struct tg3 *tp)
7644{
7645 int i;
7646
7647 for (i = 0; i < tp->irq_max; i++) {
7648 struct tg3_napi *tnapi = &tp->napi[i];
7622 7649
7623 tg3_rx_prodring_fini(tp, &tnapi->prodring); 7650 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7624 7651
7652 if (!tnapi->rx_rcb)
7653 continue;
7654
7655 dma_free_coherent(&tp->pdev->dev,
7656 TG3_RX_RCB_RING_BYTES(tp),
7657 tnapi->rx_rcb,
7658 tnapi->rx_rcb_mapping);
7659 tnapi->rx_rcb = NULL;
7660 }
7661}
7662
7663static int tg3_mem_rx_acquire(struct tg3 *tp)
7664{
7665 unsigned int i, limit;
7666
7667 limit = tp->rxq_cnt;
7668
7669 /* If RSS is enabled, we need a (dummy) producer ring
7670 * set on vector zero. This is the true hw prodring.
7671 */
7672 if (tg3_flag(tp, ENABLE_RSS))
7673 limit++;
7674
7675 for (i = 0; i < limit; i++) {
7676 struct tg3_napi *tnapi = &tp->napi[i];
7677
7678 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7679 goto err_out;
7680
7681 /* If multivector RSS is enabled, vector 0
7682 * does not handle rx or tx interrupts.
7683 * Don't allocate any resources for it.
7684 */
7685 if (!i && tg3_flag(tp, ENABLE_RSS))
7686 continue;
7687
7688 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7689 TG3_RX_RCB_RING_BYTES(tp),
7690 &tnapi->rx_rcb_mapping,
7691 GFP_KERNEL);
7692 if (!tnapi->rx_rcb)
7693 goto err_out;
7694
7695 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7696 }
7697
7698 return 0;
7699
7700err_out:
7701 tg3_mem_rx_release(tp);
7702 return -ENOMEM;
7703}
7704
7705/*
7706 * Must not be invoked with interrupt sources disabled and
7707 * the hardware shutdown down.
7708 */
7709static void tg3_free_consistent(struct tg3 *tp)
7710{
7711 int i;
7712
7713 for (i = 0; i < tp->irq_cnt; i++) {
7714 struct tg3_napi *tnapi = &tp->napi[i];
7715
7625 if (tnapi->hw_status) { 7716 if (tnapi->hw_status) {
7626 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, 7717 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7627 tnapi->hw_status, 7718 tnapi->hw_status,
@@ -7630,6 +7721,9 @@ static void tg3_free_consistent(struct tg3 *tp)
7630 } 7721 }
7631 } 7722 }
7632 7723
7724 tg3_mem_rx_release(tp);
7725 tg3_mem_tx_release(tp);
7726
7633 if (tp->hw_stats) { 7727 if (tp->hw_stats) {
7634 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 7728 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7635 tp->hw_stats, tp->stats_mapping); 7729 tp->hw_stats, tp->stats_mapping);
@@ -7668,72 +7762,38 @@ static int tg3_alloc_consistent(struct tg3 *tp)
7668 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 7762 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7669 sblk = tnapi->hw_status; 7763 sblk = tnapi->hw_status;
7670 7764
7671 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) 7765 if (tg3_flag(tp, ENABLE_RSS)) {
7672 goto err_out; 7766 u16 *prodptr = 0;
7673 7767
7674 /* If multivector TSS is enabled, vector 0 does not handle 7768 /*
7675 * tx interrupts. Don't allocate any resources for it. 7769 * When RSS is enabled, the status block format changes
7676 */ 7770 * slightly. The "rx_jumbo_consumer", "reserved",
7677 if ((!i && !tg3_flag(tp, ENABLE_TSS)) || 7771 * and "rx_mini_consumer" members get mapped to the
7678 (i && tg3_flag(tp, ENABLE_TSS))) { 7772 * other three rx return ring producer indexes.
7679 tnapi->tx_buffers = kzalloc( 7773 */
7680 sizeof(struct tg3_tx_ring_info) * 7774 switch (i) {
7681 TG3_TX_RING_SIZE, GFP_KERNEL); 7775 case 1:
7682 if (!tnapi->tx_buffers) 7776 prodptr = &sblk->idx[0].rx_producer;
7683 goto err_out; 7777 break;
7684 7778 case 2:
7685 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, 7779 prodptr = &sblk->rx_jumbo_consumer;
7686 TG3_TX_RING_BYTES, 7780 break;
7687 &tnapi->tx_desc_mapping, 7781 case 3:
7688 GFP_KERNEL); 7782 prodptr = &sblk->reserved;
7689 if (!tnapi->tx_ring) 7783 break;
7690 goto err_out; 7784 case 4:
7691 } 7785 prodptr = &sblk->rx_mini_consumer;
7692
7693 /*
7694 * When RSS is enabled, the status block format changes
7695 * slightly. The "rx_jumbo_consumer", "reserved",
7696 * and "rx_mini_consumer" members get mapped to the
7697 * other three rx return ring producer indexes.
7698 */
7699 switch (i) {
7700 default:
7701 if (tg3_flag(tp, ENABLE_RSS)) {
7702 tnapi->rx_rcb_prod_idx = NULL;
7703 break; 7786 break;
7704 } 7787 }
7705 /* Fall through */ 7788 tnapi->rx_rcb_prod_idx = prodptr;
7706 case 1: 7789 } else {
7707 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 7790 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7708 break;
7709 case 2:
7710 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7711 break;
7712 case 3:
7713 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7714 break;
7715 case 4:
7716 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7717 break;
7718 } 7791 }
7719
7720 /*
7721 * If multivector RSS is enabled, vector 0 does not handle
7722 * rx or tx interrupts. Don't allocate any resources for it.
7723 */
7724 if (!i && tg3_flag(tp, ENABLE_RSS))
7725 continue;
7726
7727 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7728 TG3_RX_RCB_RING_BYTES(tp),
7729 &tnapi->rx_rcb_mapping,
7730 GFP_KERNEL);
7731 if (!tnapi->rx_rcb)
7732 goto err_out;
7733
7734 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7735 } 7792 }
7736 7793
7794 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7795 goto err_out;
7796
7737 return 0; 7797 return 0;
7738 7798
7739err_out: 7799err_out:
@@ -8247,9 +8307,10 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8247 nic_addr); 8307 nic_addr);
8248} 8308}
8249 8309
8250static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 8310
8311static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8251{ 8312{
8252 int i; 8313 int i = 0;
8253 8314
8254 if (!tg3_flag(tp, ENABLE_TSS)) { 8315 if (!tg3_flag(tp, ENABLE_TSS)) {
8255 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 8316 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
@@ -8259,31 +8320,43 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8259 tw32(HOSTCC_TXCOL_TICKS, 0); 8320 tw32(HOSTCC_TXCOL_TICKS, 0);
8260 tw32(HOSTCC_TXMAX_FRAMES, 0); 8321 tw32(HOSTCC_TXMAX_FRAMES, 0);
8261 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 8322 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8323
8324 for (; i < tp->txq_cnt; i++) {
8325 u32 reg;
8326
8327 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8328 tw32(reg, ec->tx_coalesce_usecs);
8329 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8330 tw32(reg, ec->tx_max_coalesced_frames);
8331 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8332 tw32(reg, ec->tx_max_coalesced_frames_irq);
8333 }
8262 } 8334 }
8263 8335
8336 for (; i < tp->irq_max - 1; i++) {
8337 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8338 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8339 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8340 }
8341}
8342
8343static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8344{
8345 int i = 0;
8346 u32 limit = tp->rxq_cnt;
8347
8264 if (!tg3_flag(tp, ENABLE_RSS)) { 8348 if (!tg3_flag(tp, ENABLE_RSS)) {
8265 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 8349 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8266 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 8350 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8267 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 8351 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8352 limit--;
8268 } else { 8353 } else {
8269 tw32(HOSTCC_RXCOL_TICKS, 0); 8354 tw32(HOSTCC_RXCOL_TICKS, 0);
8270 tw32(HOSTCC_RXMAX_FRAMES, 0); 8355 tw32(HOSTCC_RXMAX_FRAMES, 0);
8271 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 8356 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8272 } 8357 }
8273 8358
8274 if (!tg3_flag(tp, 5705_PLUS)) { 8359 for (; i < limit; i++) {
8275 u32 val = ec->stats_block_coalesce_usecs;
8276
8277 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8278 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8279
8280 if (!netif_carrier_ok(tp->dev))
8281 val = 0;
8282
8283 tw32(HOSTCC_STAT_COAL_TICKS, val);
8284 }
8285
8286 for (i = 0; i < tp->irq_cnt - 1; i++) {
8287 u32 reg; 8360 u32 reg;
8288 8361
8289 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 8362 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
@@ -8292,27 +8365,30 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8292 tw32(reg, ec->rx_max_coalesced_frames); 8365 tw32(reg, ec->rx_max_coalesced_frames);
8293 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 8366 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8294 tw32(reg, ec->rx_max_coalesced_frames_irq); 8367 tw32(reg, ec->rx_max_coalesced_frames_irq);
8295
8296 if (tg3_flag(tp, ENABLE_TSS)) {
8297 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8298 tw32(reg, ec->tx_coalesce_usecs);
8299 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8300 tw32(reg, ec->tx_max_coalesced_frames);
8301 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8302 tw32(reg, ec->tx_max_coalesced_frames_irq);
8303 }
8304 } 8368 }
8305 8369
8306 for (; i < tp->irq_max - 1; i++) { 8370 for (; i < tp->irq_max - 1; i++) {
8307 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 8371 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8308 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 8372 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8309 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 8373 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8374 }
8375}
8310 8376
8311 if (tg3_flag(tp, ENABLE_TSS)) { 8377static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8312 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 8378{
8313 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 8379 tg3_coal_tx_init(tp, ec);
8314 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 8380 tg3_coal_rx_init(tp, ec);
8315 } 8381
8382 if (!tg3_flag(tp, 5705_PLUS)) {
8383 u32 val = ec->stats_block_coalesce_usecs;
8384
8385 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8386 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8387
8388 if (!netif_carrier_ok(tp->dev))
8389 val = 0;
8390
8391 tw32(HOSTCC_STAT_COAL_TICKS, val);
8316 } 8392 }
8317} 8393}
8318 8394
@@ -8570,13 +8646,12 @@ static void __tg3_set_rx_mode(struct net_device *dev)
8570 } 8646 }
8571} 8647}
8572 8648
8573static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp) 8649static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8574{ 8650{
8575 int i; 8651 int i;
8576 8652
8577 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 8653 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8578 tp->rss_ind_tbl[i] = 8654 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8579 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8580} 8655}
8581 8656
8582static void tg3_rss_check_indir_tbl(struct tg3 *tp) 8657static void tg3_rss_check_indir_tbl(struct tg3 *tp)
@@ -8598,7 +8673,7 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8598 } 8673 }
8599 8674
8600 if (i != TG3_RSS_INDIR_TBL_SIZE) 8675 if (i != TG3_RSS_INDIR_TBL_SIZE)
8601 tg3_rss_init_dflt_indir_tbl(tp); 8676 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8602} 8677}
8603 8678
8604static void tg3_rss_write_indir_tbl(struct tg3 *tp) 8679static void tg3_rss_write_indir_tbl(struct tg3 *tp)
@@ -9495,7 +9570,6 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9495 return tg3_reset_hw(tp, reset_phy); 9570 return tg3_reset_hw(tp, reset_phy);
9496} 9571}
9497 9572
9498#if IS_ENABLED(CONFIG_HWMON)
9499static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 9573static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9500{ 9574{
9501 int i; 9575 int i;
@@ -9548,22 +9622,17 @@ static const struct attribute_group tg3_group = {
9548 .attrs = tg3_attributes, 9622 .attrs = tg3_attributes,
9549}; 9623};
9550 9624
9551#endif
9552
9553static void tg3_hwmon_close(struct tg3 *tp) 9625static void tg3_hwmon_close(struct tg3 *tp)
9554{ 9626{
9555#if IS_ENABLED(CONFIG_HWMON)
9556 if (tp->hwmon_dev) { 9627 if (tp->hwmon_dev) {
9557 hwmon_device_unregister(tp->hwmon_dev); 9628 hwmon_device_unregister(tp->hwmon_dev);
9558 tp->hwmon_dev = NULL; 9629 tp->hwmon_dev = NULL;
9559 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group); 9630 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9560 } 9631 }
9561#endif
9562} 9632}
9563 9633
9564static void tg3_hwmon_open(struct tg3 *tp) 9634static void tg3_hwmon_open(struct tg3 *tp)
9565{ 9635{
9566#if IS_ENABLED(CONFIG_HWMON)
9567 int i, err; 9636 int i, err;
9568 u32 size = 0; 9637 u32 size = 0;
9569 struct pci_dev *pdev = tp->pdev; 9638 struct pci_dev *pdev = tp->pdev;
@@ -9595,7 +9664,6 @@ static void tg3_hwmon_open(struct tg3 *tp)
9595 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 9664 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9596 sysfs_remove_group(&pdev->dev.kobj, &tg3_group); 9665 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9597 } 9666 }
9598#endif
9599} 9667}
9600 9668
9601 9669
@@ -10119,21 +10187,43 @@ static int tg3_request_firmware(struct tg3 *tp)
10119 return 0; 10187 return 0;
10120} 10188}
10121 10189
10122static bool tg3_enable_msix(struct tg3 *tp) 10190static u32 tg3_irq_count(struct tg3 *tp)
10123{ 10191{
10124 int i, rc; 10192 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10125 struct msix_entry msix_ent[tp->irq_max];
10126 10193
10127 tp->irq_cnt = netif_get_num_default_rss_queues(); 10194 if (irq_cnt > 1) {
10128 if (tp->irq_cnt > 1) {
10129 /* We want as many rx rings enabled as there are cpus. 10195 /* We want as many rx rings enabled as there are cpus.
10130 * In multiqueue MSI-X mode, the first MSI-X vector 10196 * In multiqueue MSI-X mode, the first MSI-X vector
10131 * only deals with link interrupts, etc, so we add 10197 * only deals with link interrupts, etc, so we add
10132 * one to the number of vectors we are requesting. 10198 * one to the number of vectors we are requesting.
10133 */ 10199 */
10134 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max); 10200 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10135 } 10201 }
10136 10202
10203 return irq_cnt;
10204}
10205
10206static bool tg3_enable_msix(struct tg3 *tp)
10207{
10208 int i, rc;
10209 struct msix_entry msix_ent[tp->irq_max];
10210
10211 tp->txq_cnt = tp->txq_req;
10212 tp->rxq_cnt = tp->rxq_req;
10213 if (!tp->rxq_cnt)
10214 tp->rxq_cnt = netif_get_num_default_rss_queues();
10215 if (tp->rxq_cnt > tp->rxq_max)
10216 tp->rxq_cnt = tp->rxq_max;
10217
10218 /* Disable multiple TX rings by default. Simple round-robin hardware
10219 * scheduling of the TX rings can cause starvation of rings with
10220 * small packets when other rings have TSO or jumbo packets.
10221 */
10222 if (!tp->txq_req)
10223 tp->txq_cnt = 1;
10224
10225 tp->irq_cnt = tg3_irq_count(tp);
10226
10137 for (i = 0; i < tp->irq_max; i++) { 10227 for (i = 0; i < tp->irq_max; i++) {
10138 msix_ent[i].entry = i; 10228 msix_ent[i].entry = i;
10139 msix_ent[i].vector = 0; 10229 msix_ent[i].vector = 0;
@@ -10148,27 +10238,28 @@ static bool tg3_enable_msix(struct tg3 *tp)
10148 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 10238 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10149 tp->irq_cnt, rc); 10239 tp->irq_cnt, rc);
10150 tp->irq_cnt = rc; 10240 tp->irq_cnt = rc;
10241 tp->rxq_cnt = max(rc - 1, 1);
10242 if (tp->txq_cnt)
10243 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10151 } 10244 }
10152 10245
10153 for (i = 0; i < tp->irq_max; i++) 10246 for (i = 0; i < tp->irq_max; i++)
10154 tp->napi[i].irq_vec = msix_ent[i].vector; 10247 tp->napi[i].irq_vec = msix_ent[i].vector;
10155 10248
10156 netif_set_real_num_tx_queues(tp->dev, 1); 10249 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10157 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10158 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10159 pci_disable_msix(tp->pdev); 10250 pci_disable_msix(tp->pdev);
10160 return false; 10251 return false;
10161 } 10252 }
10162 10253
10163 if (tp->irq_cnt > 1) { 10254 if (tp->irq_cnt == 1)
10164 tg3_flag_set(tp, ENABLE_RSS); 10255 return true;
10165 10256
10166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 10257 tg3_flag_set(tp, ENABLE_RSS);
10167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { 10258
10168 tg3_flag_set(tp, ENABLE_TSS); 10259 if (tp->txq_cnt > 1)
10169 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); 10260 tg3_flag_set(tp, ENABLE_TSS);
10170 } 10261
10171 } 10262 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10172 10263
10173 return true; 10264 return true;
10174} 10265}
@@ -10202,6 +10293,11 @@ defcfg:
10202 if (!tg3_flag(tp, USING_MSIX)) { 10293 if (!tg3_flag(tp, USING_MSIX)) {
10203 tp->irq_cnt = 1; 10294 tp->irq_cnt = 1;
10204 tp->napi[0].irq_vec = tp->pdev->irq; 10295 tp->napi[0].irq_vec = tp->pdev->irq;
10296 }
10297
10298 if (tp->irq_cnt == 1) {
10299 tp->txq_cnt = 1;
10300 tp->rxq_cnt = 1;
10205 netif_set_real_num_tx_queues(tp->dev, 1); 10301 netif_set_real_num_tx_queues(tp->dev, 1);
10206 netif_set_real_num_rx_queues(tp->dev, 1); 10302 netif_set_real_num_rx_queues(tp->dev, 1);
10207 } 10303 }
@@ -10219,38 +10315,11 @@ static void tg3_ints_fini(struct tg3 *tp)
10219 tg3_flag_clear(tp, ENABLE_TSS); 10315 tg3_flag_clear(tp, ENABLE_TSS);
10220} 10316}
10221 10317
10222static int tg3_open(struct net_device *dev) 10318static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10223{ 10319{
10224 struct tg3 *tp = netdev_priv(dev); 10320 struct net_device *dev = tp->dev;
10225 int i, err; 10321 int i, err;
10226 10322
10227 if (tp->fw_needed) {
10228 err = tg3_request_firmware(tp);
10229 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10230 if (err)
10231 return err;
10232 } else if (err) {
10233 netdev_warn(tp->dev, "TSO capability disabled\n");
10234 tg3_flag_clear(tp, TSO_CAPABLE);
10235 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10236 netdev_notice(tp->dev, "TSO capability restored\n");
10237 tg3_flag_set(tp, TSO_CAPABLE);
10238 }
10239 }
10240
10241 netif_carrier_off(tp->dev);
10242
10243 err = tg3_power_up(tp);
10244 if (err)
10245 return err;
10246
10247 tg3_full_lock(tp, 0);
10248
10249 tg3_disable_ints(tp);
10250 tg3_flag_clear(tp, INIT_COMPLETE);
10251
10252 tg3_full_unlock(tp);
10253
10254 /* 10323 /*
10255 * Setup interrupts first so we know how 10324 * Setup interrupts first so we know how
10256 * many NAPI resources to allocate 10325 * many NAPI resources to allocate
@@ -10284,7 +10353,7 @@ static int tg3_open(struct net_device *dev)
10284 10353
10285 tg3_full_lock(tp, 0); 10354 tg3_full_lock(tp, 0);
10286 10355
10287 err = tg3_init_hw(tp, 1); 10356 err = tg3_init_hw(tp, reset_phy);
10288 if (err) { 10357 if (err) {
10289 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 10358 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10290 tg3_free_rings(tp); 10359 tg3_free_rings(tp);
@@ -10295,7 +10364,7 @@ static int tg3_open(struct net_device *dev)
10295 if (err) 10364 if (err)
10296 goto err_out3; 10365 goto err_out3;
10297 10366
10298 if (tg3_flag(tp, USING_MSI)) { 10367 if (test_irq && tg3_flag(tp, USING_MSI)) {
10299 err = tg3_test_msi(tp); 10368 err = tg3_test_msi(tp);
10300 10369
10301 if (err) { 10370 if (err) {
@@ -10351,20 +10420,18 @@ err_out2:
10351 10420
10352err_out1: 10421err_out1:
10353 tg3_ints_fini(tp); 10422 tg3_ints_fini(tp);
10354 tg3_frob_aux_power(tp, false); 10423
10355 pci_set_power_state(tp->pdev, PCI_D3hot);
10356 return err; 10424 return err;
10357} 10425}
10358 10426
10359static int tg3_close(struct net_device *dev) 10427static void tg3_stop(struct tg3 *tp)
10360{ 10428{
10361 int i; 10429 int i;
10362 struct tg3 *tp = netdev_priv(dev);
10363 10430
10364 tg3_napi_disable(tp); 10431 tg3_napi_disable(tp);
10365 tg3_reset_task_cancel(tp); 10432 tg3_reset_task_cancel(tp);
10366 10433
10367 netif_tx_stop_all_queues(dev); 10434 netif_tx_disable(tp->dev);
10368 10435
10369 tg3_timer_stop(tp); 10436 tg3_timer_stop(tp);
10370 10437
@@ -10389,13 +10456,60 @@ static int tg3_close(struct net_device *dev)
10389 10456
10390 tg3_ints_fini(tp); 10457 tg3_ints_fini(tp);
10391 10458
10392 /* Clear stats across close / open calls */
10393 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10394 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10395
10396 tg3_napi_fini(tp); 10459 tg3_napi_fini(tp);
10397 10460
10398 tg3_free_consistent(tp); 10461 tg3_free_consistent(tp);
10462}
10463
10464static int tg3_open(struct net_device *dev)
10465{
10466 struct tg3 *tp = netdev_priv(dev);
10467 int err;
10468
10469 if (tp->fw_needed) {
10470 err = tg3_request_firmware(tp);
10471 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10472 if (err)
10473 return err;
10474 } else if (err) {
10475 netdev_warn(tp->dev, "TSO capability disabled\n");
10476 tg3_flag_clear(tp, TSO_CAPABLE);
10477 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10478 netdev_notice(tp->dev, "TSO capability restored\n");
10479 tg3_flag_set(tp, TSO_CAPABLE);
10480 }
10481 }
10482
10483 netif_carrier_off(tp->dev);
10484
10485 err = tg3_power_up(tp);
10486 if (err)
10487 return err;
10488
10489 tg3_full_lock(tp, 0);
10490
10491 tg3_disable_ints(tp);
10492 tg3_flag_clear(tp, INIT_COMPLETE);
10493
10494 tg3_full_unlock(tp);
10495
10496 err = tg3_start(tp, true, true);
10497 if (err) {
10498 tg3_frob_aux_power(tp, false);
10499 pci_set_power_state(tp->pdev, PCI_D3hot);
10500 }
10501 return err;
10502}
10503
10504static int tg3_close(struct net_device *dev)
10505{
10506 struct tg3 *tp = netdev_priv(dev);
10507
10508 tg3_stop(tp);
10509
10510 /* Clear stats across close / open calls */
10511 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10512 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10399 10513
10400 tg3_power_down(tp); 10514 tg3_power_down(tp);
10401 10515
@@ -11185,11 +11299,11 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11185 switch (info->cmd) { 11299 switch (info->cmd) {
11186 case ETHTOOL_GRXRINGS: 11300 case ETHTOOL_GRXRINGS:
11187 if (netif_running(tp->dev)) 11301 if (netif_running(tp->dev))
11188 info->data = tp->irq_cnt; 11302 info->data = tp->rxq_cnt;
11189 else { 11303 else {
11190 info->data = num_online_cpus(); 11304 info->data = num_online_cpus();
11191 if (info->data > TG3_IRQ_MAX_VECS_RSS) 11305 if (info->data > TG3_RSS_MAX_NUM_QS)
11192 info->data = TG3_IRQ_MAX_VECS_RSS; 11306 info->data = TG3_RSS_MAX_NUM_QS;
11193 } 11307 }
11194 11308
11195 /* The first interrupt vector only 11309 /* The first interrupt vector only
@@ -11246,6 +11360,58 @@ static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11246 return 0; 11360 return 0;
11247} 11361}
11248 11362
11363static void tg3_get_channels(struct net_device *dev,
11364 struct ethtool_channels *channel)
11365{
11366 struct tg3 *tp = netdev_priv(dev);
11367 u32 deflt_qs = netif_get_num_default_rss_queues();
11368
11369 channel->max_rx = tp->rxq_max;
11370 channel->max_tx = tp->txq_max;
11371
11372 if (netif_running(dev)) {
11373 channel->rx_count = tp->rxq_cnt;
11374 channel->tx_count = tp->txq_cnt;
11375 } else {
11376 if (tp->rxq_req)
11377 channel->rx_count = tp->rxq_req;
11378 else
11379 channel->rx_count = min(deflt_qs, tp->rxq_max);
11380
11381 if (tp->txq_req)
11382 channel->tx_count = tp->txq_req;
11383 else
11384 channel->tx_count = min(deflt_qs, tp->txq_max);
11385 }
11386}
11387
11388static int tg3_set_channels(struct net_device *dev,
11389 struct ethtool_channels *channel)
11390{
11391 struct tg3 *tp = netdev_priv(dev);
11392
11393 if (!tg3_flag(tp, SUPPORT_MSIX))
11394 return -EOPNOTSUPP;
11395
11396 if (channel->rx_count > tp->rxq_max ||
11397 channel->tx_count > tp->txq_max)
11398 return -EINVAL;
11399
11400 tp->rxq_req = channel->rx_count;
11401 tp->txq_req = channel->tx_count;
11402
11403 if (!netif_running(dev))
11404 return 0;
11405
11406 tg3_stop(tp);
11407
11408 netif_carrier_off(dev);
11409
11410 tg3_start(tp, true, false);
11411
11412 return 0;
11413}
11414
11249static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 11415static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11250{ 11416{
11251 switch (stringset) { 11417 switch (stringset) {
@@ -12494,6 +12660,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
12494 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 12660 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12495 .get_rxfh_indir = tg3_get_rxfh_indir, 12661 .get_rxfh_indir = tg3_get_rxfh_indir,
12496 .set_rxfh_indir = tg3_set_rxfh_indir, 12662 .set_rxfh_indir = tg3_set_rxfh_indir,
12663 .get_channels = tg3_get_channels,
12664 .set_channels = tg3_set_channels,
12497 .get_ts_info = ethtool_op_get_ts_info, 12665 .get_ts_info = ethtool_op_get_ts_info,
12498}; 12666};
12499 12667
@@ -14510,10 +14678,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
14510 if (tg3_flag(tp, 57765_PLUS)) { 14678 if (tg3_flag(tp, 57765_PLUS)) {
14511 tg3_flag_set(tp, SUPPORT_MSIX); 14679 tg3_flag_set(tp, SUPPORT_MSIX);
14512 tp->irq_max = TG3_IRQ_MAX_VECS; 14680 tp->irq_max = TG3_IRQ_MAX_VECS;
14513 tg3_rss_init_dflt_indir_tbl(tp);
14514 } 14681 }
14515 } 14682 }
14516 14683
14684 tp->txq_max = 1;
14685 tp->rxq_max = 1;
14686 if (tp->irq_max > 1) {
14687 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14688 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14689
14690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14692 tp->txq_max = tp->irq_max - 1;
14693 }
14694
14517 if (tg3_flag(tp, 5755_PLUS) || 14695 if (tg3_flag(tp, 5755_PLUS) ||
14518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 14696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14519 tg3_flag_set(tp, SHORT_DMA_BUG); 14697 tg3_flag_set(tp, SHORT_DMA_BUG);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 6d52cb286826..d9308c32102e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2860,7 +2860,8 @@ struct tg3_rx_prodring_set {
2860 dma_addr_t rx_jmb_mapping; 2860 dma_addr_t rx_jmb_mapping;
2861}; 2861};
2862 2862
2863#define TG3_IRQ_MAX_VECS_RSS 5 2863#define TG3_RSS_MAX_NUM_QS 4
2864#define TG3_IRQ_MAX_VECS_RSS (TG3_RSS_MAX_NUM_QS + 1)
2864#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS 2865#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
2865 2866
2866struct tg3_napi { 2867struct tg3_napi {
@@ -3037,6 +3038,9 @@ struct tg3 {
3037 void (*write32_tx_mbox) (struct tg3 *, u32, 3038 void (*write32_tx_mbox) (struct tg3 *, u32,
3038 u32); 3039 u32);
3039 u32 dma_limit; 3040 u32 dma_limit;
3041 u32 txq_req;
3042 u32 txq_cnt;
3043 u32 txq_max;
3040 3044
3041 /* begin "rx thread" cacheline section */ 3045 /* begin "rx thread" cacheline section */
3042 struct tg3_napi napi[TG3_IRQ_MAX_VECS]; 3046 struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3051,6 +3055,9 @@ struct tg3 {
3051 u32 rx_std_max_post; 3055 u32 rx_std_max_post;
3052 u32 rx_offset; 3056 u32 rx_offset;
3053 u32 rx_pkt_map_sz; 3057 u32 rx_pkt_map_sz;
3058 u32 rxq_req;
3059 u32 rxq_cnt;
3060 u32 rxq_max;
3054 bool rx_refill; 3061 bool rx_refill;
3055 3062
3056 3063
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b441f33258e7..ce1eac529470 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3268,6 +3268,7 @@ bnad_pci_probe(struct pci_dev *pdev,
3268 * Output : using_dac = 1 for 64 bit DMA 3268 * Output : using_dac = 1 for 64 bit DMA
3269 * = 0 for 32 bit DMA 3269 * = 0 for 32 bit DMA
3270 */ 3270 */
3271 using_dac = false;
3271 err = bnad_pci_init(bnad, pdev, &using_dac); 3272 err = bnad_pci_init(bnad, pdev, &using_dac);
3272 if (err) 3273 if (err)
3273 goto unlock_mutex; 3274 goto unlock_mutex;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ec2dafe8ae5b..745a1f53361f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -67,12 +67,12 @@ enum {
67}; 67};
68 68
69enum { 69enum {
70 MEMWIN0_APERTURE = 65536, 70 MEMWIN0_APERTURE = 2048,
71 MEMWIN0_BASE = 0x30000, 71 MEMWIN0_BASE = 0x1b800,
72 MEMWIN1_APERTURE = 32768, 72 MEMWIN1_APERTURE = 32768,
73 MEMWIN1_BASE = 0x28000, 73 MEMWIN1_BASE = 0x28000,
74 MEMWIN2_APERTURE = 2048, 74 MEMWIN2_APERTURE = 65536,
75 MEMWIN2_BASE = 0x1b800, 75 MEMWIN2_BASE = 0x30000,
76}; 76};
77 77
78enum dev_master { 78enum dev_master {
@@ -211,6 +211,9 @@ struct tp_err_stats {
211struct tp_params { 211struct tp_params {
212 unsigned int ntxchan; /* # of Tx channels */ 212 unsigned int ntxchan; /* # of Tx channels */
213 unsigned int tre; /* log2 of core clocks per TP tick */ 213 unsigned int tre; /* log2 of core clocks per TP tick */
214
215 uint32_t dack_re; /* DACK timer resolution */
216 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
214}; 217};
215 218
216struct vpd_params { 219struct vpd_params {
@@ -315,6 +318,10 @@ enum { /* adapter flags */
315 USING_MSI = (1 << 1), 318 USING_MSI = (1 << 1),
316 USING_MSIX = (1 << 2), 319 USING_MSIX = (1 << 2),
317 FW_OK = (1 << 4), 320 FW_OK = (1 << 4),
321 RSS_TNLALLLOOKUP = (1 << 5),
322 USING_SOFT_PARAMS = (1 << 6),
323 MASTER_PF = (1 << 7),
324 FW_OFLD_CONN = (1 << 9),
318}; 325};
319 326
320struct rx_sw_desc; 327struct rx_sw_desc;
@@ -467,6 +474,11 @@ struct sge {
467 u16 rdma_rxq[NCHAN]; 474 u16 rdma_rxq[NCHAN];
468 u16 timer_val[SGE_NTIMERS]; 475 u16 timer_val[SGE_NTIMERS];
469 u8 counter_val[SGE_NCOUNTERS]; 476 u8 counter_val[SGE_NCOUNTERS];
477 u32 fl_pg_order; /* large page allocation size */
478 u32 stat_len; /* length of status page at ring end */
479 u32 pktshift; /* padding between CPL & packet data */
480 u32 fl_align; /* response queue message alignment */
481 u32 fl_starve_thres; /* Free List starvation threshold */
470 unsigned int starve_thres; 482 unsigned int starve_thres;
471 u8 idma_state[2]; 483 u8 idma_state[2];
472 unsigned int egr_start; 484 unsigned int egr_start;
@@ -511,6 +523,8 @@ struct adapter {
511 struct net_device *port[MAX_NPORTS]; 523 struct net_device *port[MAX_NPORTS];
512 u8 chan_map[NCHAN]; /* channel -> port map */ 524 u8 chan_map[NCHAN]; /* channel -> port map */
513 525
526 unsigned int l2t_start;
527 unsigned int l2t_end;
514 struct l2t_data *l2t; 528 struct l2t_data *l2t;
515 void *uld_handle[CXGB4_ULD_MAX]; 529 void *uld_handle[CXGB4_ULD_MAX];
516 struct list_head list_node; 530 struct list_head list_node;
@@ -619,7 +633,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
619int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, 633int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
620 struct net_device *dev, unsigned int iqid); 634 struct net_device *dev, unsigned int iqid);
621irqreturn_t t4_sge_intr_msix(int irq, void *cookie); 635irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
622void t4_sge_init(struct adapter *adap); 636int t4_sge_init(struct adapter *adap);
623void t4_sge_start(struct adapter *adap); 637void t4_sge_start(struct adapter *adap);
624void t4_sge_stop(struct adapter *adap); 638void t4_sge_stop(struct adapter *adap);
625extern int dbfifo_int_thresh; 639extern int dbfifo_int_thresh;
@@ -638,6 +652,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap,
638 return (us * adap->params.vpd.cclk) / 1000; 652 return (us * adap->params.vpd.cclk) / 1000;
639} 653}
640 654
655static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
656 unsigned int ticks)
657{
658 /* add Core Clock / 2 to round ticks to nearest uS */
659 return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
660 adapter->params.vpd.cclk);
661}
662
641void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, 663void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
642 u32 val); 664 u32 val);
643 665
@@ -656,6 +678,9 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
656 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); 678 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
657} 679}
658 680
681void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
682 unsigned int data_reg, const u32 *vals,
683 unsigned int nregs, unsigned int start_idx);
659void t4_intr_enable(struct adapter *adapter); 684void t4_intr_enable(struct adapter *adapter);
660void t4_intr_disable(struct adapter *adapter); 685void t4_intr_disable(struct adapter *adapter);
661int t4_slow_intr_handler(struct adapter *adapter); 686int t4_slow_intr_handler(struct adapter *adapter);
@@ -664,8 +689,12 @@ int t4_wait_dev_ready(struct adapter *adap);
664int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, 689int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
665 struct link_config *lc); 690 struct link_config *lc);
666int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); 691int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
692int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
693 __be32 *buf);
667int t4_seeprom_wp(struct adapter *adapter, bool enable); 694int t4_seeprom_wp(struct adapter *adapter, bool enable);
695int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
668int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 696int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
697unsigned int t4_flash_cfg_addr(struct adapter *adapter);
669int t4_check_fw_version(struct adapter *adapter); 698int t4_check_fw_version(struct adapter *adapter);
670int t4_prep_adapter(struct adapter *adapter); 699int t4_prep_adapter(struct adapter *adapter);
671int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 700int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
@@ -680,6 +709,8 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
680 709
681void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); 710void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
682void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); 711void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
712void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
713 unsigned int mask, unsigned int val);
683void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 714void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
684 struct tp_tcp_stats *v6); 715 struct tp_tcp_stats *v6);
685void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 716void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -695,6 +726,16 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
695int t4_fw_bye(struct adapter *adap, unsigned int mbox); 726int t4_fw_bye(struct adapter *adap, unsigned int mbox);
696int t4_early_init(struct adapter *adap, unsigned int mbox); 727int t4_early_init(struct adapter *adap, unsigned int mbox);
697int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); 728int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
729int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
730int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
731int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
732 const u8 *fw_data, unsigned int size, int force);
733int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
734 unsigned int mtype, unsigned int maddr,
735 u32 *finiver, u32 *finicsum, u32 *cfcsum);
736int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
737 unsigned int cache_line_size);
738int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
698int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 739int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
699 unsigned int vf, unsigned int nparams, const u32 *params, 740 unsigned int vf, unsigned int nparams, const u32 *params,
700 u32 *val); 741 u32 *val);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 933985420acb..6b9f6bb2f7ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -78,28 +78,45 @@
78 */ 78 */
79#define MAX_SGE_TIMERVAL 200U 79#define MAX_SGE_TIMERVAL 200U
80 80
81#ifdef CONFIG_PCI_IOV
82/*
83 * Virtual Function provisioning constants. We need two extra Ingress Queues
84 * with Interrupt capability to serve as the VF's Firmware Event Queue and
85 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
86 * Lists associated with them). For each Ethernet/Control Egress Queue and
87 * for each Free List, we need an Egress Context.
88 */
89enum { 81enum {
82 /*
83 * Physical Function provisioning constants.
84 */
85 PFRES_NVI = 4, /* # of Virtual Interfaces */
86 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
87 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
88 */
89 PFRES_NEQ = 256, /* # of egress queues */
90 PFRES_NIQ = 0, /* # of ingress queues */
91 PFRES_TC = 0, /* PCI-E traffic class */
92 PFRES_NEXACTF = 128, /* # of exact MPS filters */
93
94 PFRES_R_CAPS = FW_CMD_CAP_PF,
95 PFRES_WX_CAPS = FW_CMD_CAP_PF,
96
97#ifdef CONFIG_PCI_IOV
98 /*
99 * Virtual Function provisioning constants. We need two extra Ingress
100 * Queues with Interrupt capability to serve as the VF's Firmware
101 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102 * neither will have Free Lists associated with them). For each
103 * Ethernet/Control Egress Queue and for each Free List, we need an
104 * Egress Context.
105 */
90 VFRES_NPORTS = 1, /* # of "ports" per VF */ 106 VFRES_NPORTS = 1, /* # of "ports" per VF */
91 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ 107 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
92 108
93 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ 109 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
94 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ 110 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
95 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ 111 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
96 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
97 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ 112 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
113 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
98 VFRES_TC = 0, /* PCI-E traffic class */ 114 VFRES_TC = 0, /* PCI-E traffic class */
99 VFRES_NEXACTF = 16, /* # of exact MPS filters */ 115 VFRES_NEXACTF = 16, /* # of exact MPS filters */
100 116
101 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, 117 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
102 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, 118 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
119#endif
103}; 120};
104 121
105/* 122/*
@@ -146,7 +163,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
146 } 163 }
147 /*NOTREACHED*/ 164 /*NOTREACHED*/
148} 165}
149#endif
150 166
151enum { 167enum {
152 MAX_TXQ_ENTRIES = 16384, 168 MAX_TXQ_ENTRIES = 16384,
@@ -193,6 +209,7 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
193}; 209};
194 210
195#define FW_FNAME "cxgb4/t4fw.bin" 211#define FW_FNAME "cxgb4/t4fw.bin"
212#define FW_CFNAME "cxgb4/t4-config.txt"
196 213
197MODULE_DESCRIPTION(DRV_DESC); 214MODULE_DESCRIPTION(DRV_DESC);
198MODULE_AUTHOR("Chelsio Communications"); 215MODULE_AUTHOR("Chelsio Communications");
@@ -201,6 +218,28 @@ MODULE_VERSION(DRV_VERSION);
201MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); 218MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
202MODULE_FIRMWARE(FW_FNAME); 219MODULE_FIRMWARE(FW_FNAME);
203 220
221/*
222 * Normally we're willing to become the firmware's Master PF but will be happy
223 * if another PF has already become the Master and initialized the adapter.
224 * Setting "force_init" will cause this driver to forcibly establish itself as
225 * the Master PF and initialize the adapter.
226 */
227static uint force_init;
228
229module_param(force_init, uint, 0644);
230MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
231
232/*
233 * Normally if the firmware we connect to has Configuration File support, we
234 * use that and only fall back to the old Driver-based initialization if the
235 * Configuration File fails for some reason. If force_old_init is set, then
236 * we'll always use the old Driver-based initialization sequence.
237 */
238static uint force_old_init;
239
240module_param(force_old_init, uint, 0644);
241MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
242
204static int dflt_msg_enable = DFLT_MSG_ENABLE; 243static int dflt_msg_enable = DFLT_MSG_ENABLE;
205 244
206module_param(dflt_msg_enable, int, 0644); 245module_param(dflt_msg_enable, int, 0644);
@@ -236,6 +275,20 @@ module_param_array(intr_cnt, uint, NULL, 0644);
236MODULE_PARM_DESC(intr_cnt, 275MODULE_PARM_DESC(intr_cnt,
237 "thresholds 1..3 for queue interrupt packet counters"); 276 "thresholds 1..3 for queue interrupt packet counters");
238 277
278/*
279 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
280 * offset by 2 bytes in order to have the IP headers line up on 4-byte
281 * boundaries. This is a requirement for many architectures which will throw
282 * a machine check fault if an attempt is made to access one of the 4-byte IP
283 * header fields on a non-4-byte boundary. And it's a major performance issue
284 * even on some architectures which allow it like some implementations of the
285 * x86 ISA. However, some architectures don't mind this and for some very
286 * edge-case performance sensitive applications (like forwarding large volumes
287 * of small packets), setting this DMA offset to 0 will decrease the number of
288 * PCI-E Bus transfers enough to measurably affect performance.
289 */
290static int rx_dma_offset = 2;
291
239static bool vf_acls; 292static bool vf_acls;
240 293
241#ifdef CONFIG_PCI_IOV 294#ifdef CONFIG_PCI_IOV
@@ -248,6 +301,30 @@ module_param_array(num_vf, uint, NULL, 0644);
248MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); 301MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
249#endif 302#endif
250 303
304/*
305 * The filter TCAM has a fixed portion and a variable portion. The fixed
306 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
307 * ports. The variable portion is 36 bits which can include things like Exact
308 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
309 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
310 * far exceed the 36-bit budget for this "compressed" header portion of the
311 * filter. Thus, we have a scarce resource which must be carefully managed.
312 *
313 * By default we set this up to mostly match the set of filter matching
314 * capabilities of T3 but with accommodations for some of T4's more
315 * interesting features:
316 *
317 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
318 * [Inner] VLAN (17), Port (3), FCoE (1) }
319 */
320enum {
321 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
322 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
323 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
324};
325
326static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
327
251static struct dentry *cxgb4_debugfs_root; 328static struct dentry *cxgb4_debugfs_root;
252 329
253static LIST_HEAD(adapter_list); 330static LIST_HEAD(adapter_list);
@@ -852,11 +929,25 @@ static int upgrade_fw(struct adapter *adap)
852 */ 929 */
853 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || 930 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
854 vers > adap->params.fw_vers) { 931 vers > adap->params.fw_vers) {
855 ret = -t4_load_fw(adap, fw->data, fw->size); 932 dev_info(dev, "upgrading firmware ...\n");
933 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
934 /*force=*/false);
856 if (!ret) 935 if (!ret)
857 dev_info(dev, "firmware upgraded to version %pI4 from " 936 dev_info(dev, "firmware successfully upgraded to "
858 FW_FNAME "\n", &hdr->fw_ver); 937 FW_FNAME " (%d.%d.%d.%d)\n",
938 FW_HDR_FW_VER_MAJOR_GET(vers),
939 FW_HDR_FW_VER_MINOR_GET(vers),
940 FW_HDR_FW_VER_MICRO_GET(vers),
941 FW_HDR_FW_VER_BUILD_GET(vers));
942 else
943 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
944 } else {
945 /*
946 * Tell our caller that we didn't upgrade the firmware.
947 */
948 ret = -EINVAL;
859 } 949 }
950
860out: release_firmware(fw); 951out: release_firmware(fw);
861 return ret; 952 return ret;
862} 953}
@@ -2470,8 +2561,8 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2470 else 2561 else
2471 delta = size - hw_pidx + pidx; 2562 delta = size - hw_pidx + pidx;
2472 wmb(); 2563 wmb();
2473 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), 2564 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2474 V_QID(qid) | V_PIDX(delta)); 2565 QID(qid) | PIDX(delta));
2475 } 2566 }
2476out: 2567out:
2477 return ret; 2568 return ret;
@@ -2579,8 +2670,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2579 else 2670 else
2580 delta = q->size - hw_pidx + q->db_pidx; 2671 delta = q->size - hw_pidx + q->db_pidx;
2581 wmb(); 2672 wmb();
2582 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), 2673 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2583 V_QID(q->cntxt_id) | V_PIDX(delta)); 2674 QID(q->cntxt_id) | PIDX(delta));
2584 } 2675 }
2585out: 2676out:
2586 q->db_disabled = 0; 2677 q->db_disabled = 0;
@@ -2617,9 +2708,9 @@ static void process_db_full(struct work_struct *work)
2617 2708
2618 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 2709 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2619 drain_db_fifo(adap, dbfifo_drain_delay); 2710 drain_db_fifo(adap, dbfifo_drain_delay);
2620 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, 2711 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2621 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 2712 DBFIFO_HP_INT | DBFIFO_LP_INT,
2622 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT); 2713 DBFIFO_HP_INT | DBFIFO_LP_INT);
2623 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); 2714 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2624} 2715}
2625 2716
@@ -2639,8 +2730,8 @@ static void process_db_drop(struct work_struct *work)
2639 2730
2640void t4_db_full(struct adapter *adap) 2731void t4_db_full(struct adapter *adap)
2641{ 2732{
2642 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, 2733 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2643 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0); 2734 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
2644 queue_work(workq, &adap->db_full_task); 2735 queue_work(workq, &adap->db_full_task);
2645} 2736}
2646 2737
@@ -3076,6 +3167,10 @@ static void setup_memwin(struct adapter *adap)
3076 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 3167 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3077 (bar0 + MEMWIN2_BASE) | BIR(0) | 3168 (bar0 + MEMWIN2_BASE) | BIR(0) |
3078 WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 3169 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
3170}
3171
3172static void setup_memwin_rdma(struct adapter *adap)
3173{
3079 if (adap->vres.ocq.size) { 3174 if (adap->vres.ocq.size) {
3080 unsigned int start, sz_kb; 3175 unsigned int start, sz_kb;
3081 3176
@@ -3155,6 +3250,488 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3155 3250
3156/* 3251/*
3157 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 3252 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3253 *
3254 * If the firmware we're dealing with has Configuration File support, then
3255 * we use that to perform all configuration
3256 */
3257
3258/*
3259 * Tweak configuration based on module parameters, etc. Most of these have
3260 * defaults assigned to them by Firmware Configuration Files (if we're using
3261 * them) but need to be explicitly set if we're using hard-coded
3262 * initialization. But even in the case of using Firmware Configuration
3263 * Files, we'd like to expose the ability to change these via module
3264 * parameters so these are essentially common tweaks/settings for
3265 * Configuration Files and hard-coded initialization ...
3266 */
3267static int adap_init0_tweaks(struct adapter *adapter)
3268{
3269 /*
3270 * Fix up various Host-Dependent Parameters like Page Size, Cache
3271 * Line Size, etc. The firmware default is for a 4KB Page Size and
3272 * 64B Cache Line Size ...
3273 */
3274 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3275
3276 /*
3277 * Process module parameters which affect early initialization.
3278 */
3279 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3280 dev_err(&adapter->pdev->dev,
3281 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3282 rx_dma_offset);
3283 rx_dma_offset = 2;
3284 }
3285 t4_set_reg_field(adapter, SGE_CONTROL,
3286 PKTSHIFT_MASK,
3287 PKTSHIFT(rx_dma_offset));
3288
3289 /*
3290 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3291 * adds the pseudo header itself.
3292 */
3293 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
3294 CSUM_HAS_PSEUDO_HDR, 0);
3295
3296 return 0;
3297}
3298
3299/*
3300 * Attempt to initialize the adapter via a Firmware Configuration File.
3301 */
3302static int adap_init0_config(struct adapter *adapter, int reset)
3303{
3304 struct fw_caps_config_cmd caps_cmd;
3305 const struct firmware *cf;
3306 unsigned long mtype = 0, maddr = 0;
3307 u32 finiver, finicsum, cfcsum;
3308 int ret, using_flash;
3309
3310 /*
3311 * Reset device if necessary.
3312 */
3313 if (reset) {
3314 ret = t4_fw_reset(adapter, adapter->mbox,
3315 PIORSTMODE | PIORST);
3316 if (ret < 0)
3317 goto bye;
3318 }
3319
3320 /*
3321 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3322 * then use that. Otherwise, use the configuration file stored
3323 * in the adapter flash ...
3324 */
3325 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
3326 if (ret < 0) {
3327 using_flash = 1;
3328 mtype = FW_MEMTYPE_CF_FLASH;
3329 maddr = t4_flash_cfg_addr(adapter);
3330 } else {
3331 u32 params[7], val[7];
3332
3333 using_flash = 0;
3334 if (cf->size >= FLASH_CFG_MAX_SIZE)
3335 ret = -ENOMEM;
3336 else {
3337 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3338 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3339 ret = t4_query_params(adapter, adapter->mbox,
3340 adapter->fn, 0, 1, params, val);
3341 if (ret == 0) {
3342 /*
3343 * For t4_memory_write() below addresses and
3344 * sizes have to be in terms of multiples of 4
3345 * bytes. So, if the Configuration File isn't
3346 * a multiple of 4 bytes in length we'll have
3347 * to write that out separately since we can't
3348 * guarantee that the bytes following the
3349 * residual byte in the buffer returned by
3350 * request_firmware() are zeroed out ...
3351 */
3352 size_t resid = cf->size & 0x3;
3353 size_t size = cf->size & ~0x3;
3354 __be32 *data = (__be32 *)cf->data;
3355
3356 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
3357 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
3358
3359 ret = t4_memory_write(adapter, mtype, maddr,
3360 size, data);
3361 if (ret == 0 && resid != 0) {
3362 union {
3363 __be32 word;
3364 char buf[4];
3365 } last;
3366 int i;
3367
3368 last.word = data[size >> 2];
3369 for (i = resid; i < 4; i++)
3370 last.buf[i] = 0;
3371 ret = t4_memory_write(adapter, mtype,
3372 maddr + size,
3373 4, &last.word);
3374 }
3375 }
3376 }
3377
3378 release_firmware(cf);
3379 if (ret)
3380 goto bye;
3381 }
3382
3383 /*
3384 * Issue a Capability Configuration command to the firmware to get it
3385 * to parse the Configuration File. We don't use t4_fw_config_file()
3386 * because we want the ability to modify various features after we've
3387 * processed the configuration file ...
3388 */
3389 memset(&caps_cmd, 0, sizeof(caps_cmd));
3390 caps_cmd.op_to_write =
3391 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3392 FW_CMD_REQUEST |
3393 FW_CMD_READ);
3394 caps_cmd.retval_len16 =
3395 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
3396 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3397 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
3398 FW_LEN16(caps_cmd));
3399 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3400 &caps_cmd);
3401 if (ret < 0)
3402 goto bye;
3403
3404 finiver = ntohl(caps_cmd.finiver);
3405 finicsum = ntohl(caps_cmd.finicsum);
3406 cfcsum = ntohl(caps_cmd.cfcsum);
3407 if (finicsum != cfcsum)
3408 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3409 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3410 finicsum, cfcsum);
3411
3412 /*
3413 * If we're a pure NIC driver then disable all offloading facilities.
3414 * This will allow the firmware to optimize aspects of the hardware
3415 * configuration which will result in improved performance.
3416 */
3417 caps_cmd.ofldcaps = 0;
3418 caps_cmd.iscsicaps = 0;
3419 caps_cmd.rdmacaps = 0;
3420 caps_cmd.fcoecaps = 0;
3421
3422 /*
3423 * And now tell the firmware to use the configuration we just loaded.
3424 */
3425 caps_cmd.op_to_write =
3426 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3427 FW_CMD_REQUEST |
3428 FW_CMD_WRITE);
3429 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3430 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3431 NULL);
3432 if (ret < 0)
3433 goto bye;
3434
3435 /*
3436 * Tweak configuration based on system architecture, module
3437 * parameters, etc.
3438 */
3439 ret = adap_init0_tweaks(adapter);
3440 if (ret < 0)
3441 goto bye;
3442
3443 /*
3444 * And finally tell the firmware to initialize itself using the
3445 * parameters from the Configuration File.
3446 */
3447 ret = t4_fw_initialize(adapter, adapter->mbox);
3448 if (ret < 0)
3449 goto bye;
3450
3451 /*
3452 * Return successfully and note that we're operating with parameters
3453 * not supplied by the driver, rather than from hard-wired
3454 * initialization constants burried in the driver.
3455 */
3456 adapter->flags |= USING_SOFT_PARAMS;
3457 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3458 "Configuration File %s, version %#x, computed checksum %#x\n",
3459 (using_flash
3460 ? "in device FLASH"
3461 : "/lib/firmware/" FW_CFNAME),
3462 finiver, cfcsum);
3463 return 0;
3464
3465 /*
3466 * Something bad happened. Return the error ... (If the "error"
3467 * is that there's no Configuration File on the adapter we don't
3468 * want to issue a warning since this is fairly common.)
3469 */
3470bye:
3471 if (ret != -ENOENT)
3472 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
3473 -ret);
3474 return ret;
3475}
3476
3477/*
3478 * Attempt to initialize the adapter via hard-coded, driver supplied
3479 * parameters ...
3480 */
3481static int adap_init0_no_config(struct adapter *adapter, int reset)
3482{
3483 struct sge *s = &adapter->sge;
3484 struct fw_caps_config_cmd caps_cmd;
3485 u32 v;
3486 int i, ret;
3487
3488 /*
3489 * Reset device if necessary
3490 */
3491 if (reset) {
3492 ret = t4_fw_reset(adapter, adapter->mbox,
3493 PIORSTMODE | PIORST);
3494 if (ret < 0)
3495 goto bye;
3496 }
3497
3498 /*
3499 * Get device capabilities and select which we'll be using.
3500 */
3501 memset(&caps_cmd, 0, sizeof(caps_cmd));
3502 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3503 FW_CMD_REQUEST | FW_CMD_READ);
3504 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3505 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3506 &caps_cmd);
3507 if (ret < 0)
3508 goto bye;
3509
3510#ifndef CONFIG_CHELSIO_T4_OFFLOAD
3511 /*
3512 * If we're a pure NIC driver then disable all offloading facilities.
3513 * This will allow the firmware to optimize aspects of the hardware
3514 * configuration which will result in improved performance.
3515 */
3516 caps_cmd.ofldcaps = 0;
3517 caps_cmd.iscsicaps = 0;
3518 caps_cmd.rdmacaps = 0;
3519 caps_cmd.fcoecaps = 0;
3520#endif
3521
3522 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3523 if (!vf_acls)
3524 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3525 else
3526 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3527 } else if (vf_acls) {
3528 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
3529 goto bye;
3530 }
3531 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3532 FW_CMD_REQUEST | FW_CMD_WRITE);
3533 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3534 NULL);
3535 if (ret < 0)
3536 goto bye;
3537
3538 /*
3539 * Tweak configuration based on system architecture, module
3540 * parameters, etc.
3541 */
3542 ret = adap_init0_tweaks(adapter);
3543 if (ret < 0)
3544 goto bye;
3545
3546 /*
3547 * Select RSS Global Mode we want to use. We use "Basic Virtual"
3548 * mode which maps each Virtual Interface to its own section of
3549 * the RSS Table and we turn on all map and hash enables ...
3550 */
3551 adapter->flags |= RSS_TNLALLLOOKUP;
3552 ret = t4_config_glbl_rss(adapter, adapter->mbox,
3553 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3554 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3555 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
3556 ((adapter->flags & RSS_TNLALLLOOKUP) ?
3557 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
3558 if (ret < 0)
3559 goto bye;
3560
3561 /*
3562 * Set up our own fundamental resource provisioning ...
3563 */
3564 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
3565 PFRES_NEQ, PFRES_NETHCTRL,
3566 PFRES_NIQFLINT, PFRES_NIQ,
3567 PFRES_TC, PFRES_NVI,
3568 FW_PFVF_CMD_CMASK_MASK,
3569 pfvfres_pmask(adapter, adapter->fn, 0),
3570 PFRES_NEXACTF,
3571 PFRES_R_CAPS, PFRES_WX_CAPS);
3572 if (ret < 0)
3573 goto bye;
3574
3575 /*
3576 * Perform low level SGE initialization. We need to do this before we
3577 * send the firmware the INITIALIZE command because that will cause
3578 * any other PF Drivers which are waiting for the Master
3579 * Initialization to proceed forward.
3580 */
3581 for (i = 0; i < SGE_NTIMERS - 1; i++)
3582 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
3583 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3584 s->counter_val[0] = 1;
3585 for (i = 1; i < SGE_NCOUNTERS; i++)
3586 s->counter_val[i] = min(intr_cnt[i - 1],
3587 THRESHOLD_0_GET(THRESHOLD_0_MASK));
3588 t4_sge_init(adapter);
3589
3590#ifdef CONFIG_PCI_IOV
3591 /*
3592 * Provision resource limits for Virtual Functions. We currently
3593 * grant them all the same static resource limits except for the Port
3594 * Access Rights Mask which we're assigning based on the PF. All of
3595 * the static provisioning stuff for both the PF and VF really needs
3596 * to be managed in a persistent manner for each device which the
3597 * firmware controls.
3598 */
3599 {
3600 int pf, vf;
3601
3602 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3603 if (num_vf[pf] <= 0)
3604 continue;
3605
3606 /* VF numbering starts at 1! */
3607 for (vf = 1; vf <= num_vf[pf]; vf++) {
3608 ret = t4_cfg_pfvf(adapter, adapter->mbox,
3609 pf, vf,
3610 VFRES_NEQ, VFRES_NETHCTRL,
3611 VFRES_NIQFLINT, VFRES_NIQ,
3612 VFRES_TC, VFRES_NVI,
3613 FW_PFVF_CMD_CMASK_GET(
3614 FW_PFVF_CMD_CMASK_MASK),
3615 pfvfres_pmask(
3616 adapter, pf, vf),
3617 VFRES_NEXACTF,
3618 VFRES_R_CAPS, VFRES_WX_CAPS);
3619 if (ret < 0)
3620 dev_warn(adapter->pdev_dev,
3621 "failed to "\
3622 "provision pf/vf=%d/%d; "
3623 "err=%d\n", pf, vf, ret);
3624 }
3625 }
3626 }
3627#endif
3628
3629 /*
3630 * Set up the default filter mode. Later we'll want to implement this
3631 * via a firmware command, etc. ... This needs to be done before the
3632 * firmare initialization command ... If the selected set of fields
3633 * isn't equal to the default value, we'll need to make sure that the
3634 * field selections will fit in the 36-bit budget.
3635 */
3636 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
3637 int i, bits = 0;
3638
3639 for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
3640 switch (tp_vlan_pri_map & (1 << i)) {
3641 case 0:
3642 /* compressed filter field not enabled */
3643 break;
3644 case FCOE_MASK:
3645 bits += 1;
3646 break;
3647 case PORT_MASK:
3648 bits += 3;
3649 break;
3650 case VNIC_ID_MASK:
3651 bits += 17;
3652 break;
3653 case VLAN_MASK:
3654 bits += 17;
3655 break;
3656 case TOS_MASK:
3657 bits += 8;
3658 break;
3659 case PROTOCOL_MASK:
3660 bits += 8;
3661 break;
3662 case ETHERTYPE_MASK:
3663 bits += 16;
3664 break;
3665 case MACMATCH_MASK:
3666 bits += 9;
3667 break;
3668 case MPSHITTYPE_MASK:
3669 bits += 3;
3670 break;
3671 case FRAGMENTATION_MASK:
3672 bits += 1;
3673 break;
3674 }
3675
3676 if (bits > 36) {
3677 dev_err(adapter->pdev_dev,
3678 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
3679 " using %#x\n", tp_vlan_pri_map, bits,
3680 TP_VLAN_PRI_MAP_DEFAULT);
3681 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
3682 }
3683 }
3684 v = tp_vlan_pri_map;
3685 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
3686 &v, 1, TP_VLAN_PRI_MAP);
3687
3688 /*
3689 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
3690 * to support any of the compressed filter fields above. Newer
3691 * versions of the firmware do this automatically but it doesn't hurt
3692 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
3693 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
3694 * since the firmware automatically turns this on and off when we have
3695 * a non-zero number of filters active (since it does have a
3696 * performance impact).
3697 */
3698 if (tp_vlan_pri_map)
3699 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
3700 FIVETUPLELOOKUP_MASK,
3701 FIVETUPLELOOKUP_MASK);
3702
3703 /*
3704 * Tweak some settings.
3705 */
3706 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
3707 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
3708 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
3709 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
3710
3711 /*
3712 * Get basic stuff going by issuing the Firmware Initialize command.
3713 * Note that this _must_ be after all PFVF commands ...
3714 */
3715 ret = t4_fw_initialize(adapter, adapter->mbox);
3716 if (ret < 0)
3717 goto bye;
3718
3719 /*
3720 * Return successfully!
3721 */
3722 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
3723 "driver parameters\n");
3724 return 0;
3725
3726 /*
3727 * Something bad happened. Return the error ...
3728 */
3729bye:
3730 return ret;
3731}
3732
3733/*
3734 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3158 */ 3735 */
3159static int adap_init0(struct adapter *adap) 3736static int adap_init0(struct adapter *adap)
3160{ 3737{
@@ -3162,72 +3739,216 @@ static int adap_init0(struct adapter *adap)
3162 u32 v, port_vec; 3739 u32 v, port_vec;
3163 enum dev_state state; 3740 enum dev_state state;
3164 u32 params[7], val[7]; 3741 u32 params[7], val[7];
3165 struct fw_caps_config_cmd c; 3742 int reset = 1, j;
3166
3167 ret = t4_check_fw_version(adap);
3168 if (ret == -EINVAL || ret > 0) {
3169 if (upgrade_fw(adap) >= 0) /* recache FW version */
3170 ret = t4_check_fw_version(adap);
3171 }
3172 if (ret < 0)
3173 return ret;
3174 3743
3175 /* contact FW, request master */ 3744 /*
3176 ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); 3745 * Contact FW, advertising Master capability (and potentially forcing
3746 * ourselves as the Master PF if our module parameter force_init is
3747 * set).
3748 */
3749 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
3750 force_init ? MASTER_MUST : MASTER_MAY,
3751 &state);
3177 if (ret < 0) { 3752 if (ret < 0) {
3178 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", 3753 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3179 ret); 3754 ret);
3180 return ret; 3755 return ret;
3181 } 3756 }
3757 if (ret == adap->mbox)
3758 adap->flags |= MASTER_PF;
3759 if (force_init && state == DEV_STATE_INIT)
3760 state = DEV_STATE_UNINIT;
3182 3761
3183 /* reset device */ 3762 /*
3184 ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); 3763 * If we're the Master PF Driver and the device is uninitialized,
3185 if (ret < 0) 3764 * then let's consider upgrading the firmware ... (We always want
3186 goto bye; 3765 * to check the firmware version number in order to A. get it for
3187 3766 * later reporting and B. to warn if the currently loaded firmware
3188 for (v = 0; v < SGE_NTIMERS - 1; v++) 3767 * is excessively mismatched relative to the driver.)
3189 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); 3768 */
3190 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; 3769 ret = t4_check_fw_version(adap);
3191 adap->sge.counter_val[0] = 1; 3770 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3192 for (v = 1; v < SGE_NCOUNTERS; v++) 3771 if (ret == -EINVAL || ret > 0) {
3193 adap->sge.counter_val[v] = min(intr_cnt[v - 1], 3772 if (upgrade_fw(adap) >= 0) {
3194 THRESHOLD_3_MASK); 3773 /*
3195#define FW_PARAM_DEV(param) \ 3774 * Note that the chip was reset as part of the
3196 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3775 * firmware upgrade so we don't reset it again
3197 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3776 * below and grab the new firmware version.
3777 */
3778 reset = 0;
3779 ret = t4_check_fw_version(adap);
3780 }
3781 }
3782 if (ret < 0)
3783 return ret;
3784 }
3198 3785
3199 params[0] = FW_PARAM_DEV(CCLK); 3786 /*
3200 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); 3787 * Grab VPD parameters. This should be done after we establish a
3788 * connection to the firmware since some of the VPD parameters
3789 * (notably the Core Clock frequency) are retrieved via requests to
3790 * the firmware. On the other hand, we need these fairly early on
3791 * so we do this right after getting ahold of the firmware.
3792 */
3793 ret = get_vpd_params(adap, &adap->params.vpd);
3201 if (ret < 0) 3794 if (ret < 0)
3202 goto bye; 3795 goto bye;
3203 adap->params.vpd.cclk = val[0];
3204 3796
3205 ret = adap_init1(adap, &c); 3797 /*
3798 * Find out what ports are available to us. Note that we need to do
3799 * this before calling adap_init0_no_config() since it needs nports
3800 * and portvec ...
3801 */
3802 v =
3803 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3804 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
3805 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
3206 if (ret < 0) 3806 if (ret < 0)
3207 goto bye; 3807 goto bye;
3208 3808
3809 adap->params.nports = hweight32(port_vec);
3810 adap->params.portvec = port_vec;
3811
3812 /*
3813 * If the firmware is initialized already (and we're not forcing a
3814 * master initialization), note that we're living with existing
3815 * adapter parameters. Otherwise, it's time to try initializing the
3816 * adapter ...
3817 */
3818 if (state == DEV_STATE_INIT) {
3819 dev_info(adap->pdev_dev, "Coming up as %s: "\
3820 "Adapter already initialized\n",
3821 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3822 adap->flags |= USING_SOFT_PARAMS;
3823 } else {
3824 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3825 "Initializing adapter\n");
3826
3827 /*
3828 * If the firmware doesn't support Configuration
3829 * Files warn user and exit,
3830 */
3831 if (ret < 0)
3832 dev_warn(adap->pdev_dev, "Firmware doesn't support "
3833 "configuration file.\n");
3834 if (force_old_init)
3835 ret = adap_init0_no_config(adap, reset);
3836 else {
3837 /*
3838 * Find out whether we're dealing with a version of
3839 * the firmware which has configuration file support.
3840 */
3841 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3842 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3843 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
3844 params, val);
3845
3846 /*
3847 * If the firmware doesn't support Configuration
3848 * Files, use the old Driver-based, hard-wired
3849 * initialization. Otherwise, try using the
3850 * Configuration File support and fall back to the
3851 * Driver-based initialization if there's no
3852 * Configuration File found.
3853 */
3854 if (ret < 0)
3855 ret = adap_init0_no_config(adap, reset);
3856 else {
3857 /*
3858 * The firmware provides us with a memory
3859 * buffer where we can load a Configuration
3860 * File from the host if we want to override
3861 * the Configuration File in flash.
3862 */
3863
3864 ret = adap_init0_config(adap, reset);
3865 if (ret == -ENOENT) {
3866 dev_info(adap->pdev_dev,
3867 "No Configuration File present "
3868 "on adapter. Using hard-wired "
3869 "configuration parameters.\n");
3870 ret = adap_init0_no_config(adap, reset);
3871 }
3872 }
3873 }
3874 if (ret < 0) {
3875 dev_err(adap->pdev_dev,
3876 "could not initialize adapter, error %d\n",
3877 -ret);
3878 goto bye;
3879 }
3880 }
3881
3882 /*
3883 * If we're living with non-hard-coded parameters (either from a
3884 * Firmware Configuration File or values programmed by a different PF
3885 * Driver), give the SGE code a chance to pull in anything that it
3886 * needs ... Note that this must be called after we retrieve our VPD
3887 * parameters in order to know how to convert core ticks to seconds.
3888 */
3889 if (adap->flags & USING_SOFT_PARAMS) {
3890 ret = t4_sge_init(adap);
3891 if (ret < 0)
3892 goto bye;
3893 }
3894
3895 /*
3896 * Grab some of our basic fundamental operating parameters.
3897 */
3898#define FW_PARAM_DEV(param) \
3899 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3900 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3901
3209#define FW_PARAM_PFVF(param) \ 3902#define FW_PARAM_PFVF(param) \
3210 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3903 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3211 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ 3904 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
3212 FW_PARAMS_PARAM_Y(adap->fn)) 3905 FW_PARAMS_PARAM_Y(0) | \
3906 FW_PARAMS_PARAM_Z(0)
3213 3907
3214 params[0] = FW_PARAM_DEV(PORTVEC); 3908 params[0] = FW_PARAM_PFVF(EQ_START);
3215 params[1] = FW_PARAM_PFVF(L2T_START); 3909 params[1] = FW_PARAM_PFVF(L2T_START);
3216 params[2] = FW_PARAM_PFVF(L2T_END); 3910 params[2] = FW_PARAM_PFVF(L2T_END);
3217 params[3] = FW_PARAM_PFVF(FILTER_START); 3911 params[3] = FW_PARAM_PFVF(FILTER_START);
3218 params[4] = FW_PARAM_PFVF(FILTER_END); 3912 params[4] = FW_PARAM_PFVF(FILTER_END);
3219 params[5] = FW_PARAM_PFVF(IQFLINT_START); 3913 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3220 params[6] = FW_PARAM_PFVF(EQ_START); 3914 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
3221 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3222 if (ret < 0) 3915 if (ret < 0)
3223 goto bye; 3916 goto bye;
3224 port_vec = val[0]; 3917 adap->sge.egr_start = val[0];
3918 adap->l2t_start = val[1];
3919 adap->l2t_end = val[2];
3225 adap->tids.ftid_base = val[3]; 3920 adap->tids.ftid_base = val[3];
3226 adap->tids.nftids = val[4] - val[3] + 1; 3921 adap->tids.nftids = val[4] - val[3] + 1;
3227 adap->sge.ingr_start = val[5]; 3922 adap->sge.ingr_start = val[5];
3228 adap->sge.egr_start = val[6];
3229 3923
3230 if (c.ofldcaps) { 3924 /* query params related to active filter region */
3925 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3926 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3927 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
3928 /* If Active filter size is set we enable establishing
3929 * offload connection through firmware work request
3930 */
3931 if ((val[0] != val[1]) && (ret >= 0)) {
3932 adap->flags |= FW_OFLD_CONN;
3933 adap->tids.aftid_base = val[0];
3934 adap->tids.aftid_end = val[1];
3935 }
3936
3937#ifdef CONFIG_CHELSIO_T4_OFFLOAD
3938 /*
3939 * Get device capabilities so we can determine what resources we need
3940 * to manage.
3941 */
3942 memset(&caps_cmd, 0, sizeof(caps_cmd));
3943 caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3944 FW_CMD_REQUEST | FW_CMD_READ);
3945 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3946 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3947 &caps_cmd);
3948 if (ret < 0)
3949 goto bye;
3950
3951 if (caps_cmd.ofldcaps) {
3231 /* query offload-related parameters */ 3952 /* query offload-related parameters */
3232 params[0] = FW_PARAM_DEV(NTID); 3953 params[0] = FW_PARAM_DEV(NTID);
3233 params[1] = FW_PARAM_PFVF(SERVER_START); 3954 params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -3235,28 +3956,55 @@ static int adap_init0(struct adapter *adap)
3235 params[3] = FW_PARAM_PFVF(TDDP_START); 3956 params[3] = FW_PARAM_PFVF(TDDP_START);
3236 params[4] = FW_PARAM_PFVF(TDDP_END); 3957 params[4] = FW_PARAM_PFVF(TDDP_END);
3237 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3958 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3238 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, 3959 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
3239 val); 3960 params, val);
3240 if (ret < 0) 3961 if (ret < 0)
3241 goto bye; 3962 goto bye;
3242 adap->tids.ntids = val[0]; 3963 adap->tids.ntids = val[0];
3243 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); 3964 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3244 adap->tids.stid_base = val[1]; 3965 adap->tids.stid_base = val[1];
3245 adap->tids.nstids = val[2] - val[1] + 1; 3966 adap->tids.nstids = val[2] - val[1] + 1;
3967 /*
3968 * Setup server filter region. Divide the availble filter
3969 * region into two parts. Regular filters get 1/3rd and server
3970 * filters get 2/3rd part. This is only enabled if workarond
3971 * path is enabled.
3972 * 1. For regular filters.
3973 * 2. Server filter: This are special filters which are used
3974 * to redirect SYN packets to offload queue.
3975 */
3976 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3977 adap->tids.sftid_base = adap->tids.ftid_base +
3978 DIV_ROUND_UP(adap->tids.nftids, 3);
3979 adap->tids.nsftids = adap->tids.nftids -
3980 DIV_ROUND_UP(adap->tids.nftids, 3);
3981 adap->tids.nftids = adap->tids.sftid_base -
3982 adap->tids.ftid_base;
3983 }
3246 adap->vres.ddp.start = val[3]; 3984 adap->vres.ddp.start = val[3];
3247 adap->vres.ddp.size = val[4] - val[3] + 1; 3985 adap->vres.ddp.size = val[4] - val[3] + 1;
3248 adap->params.ofldq_wr_cred = val[5]; 3986 adap->params.ofldq_wr_cred = val[5];
3987
3988 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
3989 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
3990 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
3991 params, val);
3992 if ((val[0] != val[1]) && (ret >= 0)) {
3993 adap->tids.uotid_base = val[0];
3994 adap->tids.nuotids = val[1] - val[0] + 1;
3995 }
3996
3249 adap->params.offload = 1; 3997 adap->params.offload = 1;
3250 } 3998 }
3251 if (c.rdmacaps) { 3999 if (caps_cmd.rdmacaps) {
3252 params[0] = FW_PARAM_PFVF(STAG_START); 4000 params[0] = FW_PARAM_PFVF(STAG_START);
3253 params[1] = FW_PARAM_PFVF(STAG_END); 4001 params[1] = FW_PARAM_PFVF(STAG_END);
3254 params[2] = FW_PARAM_PFVF(RQ_START); 4002 params[2] = FW_PARAM_PFVF(RQ_START);
3255 params[3] = FW_PARAM_PFVF(RQ_END); 4003 params[3] = FW_PARAM_PFVF(RQ_END);
3256 params[4] = FW_PARAM_PFVF(PBL_START); 4004 params[4] = FW_PARAM_PFVF(PBL_START);
3257 params[5] = FW_PARAM_PFVF(PBL_END); 4005 params[5] = FW_PARAM_PFVF(PBL_END);
3258 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, 4006 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
3259 val); 4007 params, val);
3260 if (ret < 0) 4008 if (ret < 0)
3261 goto bye; 4009 goto bye;
3262 adap->vres.stag.start = val[0]; 4010 adap->vres.stag.start = val[0];
@@ -3272,8 +4020,7 @@ static int adap_init0(struct adapter *adap)
3272 params[3] = FW_PARAM_PFVF(CQ_END); 4020 params[3] = FW_PARAM_PFVF(CQ_END);
3273 params[4] = FW_PARAM_PFVF(OCQ_START); 4021 params[4] = FW_PARAM_PFVF(OCQ_START);
3274 params[5] = FW_PARAM_PFVF(OCQ_END); 4022 params[5] = FW_PARAM_PFVF(OCQ_END);
3275 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, 4023 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
3276 val);
3277 if (ret < 0) 4024 if (ret < 0)
3278 goto bye; 4025 goto bye;
3279 adap->vres.qp.start = val[0]; 4026 adap->vres.qp.start = val[0];
@@ -3283,11 +4030,11 @@ static int adap_init0(struct adapter *adap)
3283 adap->vres.ocq.start = val[4]; 4030 adap->vres.ocq.start = val[4];
3284 adap->vres.ocq.size = val[5] - val[4] + 1; 4031 adap->vres.ocq.size = val[5] - val[4] + 1;
3285 } 4032 }
3286 if (c.iscsicaps) { 4033 if (caps_cmd.iscsicaps) {
3287 params[0] = FW_PARAM_PFVF(ISCSI_START); 4034 params[0] = FW_PARAM_PFVF(ISCSI_START);
3288 params[1] = FW_PARAM_PFVF(ISCSI_END); 4035 params[1] = FW_PARAM_PFVF(ISCSI_END);
3289 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, 4036 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
3290 val); 4037 params, val);
3291 if (ret < 0) 4038 if (ret < 0)
3292 goto bye; 4039 goto bye;
3293 adap->vres.iscsi.start = val[0]; 4040 adap->vres.iscsi.start = val[0];
@@ -3295,63 +4042,33 @@ static int adap_init0(struct adapter *adap)
3295 } 4042 }
3296#undef FW_PARAM_PFVF 4043#undef FW_PARAM_PFVF
3297#undef FW_PARAM_DEV 4044#undef FW_PARAM_DEV
4045#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
3298 4046
3299 adap->params.nports = hweight32(port_vec); 4047 /*
3300 adap->params.portvec = port_vec; 4048 * These are finalized by FW initialization, load their values now.
3301 adap->flags |= FW_OK; 4049 */
3302
3303 /* These are finalized by FW initialization, load their values now */
3304 v = t4_read_reg(adap, TP_TIMER_RESOLUTION); 4050 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3305 adap->params.tp.tre = TIMERRESOLUTION_GET(v); 4051 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
4052 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3306 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 4053 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3307 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 4054 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3308 adap->params.b_wnd); 4055 adap->params.b_wnd);
3309 4056
3310#ifdef CONFIG_PCI_IOV 4057 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3311 /* 4058 for (j = 0; j < NCHAN; j++)
3312 * Provision resource limits for Virtual Functions. We currently 4059 adap->params.tp.tx_modq[j] = j;
3313 * grant them all the same static resource limits except for the Port
3314 * Access Rights Mask which we're assigning based on the PF. All of
3315 * the static provisioning stuff for both the PF and VF really needs
3316 * to be managed in a persistent manner for each device which the
3317 * firmware controls.
3318 */
3319 {
3320 int pf, vf;
3321
3322 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3323 if (num_vf[pf] <= 0)
3324 continue;
3325 4060
3326 /* VF numbering starts at 1! */ 4061 adap->flags |= FW_OK;
3327 for (vf = 1; vf <= num_vf[pf]; vf++) {
3328 ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3329 VFRES_NEQ, VFRES_NETHCTRL,
3330 VFRES_NIQFLINT, VFRES_NIQ,
3331 VFRES_TC, VFRES_NVI,
3332 FW_PFVF_CMD_CMASK_MASK,
3333 pfvfres_pmask(adap, pf, vf),
3334 VFRES_NEXACTF,
3335 VFRES_R_CAPS, VFRES_WX_CAPS);
3336 if (ret < 0)
3337 dev_warn(adap->pdev_dev, "failed to "
3338 "provision pf/vf=%d/%d; "
3339 "err=%d\n", pf, vf, ret);
3340 }
3341 }
3342 }
3343#endif
3344
3345 setup_memwin(adap);
3346 return 0; 4062 return 0;
3347 4063
3348 /* 4064 /*
3349 * If a command timed out or failed with EIO FW does not operate within 4065 * Something bad happened. If a command timed out or failed with EIO
3350 * its spec or something catastrophic happened to HW/FW, stop issuing 4066 * FW does not operate within its spec or something catastrophic
3351 * commands. 4067 * happened to HW/FW, stop issuing commands.
3352 */ 4068 */
3353bye: if (ret != -ETIMEDOUT && ret != -EIO) 4069bye:
3354 t4_fw_bye(adap, adap->fn); 4070 if (ret != -ETIMEDOUT && ret != -EIO)
4071 t4_fw_bye(adap, adap->mbox);
3355 return ret; 4072 return ret;
3356} 4073}
3357 4074
@@ -3806,7 +4523,9 @@ static int __devinit init_one(struct pci_dev *pdev,
3806 err = t4_prep_adapter(adapter); 4523 err = t4_prep_adapter(adapter);
3807 if (err) 4524 if (err)
3808 goto out_unmap_bar; 4525 goto out_unmap_bar;
4526 setup_memwin(adapter);
3809 err = adap_init0(adapter); 4527 err = adap_init0(adapter);
4528 setup_memwin_rdma(adapter);
3810 if (err) 4529 if (err)
3811 goto out_unmap_bar; 4530 goto out_unmap_bar;
3812 4531
@@ -3948,8 +4667,11 @@ static void __devexit remove_one(struct pci_dev *pdev)
3948{ 4667{
3949 struct adapter *adapter = pci_get_drvdata(pdev); 4668 struct adapter *adapter = pci_get_drvdata(pdev);
3950 4669
4670#ifdef CONFIG_PCI_IOV
3951 pci_disable_sriov(pdev); 4671 pci_disable_sriov(pdev);
3952 4672
4673#endif
4674
3953 if (adapter) { 4675 if (adapter) {
3954 int i; 4676 int i;
3955 4677
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index d79980c5fc63..1b899fea1a91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -100,6 +100,8 @@ struct tid_info {
100 100
101 unsigned int nftids; 101 unsigned int nftids;
102 unsigned int ftid_base; 102 unsigned int ftid_base;
103 unsigned int aftid_base;
104 unsigned int aftid_end;
103 105
104 spinlock_t atid_lock ____cacheline_aligned_in_smp; 106 spinlock_t atid_lock ____cacheline_aligned_in_smp;
105 union aopen_entry *afree; 107 union aopen_entry *afree;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index d49933ed551f..3ecc087d732d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -68,9 +68,6 @@
68 */ 68 */
69#define RX_PKT_SKB_LEN 512 69#define RX_PKT_SKB_LEN 512
70 70
71/* Ethernet header padding prepended to RX_PKTs */
72#define RX_PKT_PAD 2
73
74/* 71/*
75 * Max number of Tx descriptors we clean up at a time. Should be modest as 72 * Max number of Tx descriptors we clean up at a time. Should be modest as
76 * freeing skbs isn't cheap and it happens while holding locks. We just need 73 * freeing skbs isn't cheap and it happens while holding locks. We just need
@@ -137,13 +134,6 @@
137 */ 134 */
138#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN 135#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
139 136
140enum {
141 /* packet alignment in FL buffers */
142 FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
143 /* egress status entry size */
144 STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
145};
146
147struct tx_sw_desc { /* SW state per Tx descriptor */ 137struct tx_sw_desc { /* SW state per Tx descriptor */
148 struct sk_buff *skb; 138 struct sk_buff *skb;
149 struct ulptx_sgl *sgl; 139 struct ulptx_sgl *sgl;
@@ -155,16 +145,57 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
155}; 145};
156 146
157/* 147/*
158 * The low bits of rx_sw_desc.dma_addr have special meaning. 148 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet pe skb
149 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
150 * We could easily support more but there doesn't seem to be much need for
151 * that ...
152 */
153#define FL_MTU_SMALL 1500
154#define FL_MTU_LARGE 9000
155
156static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
157 unsigned int mtu)
158{
159 struct sge *s = &adapter->sge;
160
161 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
162}
163
164#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
165#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
166
167/*
168 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
169 * these to specify the buffer size as an index into the SGE Free List Buffer
170 * Size register array. We also use bit 4, when the buffer has been unmapped
171 * for DMA, but this is of course never sent to the hardware and is only used
172 * to prevent double unmappings. All of the above requires that the Free List
173 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
174 * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
175 * Free List Buffer alignment is 32 bytes, this works out for us ...
159 */ 176 */
160enum { 177enum {
161 RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ 178 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
162 RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ 179 RX_BUF_SIZE = 0x0f, /* bottom three bits are for buf sizes */
180 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
181
182 /*
183 * XXX We shouldn't depend on being able to use these indices.
184 * XXX Especially when some other Master PF has initialized the
185 * XXX adapter or we use the Firmware Configuration File. We
186 * XXX should really search through the Host Buffer Size register
187 * XXX array for the appropriately sized buffer indices.
188 */
189 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
190 RX_LARGE_PG_BUF = 0x1, /* buffer large (FL_PG_ORDER) page buffer */
191
192 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
193 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
163}; 194};
164 195
165static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) 196static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
166{ 197{
167 return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); 198 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
168} 199}
169 200
170static inline bool is_buf_mapped(const struct rx_sw_desc *d) 201static inline bool is_buf_mapped(const struct rx_sw_desc *d)
@@ -392,14 +423,35 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
392 } 423 }
393} 424}
394 425
395static inline int get_buf_size(const struct rx_sw_desc *d) 426static inline int get_buf_size(struct adapter *adapter,
427 const struct rx_sw_desc *d)
396{ 428{
397#if FL_PG_ORDER > 0 429 struct sge *s = &adapter->sge;
398 return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : 430 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
399 PAGE_SIZE; 431 int buf_size;
400#else 432
401 return PAGE_SIZE; 433 switch (rx_buf_size_idx) {
402#endif 434 case RX_SMALL_PG_BUF:
435 buf_size = PAGE_SIZE;
436 break;
437
438 case RX_LARGE_PG_BUF:
439 buf_size = PAGE_SIZE << s->fl_pg_order;
440 break;
441
442 case RX_SMALL_MTU_BUF:
443 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
444 break;
445
446 case RX_LARGE_MTU_BUF:
447 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
448 break;
449
450 default:
451 BUG_ON(1);
452 }
453
454 return buf_size;
403} 455}
404 456
405/** 457/**
@@ -418,7 +470,8 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
418 470
419 if (is_buf_mapped(d)) 471 if (is_buf_mapped(d))
420 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 472 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
421 get_buf_size(d), PCI_DMA_FROMDEVICE); 473 get_buf_size(adap, d),
474 PCI_DMA_FROMDEVICE);
422 put_page(d->page); 475 put_page(d->page);
423 d->page = NULL; 476 d->page = NULL;
424 if (++q->cidx == q->size) 477 if (++q->cidx == q->size)
@@ -444,7 +497,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
444 497
445 if (is_buf_mapped(d)) 498 if (is_buf_mapped(d))
446 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 499 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
447 get_buf_size(d), PCI_DMA_FROMDEVICE); 500 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
448 d->page = NULL; 501 d->page = NULL;
449 if (++q->cidx == q->size) 502 if (++q->cidx == q->size)
450 q->cidx = 0; 503 q->cidx = 0;
@@ -485,6 +538,7 @@ static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
485static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, 538static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
486 gfp_t gfp) 539 gfp_t gfp)
487{ 540{
541 struct sge *s = &adap->sge;
488 struct page *pg; 542 struct page *pg;
489 dma_addr_t mapping; 543 dma_addr_t mapping;
490 unsigned int cred = q->avail; 544 unsigned int cred = q->avail;
@@ -493,25 +547,27 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
493 547
494 gfp |= __GFP_NOWARN | __GFP_COLD; 548 gfp |= __GFP_NOWARN | __GFP_COLD;
495 549
496#if FL_PG_ORDER > 0 550 if (s->fl_pg_order == 0)
551 goto alloc_small_pages;
552
497 /* 553 /*
498 * Prefer large buffers 554 * Prefer large buffers
499 */ 555 */
500 while (n) { 556 while (n) {
501 pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); 557 pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
502 if (unlikely(!pg)) { 558 if (unlikely(!pg)) {
503 q->large_alloc_failed++; 559 q->large_alloc_failed++;
504 break; /* fall back to single pages */ 560 break; /* fall back to single pages */
505 } 561 }
506 562
507 mapping = dma_map_page(adap->pdev_dev, pg, 0, 563 mapping = dma_map_page(adap->pdev_dev, pg, 0,
508 PAGE_SIZE << FL_PG_ORDER, 564 PAGE_SIZE << s->fl_pg_order,
509 PCI_DMA_FROMDEVICE); 565 PCI_DMA_FROMDEVICE);
510 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { 566 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
511 __free_pages(pg, FL_PG_ORDER); 567 __free_pages(pg, s->fl_pg_order);
512 goto out; /* do not try small pages for this error */ 568 goto out; /* do not try small pages for this error */
513 } 569 }
514 mapping |= RX_LARGE_BUF; 570 mapping |= RX_LARGE_PG_BUF;
515 *d++ = cpu_to_be64(mapping); 571 *d++ = cpu_to_be64(mapping);
516 572
517 set_rx_sw_desc(sd, pg, mapping); 573 set_rx_sw_desc(sd, pg, mapping);
@@ -525,8 +581,8 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
525 } 581 }
526 n--; 582 n--;
527 } 583 }
528#endif
529 584
585alloc_small_pages:
530 while (n--) { 586 while (n--) {
531 pg = __skb_alloc_page(gfp, NULL); 587 pg = __skb_alloc_page(gfp, NULL);
532 if (unlikely(!pg)) { 588 if (unlikely(!pg)) {
@@ -769,8 +825,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
769 wmb(); /* write descriptors before telling HW */ 825 wmb(); /* write descriptors before telling HW */
770 spin_lock(&q->db_lock); 826 spin_lock(&q->db_lock);
771 if (!q->db_disabled) { 827 if (!q->db_disabled) {
772 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), 828 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
773 V_QID(q->cntxt_id) | V_PIDX(n)); 829 QID(q->cntxt_id) | PIDX(n));
774 } 830 }
775 q->db_pidx = q->pidx; 831 q->db_pidx = q->pidx;
776 spin_unlock(&q->db_lock); 832 spin_unlock(&q->db_lock);
@@ -1519,6 +1575,8 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1519static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 1575static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1520 const struct cpl_rx_pkt *pkt) 1576 const struct cpl_rx_pkt *pkt)
1521{ 1577{
1578 struct adapter *adapter = rxq->rspq.adap;
1579 struct sge *s = &adapter->sge;
1522 int ret; 1580 int ret;
1523 struct sk_buff *skb; 1581 struct sk_buff *skb;
1524 1582
@@ -1529,8 +1587,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1529 return; 1587 return;
1530 } 1588 }
1531 1589
1532 copy_frags(skb, gl, RX_PKT_PAD); 1590 copy_frags(skb, gl, s->pktshift);
1533 skb->len = gl->tot_len - RX_PKT_PAD; 1591 skb->len = gl->tot_len - s->pktshift;
1534 skb->data_len = skb->len; 1592 skb->data_len = skb->len;
1535 skb->truesize += skb->data_len; 1593 skb->truesize += skb->data_len;
1536 skb->ip_summed = CHECKSUM_UNNECESSARY; 1594 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1566,6 +1624,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1566 struct sk_buff *skb; 1624 struct sk_buff *skb;
1567 const struct cpl_rx_pkt *pkt; 1625 const struct cpl_rx_pkt *pkt;
1568 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1626 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1627 struct sge *s = &q->adap->sge;
1569 1628
1570 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) 1629 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1571 return handle_trace_pkt(q->adap, si); 1630 return handle_trace_pkt(q->adap, si);
@@ -1585,7 +1644,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1585 return 0; 1644 return 0;
1586 } 1645 }
1587 1646
1588 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ 1647 __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
1589 skb->protocol = eth_type_trans(skb, q->netdev); 1648 skb->protocol = eth_type_trans(skb, q->netdev);
1590 skb_record_rx_queue(skb, q->idx); 1649 skb_record_rx_queue(skb, q->idx);
1591 if (skb->dev->features & NETIF_F_RXHASH) 1650 if (skb->dev->features & NETIF_F_RXHASH)
@@ -1696,6 +1755,8 @@ static int process_responses(struct sge_rspq *q, int budget)
1696 int budget_left = budget; 1755 int budget_left = budget;
1697 const struct rsp_ctrl *rc; 1756 const struct rsp_ctrl *rc;
1698 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1757 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1758 struct adapter *adapter = q->adap;
1759 struct sge *s = &adapter->sge;
1699 1760
1700 while (likely(budget_left)) { 1761 while (likely(budget_left)) {
1701 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 1762 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
@@ -1722,7 +1783,7 @@ static int process_responses(struct sge_rspq *q, int budget)
1722 /* gather packet fragments */ 1783 /* gather packet fragments */
1723 for (frags = 0, fp = si.frags; ; frags++, fp++) { 1784 for (frags = 0, fp = si.frags; ; frags++, fp++) {
1724 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 1785 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1725 bufsz = get_buf_size(rsd); 1786 bufsz = get_buf_size(adapter, rsd);
1726 fp->page = rsd->page; 1787 fp->page = rsd->page;
1727 fp->offset = q->offset; 1788 fp->offset = q->offset;
1728 fp->size = min(bufsz, len); 1789 fp->size = min(bufsz, len);
@@ -1747,7 +1808,7 @@ static int process_responses(struct sge_rspq *q, int budget)
1747 si.nfrags = frags + 1; 1808 si.nfrags = frags + 1;
1748 ret = q->handler(q, q->cur_desc, &si); 1809 ret = q->handler(q, q->cur_desc, &si);
1749 if (likely(ret == 0)) 1810 if (likely(ret == 0))
1750 q->offset += ALIGN(fp->size, FL_ALIGN); 1811 q->offset += ALIGN(fp->size, s->fl_align);
1751 else 1812 else
1752 restore_rx_bufs(&si, &rxq->fl, frags); 1813 restore_rx_bufs(&si, &rxq->fl, frags);
1753 } else if (likely(rsp_type == RSP_TYPE_CPL)) { 1814 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1983,6 +2044,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1983{ 2044{
1984 int ret, flsz = 0; 2045 int ret, flsz = 0;
1985 struct fw_iq_cmd c; 2046 struct fw_iq_cmd c;
2047 struct sge *s = &adap->sge;
1986 struct port_info *pi = netdev_priv(dev); 2048 struct port_info *pi = netdev_priv(dev);
1987 2049
1988 /* Size needs to be multiple of 16, including status entry. */ 2050 /* Size needs to be multiple of 16, including status entry. */
@@ -2015,11 +2077,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2015 fl->size = roundup(fl->size, 8); 2077 fl->size = roundup(fl->size, 8);
2016 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 2078 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2017 sizeof(struct rx_sw_desc), &fl->addr, 2079 sizeof(struct rx_sw_desc), &fl->addr,
2018 &fl->sdesc, STAT_LEN, NUMA_NO_NODE); 2080 &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2019 if (!fl->desc) 2081 if (!fl->desc)
2020 goto fl_nomem; 2082 goto fl_nomem;
2021 2083
2022 flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); 2084 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2023 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | 2085 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2024 FW_IQ_CMD_FL0FETCHRO(1) | 2086 FW_IQ_CMD_FL0FETCHRO(1) |
2025 FW_IQ_CMD_FL0DATARO(1) | 2087 FW_IQ_CMD_FL0DATARO(1) |
@@ -2096,14 +2158,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2096{ 2158{
2097 int ret, nentries; 2159 int ret, nentries;
2098 struct fw_eq_eth_cmd c; 2160 struct fw_eq_eth_cmd c;
2161 struct sge *s = &adap->sge;
2099 struct port_info *pi = netdev_priv(dev); 2162 struct port_info *pi = netdev_priv(dev);
2100 2163
2101 /* Add status entries */ 2164 /* Add status entries */
2102 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2165 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2103 2166
2104 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 2167 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2105 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 2168 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2106 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, 2169 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2107 netdev_queue_numa_node_read(netdevq)); 2170 netdev_queue_numa_node_read(netdevq));
2108 if (!txq->q.desc) 2171 if (!txq->q.desc)
2109 return -ENOMEM; 2172 return -ENOMEM;
@@ -2149,10 +2212,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2149{ 2212{
2150 int ret, nentries; 2213 int ret, nentries;
2151 struct fw_eq_ctrl_cmd c; 2214 struct fw_eq_ctrl_cmd c;
2215 struct sge *s = &adap->sge;
2152 struct port_info *pi = netdev_priv(dev); 2216 struct port_info *pi = netdev_priv(dev);
2153 2217
2154 /* Add status entries */ 2218 /* Add status entries */
2155 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2219 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2156 2220
2157 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, 2221 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2158 sizeof(struct tx_desc), 0, &txq->q.phys_addr, 2222 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
@@ -2200,14 +2264,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2200{ 2264{
2201 int ret, nentries; 2265 int ret, nentries;
2202 struct fw_eq_ofld_cmd c; 2266 struct fw_eq_ofld_cmd c;
2267 struct sge *s = &adap->sge;
2203 struct port_info *pi = netdev_priv(dev); 2268 struct port_info *pi = netdev_priv(dev);
2204 2269
2205 /* Add status entries */ 2270 /* Add status entries */
2206 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2271 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2207 2272
2208 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 2273 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2209 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 2274 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2210 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, 2275 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2211 NUMA_NO_NODE); 2276 NUMA_NO_NODE);
2212 if (!txq->q.desc) 2277 if (!txq->q.desc)
2213 return -ENOMEM; 2278 return -ENOMEM;
@@ -2251,8 +2316,10 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2251 2316
2252static void free_txq(struct adapter *adap, struct sge_txq *q) 2317static void free_txq(struct adapter *adap, struct sge_txq *q)
2253{ 2318{
2319 struct sge *s = &adap->sge;
2320
2254 dma_free_coherent(adap->pdev_dev, 2321 dma_free_coherent(adap->pdev_dev,
2255 q->size * sizeof(struct tx_desc) + STAT_LEN, 2322 q->size * sizeof(struct tx_desc) + s->stat_len,
2256 q->desc, q->phys_addr); 2323 q->desc, q->phys_addr);
2257 q->cntxt_id = 0; 2324 q->cntxt_id = 0;
2258 q->sdesc = NULL; 2325 q->sdesc = NULL;
@@ -2262,6 +2329,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
2262static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, 2329static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2263 struct sge_fl *fl) 2330 struct sge_fl *fl)
2264{ 2331{
2332 struct sge *s = &adap->sge;
2265 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 2333 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2266 2334
2267 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; 2335 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
@@ -2276,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2276 2344
2277 if (fl) { 2345 if (fl) {
2278 free_rx_bufs(adap, fl, fl->avail); 2346 free_rx_bufs(adap, fl, fl->avail);
2279 dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, 2347 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2280 fl->desc, fl->addr); 2348 fl->desc, fl->addr);
2281 kfree(fl->sdesc); 2349 kfree(fl->sdesc);
2282 fl->sdesc = NULL; 2350 fl->sdesc = NULL;
@@ -2408,18 +2476,112 @@ void t4_sge_stop(struct adapter *adap)
2408 * Performs SGE initialization needed every time after a chip reset. 2476 * Performs SGE initialization needed every time after a chip reset.
2409 * We do not initialize any of the queues here, instead the driver 2477 * We do not initialize any of the queues here, instead the driver
2410 * top-level must request them individually. 2478 * top-level must request them individually.
2479 *
2480 * Called in two different modes:
2481 *
2482 * 1. Perform actual hardware initialization and record hard-coded
2483 * parameters which were used. This gets used when we're the
2484 * Master PF and the Firmware Configuration File support didn't
2485 * work for some reason.
2486 *
2487 * 2. We're not the Master PF or initialization was performed with
2488 * a Firmware Configuration File. In this case we need to grab
2489 * any of the SGE operating parameters that we need to have in
2490 * order to do our job and make sure we can live with them ...
2411 */ 2491 */
2412void t4_sge_init(struct adapter *adap) 2492
2493static int t4_sge_init_soft(struct adapter *adap)
2413{ 2494{
2414 unsigned int i, v;
2415 struct sge *s = &adap->sge; 2495 struct sge *s = &adap->sge;
2416 unsigned int fl_align_log = ilog2(FL_ALIGN); 2496 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2497 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2498 u32 ingress_rx_threshold;
2417 2499
2418 t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | 2500 /*
2419 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, 2501 * Verify that CPL messages are going to the Ingress Queue for
2420 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | 2502 * process_responses() and that only packet data is going to the
2421 RXPKTCPLMODE | 2503 * Free Lists.
2422 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); 2504 */
2505 if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
2506 RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
2507 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2508 return -EINVAL;
2509 }
2510
2511 /*
2512 * Validate the Host Buffer Register Array indices that we want to
2513 * use ...
2514 *
2515 * XXX Note that we should really read through the Host Buffer Size
2516 * XXX register array and find the indices of the Buffer Sizes which
2517 * XXX meet our needs!
2518 */
2519 #define READ_FL_BUF(x) \
2520 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
2521
2522 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2523 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2524 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2525 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2526
2527 #undef READ_FL_BUF
2528
2529 if (fl_small_pg != PAGE_SIZE ||
2530 (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
2531 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2532 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2533 fl_small_pg, fl_large_pg);
2534 return -EINVAL;
2535 }
2536 if (fl_large_pg)
2537 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2538
2539 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2540 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2541 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2542 fl_small_mtu, fl_large_mtu);
2543 return -EINVAL;
2544 }
2545
2546 /*
2547 * Retrieve our RX interrupt holdoff timer values and counter
2548 * threshold values from the SGE parameters.
2549 */
2550 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
2551 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
2552 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
2553 s->timer_val[0] = core_ticks_to_us(adap,
2554 TIMERVALUE0_GET(timer_value_0_and_1));
2555 s->timer_val[1] = core_ticks_to_us(adap,
2556 TIMERVALUE1_GET(timer_value_0_and_1));
2557 s->timer_val[2] = core_ticks_to_us(adap,
2558 TIMERVALUE2_GET(timer_value_2_and_3));
2559 s->timer_val[3] = core_ticks_to_us(adap,
2560 TIMERVALUE3_GET(timer_value_2_and_3));
2561 s->timer_val[4] = core_ticks_to_us(adap,
2562 TIMERVALUE4_GET(timer_value_4_and_5));
2563 s->timer_val[5] = core_ticks_to_us(adap,
2564 TIMERVALUE5_GET(timer_value_4_and_5));
2565
2566 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
2567 s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
2568 s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
2569 s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
2570 s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
2571
2572 return 0;
2573}
2574
2575static int t4_sge_init_hard(struct adapter *adap)
2576{
2577 struct sge *s = &adap->sge;
2578
2579 /*
2580 * Set up our basic SGE mode to deliver CPL messages to our Ingress
2581 * Queue and Packet Date to the Free List.
2582 */
2583 t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
2584 RXPKTCPLMODE_MASK);
2423 2585
2424 /* 2586 /*
2425 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows 2587 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
@@ -2433,13 +2595,24 @@ void t4_sge_init(struct adapter *adap)
2433 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, 2595 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2434 F_ENABLE_DROP); 2596 F_ENABLE_DROP);
2435 2597
2436 for (i = v = 0; i < 32; i += 4) 2598 /*
2437 v |= (PAGE_SHIFT - 10) << i; 2599 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
2438 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); 2600 * t4_fixup_host_params().
2439 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); 2601 */
2440#if FL_PG_ORDER > 0 2602 s->fl_pg_order = FL_PG_ORDER;
2441 t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); 2603 if (s->fl_pg_order)
2442#endif 2604 t4_write_reg(adap,
2605 SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
2606 PAGE_SIZE << FL_PG_ORDER);
2607 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
2608 FL_MTU_SMALL_BUFSIZE(adap));
2609 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
2610 FL_MTU_LARGE_BUFSIZE(adap));
2611
2612 /*
2613 * Note that the SGE Ingress Packet Count Interrupt Threshold and
2614 * Timer Holdoff values must be supplied by our caller.
2615 */
2443 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, 2616 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2444 THRESHOLD_0(s->counter_val[0]) | 2617 THRESHOLD_0(s->counter_val[0]) |
2445 THRESHOLD_1(s->counter_val[1]) | 2618 THRESHOLD_1(s->counter_val[1]) |
@@ -2449,14 +2622,54 @@ void t4_sge_init(struct adapter *adap)
2449 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | 2622 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2450 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); 2623 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2451 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, 2624 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2452 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | 2625 TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
2453 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); 2626 TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
2454 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, 2627 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2455 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | 2628 TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
2456 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); 2629 TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
2630
2631 return 0;
2632}
2633
2634int t4_sge_init(struct adapter *adap)
2635{
2636 struct sge *s = &adap->sge;
2637 u32 sge_control;
2638 int ret;
2639
2640 /*
2641 * Ingress Padding Boundary and Egress Status Page Size are set up by
2642 * t4_fixup_host_params().
2643 */
2644 sge_control = t4_read_reg(adap, SGE_CONTROL);
2645 s->pktshift = PKTSHIFT_GET(sge_control);
2646 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
2647 s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
2648 X_INGPADBOUNDARY_SHIFT);
2649
2650 if (adap->flags & USING_SOFT_PARAMS)
2651 ret = t4_sge_init_soft(adap);
2652 else
2653 ret = t4_sge_init_hard(adap);
2654 if (ret < 0)
2655 return ret;
2656
2657 /*
2658 * A FL with <= fl_starve_thres buffers is starving and a periodic
2659 * timer will attempt to refill it. This needs to be larger than the
2660 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2661 * stuck waiting for new packets while the SGE is waiting for us to
2662 * give it more Free List entries. (Note that the SGE's Egress
2663 * Congestion Threshold is in units of 2 Free List pointers.)
2664 */
2665 s->fl_starve_thres
2666 = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
2667
2457 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); 2668 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2458 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); 2669 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2459 s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ 2670 s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2460 s->idma_state[0] = s->idma_state[1] = 0; 2671 s->idma_state[0] = s->idma_state[1] = 0;
2461 spin_lock_init(&s->intrq_lock); 2672 spin_lock_init(&s->intrq_lock);
2673
2674 return 0;
2462} 2675}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index af1601323173..35b81d8b59e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -120,6 +120,28 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 } 120 }
121} 121}
122 122
123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
144
123/* 145/*
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
125 */ 147 */
@@ -330,6 +352,143 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330 return 0; 352 return 0;
331} 353}
332 354
355/*
356 * t4_mem_win_rw - read/write memory through PCIE memory window
357 * @adap: the adapter
358 * @addr: address of first byte requested
359 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
360 * @dir: direction of transfer 1 => read, 0 => write
361 *
362 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
363 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
364 * address @addr.
365 */
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{
368 int i;
369
370 /*
371 * Setup offset into PCIE memory window. Address must be a
372 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
373 * ensure that changes propagate before we attempt to use the new
374 * values.)
375 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1));
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379
380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382 if (dir)
383 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
384 else
385 t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
386 }
387
388 return 0;
389}
390
391/**
392 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
393 * @adap: the adapter
394 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
395 * @addr: address within indicated memory type
396 * @len: amount of memory to transfer
397 * @buf: host memory buffer
398 * @dir: direction of transfer 1 => read, 0 => write
399 *
400 * Reads/writes an [almost] arbitrary memory region in the firmware: the
401 * firmware memory address, length and host buffer must be aligned on
402 * 32-bit boudaries. The memory is transferred as a raw byte sequence
403 * from/to the firmware's memory. If this memory contains data
404 * structures which contain multi-byte integers, it's the callers
405 * responsibility to perform appropriate byte order conversions.
406 */
407static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
408 __be32 *buf, int dir)
409{
410 u32 pos, start, end, offset, memoffset;
411 int ret;
412
413 /*
414 * Argument sanity checks ...
415 */
416 if ((addr & 0x3) || (len & 0x3))
417 return -EINVAL;
418
419 /*
420 * Offset into the region of memory which is being accessed
421 * MEM_EDC0 = 0
422 * MEM_EDC1 = 1
423 * MEM_MC = 2
424 */
425 memoffset = (mtype * (5 * 1024 * 1024));
426
427 /* Determine the PCIE_MEM_ACCESS_OFFSET */
428 addr = addr + memoffset;
429
430 /*
431 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
432 * at a time so we need to round down the start and round up the end.
433 * We'll start copying out of the first line at (addr - start) a word
434 * at a time.
435 */
436 start = addr & ~(MEMWIN0_APERTURE-1);
437 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
438 offset = (addr - start)/sizeof(__be32);
439
440 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
441 __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
442
443 /*
444 * If we're writing, copy the data from the caller's memory
445 * buffer
446 */
447 if (!dir) {
448 /*
449 * If we're doing a partial write, then we need to do
450 * a read-modify-write ...
451 */
452 if (offset || len < MEMWIN0_APERTURE) {
453 ret = t4_mem_win_rw(adap, pos, data, 1);
454 if (ret)
455 return ret;
456 }
457 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
458 len > 0) {
459 data[offset++] = *buf++;
460 len -= sizeof(__be32);
461 }
462 }
463
464 /*
465 * Transfer a block of memory and bail if there's an error.
466 */
467 ret = t4_mem_win_rw(adap, pos, data, dir);
468 if (ret)
469 return ret;
470
471 /*
472 * If we're reading, copy the data into the caller's memory
473 * buffer.
474 */
475 if (dir)
476 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
477 len > 0) {
478 *buf++ = data[offset++];
479 len -= sizeof(__be32);
480 }
481 }
482
483 return 0;
484}
485
486int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
487 __be32 *buf)
488{
489 return t4_memory_rw(adap, mtype, addr, len, buf, 0);
490}
491
333#define EEPROM_STAT_ADDR 0x7bfc 492#define EEPROM_STAT_ADDR 0x7bfc
334#define VPD_BASE 0 493#define VPD_BASE 0
335#define VPD_LEN 512 494#define VPD_LEN 512
@@ -355,8 +514,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
355 * 514 *
356 * Reads card parameters stored in VPD EEPROM. 515 * Reads card parameters stored in VPD EEPROM.
357 */ 516 */
358static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 517int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
359{ 518{
519 u32 cclk_param, cclk_val;
360 int i, ret; 520 int i, ret;
361 int ec, sn; 521 int ec, sn;
362 u8 vpd[VPD_LEN], csum; 522 u8 vpd[VPD_LEN], csum;
@@ -418,6 +578,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
418 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); 578 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
419 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 579 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
420 strim(p->sn); 580 strim(p->sn);
581
582 /*
583 * Ask firmware for the Core Clock since it knows how to translate the
584 * Reference Clock ('V2') VPD field into a Core Clock value ...
585 */
586 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
587 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
588 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
589 1, &cclk_param, &cclk_val);
590 if (ret)
591 return ret;
592 p->cclk = cclk_val;
593
421 return 0; 594 return 0;
422} 595}
423 596
@@ -718,6 +891,77 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
718} 891}
719 892
720/** 893/**
894 * t4_flash_cfg_addr - return the address of the flash configuration file
895 * @adapter: the adapter
896 *
897 * Return the address within the flash where the Firmware Configuration
898 * File is stored.
899 */
900unsigned int t4_flash_cfg_addr(struct adapter *adapter)
901{
902 if (adapter->params.sf_size == 0x100000)
903 return FLASH_FPGA_CFG_START;
904 else
905 return FLASH_CFG_START;
906}
907
908/**
909 * t4_load_cfg - download config file
910 * @adap: the adapter
911 * @cfg_data: the cfg text file to write
912 * @size: text file size
913 *
914 * Write the supplied config text file to the card's serial flash.
915 */
916int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
917{
918 int ret, i, n;
919 unsigned int addr;
920 unsigned int flash_cfg_start_sec;
921 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
922
923 addr = t4_flash_cfg_addr(adap);
924 flash_cfg_start_sec = addr / SF_SEC_SIZE;
925
926 if (size > FLASH_CFG_MAX_SIZE) {
927 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
928 FLASH_CFG_MAX_SIZE);
929 return -EFBIG;
930 }
931
932 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
933 sf_sec_size);
934 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
935 flash_cfg_start_sec + i - 1);
936 /*
937 * If size == 0 then we're simply erasing the FLASH sectors associated
938 * with the on-adapter Firmware Configuration File.
939 */
940 if (ret || size == 0)
941 goto out;
942
943 /* this will write to the flash up to SF_PAGE_SIZE at a time */
944 for (i = 0; i < size; i += SF_PAGE_SIZE) {
945 if ((size - i) < SF_PAGE_SIZE)
946 n = size - i;
947 else
948 n = SF_PAGE_SIZE;
949 ret = t4_write_flash(adap, addr, n, cfg_data);
950 if (ret)
951 goto out;
952
953 addr += SF_PAGE_SIZE;
954 cfg_data += SF_PAGE_SIZE;
955 }
956
957out:
958 if (ret)
959 dev_err(adap->pdev_dev, "config file %s failed %d\n",
960 (size == 0 ? "clear" : "download"), ret);
961 return ret;
962}
963
964/**
721 * t4_load_fw - download firmware 965 * t4_load_fw - download firmware
722 * @adap: the adapter 966 * @adap: the adapter
723 * @fw_data: the firmware image to write 967 * @fw_data: the firmware image to write
@@ -1018,9 +1262,9 @@ static void sge_intr_handler(struct adapter *adapter)
1018 { ERR_INVALID_CIDX_INC, 1262 { ERR_INVALID_CIDX_INC,
1019 "SGE GTS CIDX increment too large", -1, 0 }, 1263 "SGE GTS CIDX increment too large", -1, 0 },
1020 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 1264 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1021 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, 1265 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1022 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, 1266 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1023 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, 1267 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1024 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 1268 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1025 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 1269 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1026 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 1270 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
@@ -1520,7 +1764,7 @@ void t4_intr_enable(struct adapter *adapter)
1520 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 1764 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1521 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 1765 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1522 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | 1766 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1523 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT | 1767 DBFIFO_HP_INT | DBFIFO_LP_INT |
1524 EGRESS_SIZE_ERR); 1768 EGRESS_SIZE_ERR);
1525 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); 1769 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1526 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); 1770 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
@@ -1717,6 +1961,23 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1717} 1961}
1718 1962
1719/** 1963/**
1964 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
1965 * @adap: the adapter
1966 * @addr: the indirect TP register address
1967 * @mask: specifies the field within the register to modify
1968 * @val: new value for the field
1969 *
1970 * Sets a field of an indirect TP register to the given value.
1971 */
1972void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1973 unsigned int mask, unsigned int val)
1974{
1975 t4_write_reg(adap, TP_PIO_ADDR, addr);
1976 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
1977 t4_write_reg(adap, TP_PIO_DATA, val);
1978}
1979
1980/**
1720 * init_cong_ctrl - initialize congestion control parameters 1981 * init_cong_ctrl - initialize congestion control parameters
1721 * @a: the alpha values for congestion control 1982 * @a: the alpha values for congestion control
1722 * @b: the beta values for congestion control 1983 * @b: the beta values for congestion control
@@ -2000,9 +2261,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2000 struct fw_ldst_cmd c; 2261 struct fw_ldst_cmd c;
2001 2262
2002 memset(&c, 0, sizeof(c)); 2263 memset(&c, 0, sizeof(c));
2003 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 2264 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2004 F_FW_CMD_WRITE | 2265 FW_CMD_WRITE |
2005 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); 2266 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2006 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2267 c.cycles_to_len16 = htonl(FW_LEN16(c));
2007 c.u.addrval.addr = htonl(addr); 2268 c.u.addrval.addr = htonl(addr);
2008 c.u.addrval.val = htonl(val); 2269 c.u.addrval.val = htonl(val);
@@ -2033,8 +2294,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2033 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) 2294 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2034 return -EINVAL; 2295 return -EINVAL;
2035 2296
2036 t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15); 2297 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2037 t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET); 2298 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2038 2299
2039 for (i = 0; i < len; i += 4) 2300 for (i = 0; i < len; i += 4)
2040 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); 2301 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
@@ -2102,39 +2363,129 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2102} 2363}
2103 2364
2104/** 2365/**
2105 * t4_fw_hello - establish communication with FW 2366 * t4_fw_hello - establish communication with FW
2106 * @adap: the adapter 2367 * @adap: the adapter
2107 * @mbox: mailbox to use for the FW command 2368 * @mbox: mailbox to use for the FW command
2108 * @evt_mbox: mailbox to receive async FW events 2369 * @evt_mbox: mailbox to receive async FW events
2109 * @master: specifies the caller's willingness to be the device master 2370 * @master: specifies the caller's willingness to be the device master
2110 * @state: returns the current device state 2371 * @state: returns the current device state (if non-NULL)
2111 * 2372 *
2112 * Issues a command to establish communication with FW. 2373 * Issues a command to establish communication with FW. Returns either
2374 * an error (negative integer) or the mailbox of the Master PF.
2113 */ 2375 */
2114int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 2376int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2115 enum dev_master master, enum dev_state *state) 2377 enum dev_master master, enum dev_state *state)
2116{ 2378{
2117 int ret; 2379 int ret;
2118 struct fw_hello_cmd c; 2380 struct fw_hello_cmd c;
2381 u32 v;
2382 unsigned int master_mbox;
2383 int retries = FW_CMD_HELLO_RETRIES;
2119 2384
2385retry:
2386 memset(&c, 0, sizeof(c));
2120 INIT_CMD(c, HELLO, WRITE); 2387 INIT_CMD(c, HELLO, WRITE);
2121 c.err_to_mbasyncnot = htonl( 2388 c.err_to_mbasyncnot = htonl(
2122 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 2389 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2123 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 2390 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2124 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | 2391 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2125 FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); 2392 FW_HELLO_CMD_MBMASTER_MASK) |
2393 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2394 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2395 FW_HELLO_CMD_CLEARINIT);
2126 2396
2397 /*
2398 * Issue the HELLO command to the firmware. If it's not successful
2399 * but indicates that we got a "busy" or "timeout" condition, retry
2400 * the HELLO until we exhaust our retry limit.
2401 */
2127 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2402 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2128 if (ret == 0 && state) { 2403 if (ret < 0) {
2129 u32 v = ntohl(c.err_to_mbasyncnot); 2404 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2130 if (v & FW_HELLO_CMD_INIT) 2405 goto retry;
2131 *state = DEV_STATE_INIT; 2406 return ret;
2132 else if (v & FW_HELLO_CMD_ERR) 2407 }
2408
2409 v = ntohl(c.err_to_mbasyncnot);
2410 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2411 if (state) {
2412 if (v & FW_HELLO_CMD_ERR)
2133 *state = DEV_STATE_ERR; 2413 *state = DEV_STATE_ERR;
2414 else if (v & FW_HELLO_CMD_INIT)
2415 *state = DEV_STATE_INIT;
2134 else 2416 else
2135 *state = DEV_STATE_UNINIT; 2417 *state = DEV_STATE_UNINIT;
2136 } 2418 }
2137 return ret; 2419
2420 /*
2421 * If we're not the Master PF then we need to wait around for the
2422 * Master PF Driver to finish setting up the adapter.
2423 *
2424 * Note that we also do this wait if we're a non-Master-capable PF and
2425 * there is no current Master PF; a Master PF may show up momentarily
2426 * and we wouldn't want to fail pointlessly. (This can happen when an
2427 * OS loads lots of different drivers rapidly at the same time). In
2428 * this case, the Master PF returned by the firmware will be
2429 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2430 */
2431 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2432 master_mbox != mbox) {
2433 int waiting = FW_CMD_HELLO_TIMEOUT;
2434
2435 /*
2436 * Wait for the firmware to either indicate an error or
2437 * initialized state. If we see either of these we bail out
2438 * and report the issue to the caller. If we exhaust the
2439 * "hello timeout" and we haven't exhausted our retries, try
2440 * again. Otherwise bail with a timeout error.
2441 */
2442 for (;;) {
2443 u32 pcie_fw;
2444
2445 msleep(50);
2446 waiting -= 50;
2447
2448 /*
2449 * If neither Error nor Initialialized are indicated
2450 * by the firmware keep waiting till we exaust our
2451 * timeout ... and then retry if we haven't exhausted
2452 * our retries ...
2453 */
2454 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2455 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2456 if (waiting <= 0) {
2457 if (retries-- > 0)
2458 goto retry;
2459
2460 return -ETIMEDOUT;
2461 }
2462 continue;
2463 }
2464
2465 /*
2466 * We either have an Error or Initialized condition
2467 * report errors preferentially.
2468 */
2469 if (state) {
2470 if (pcie_fw & FW_PCIE_FW_ERR)
2471 *state = DEV_STATE_ERR;
2472 else if (pcie_fw & FW_PCIE_FW_INIT)
2473 *state = DEV_STATE_INIT;
2474 }
2475
2476 /*
2477 * If we arrived before a Master PF was selected and
2478 * there's not a valid Master PF, grab its identity
2479 * for our caller.
2480 */
2481 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2482 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2483 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2484 break;
2485 }
2486 }
2487
2488 return master_mbox;
2138} 2489}
2139 2490
2140/** 2491/**
@@ -2186,6 +2537,334 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2186} 2537}
2187 2538
2188/** 2539/**
2540 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2541 * @adap: the adapter
2542 * @mbox: mailbox to use for the FW RESET command (if desired)
2543 * @force: force uP into RESET even if FW RESET command fails
2544 *
2545 * Issues a RESET command to firmware (if desired) with a HALT indication
2546 * and then puts the microprocessor into RESET state. The RESET command
2547 * will only be issued if a legitimate mailbox is provided (mbox <=
2548 * FW_PCIE_FW_MASTER_MASK).
2549 *
2550 * This is generally used in order for the host to safely manipulate the
2551 * adapter without fear of conflicting with whatever the firmware might
2552 * be doing. The only way out of this state is to RESTART the firmware
2553 * ...
2554 */
2555int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2556{
2557 int ret = 0;
2558
2559 /*
2560 * If a legitimate mailbox is provided, issue a RESET command
2561 * with a HALT indication.
2562 */
2563 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2564 struct fw_reset_cmd c;
2565
2566 memset(&c, 0, sizeof(c));
2567 INIT_CMD(c, RESET, WRITE);
2568 c.val = htonl(PIORST | PIORSTMODE);
2569 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2570 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2571 }
2572
2573 /*
2574 * Normally we won't complete the operation if the firmware RESET
2575 * command fails but if our caller insists we'll go ahead and put the
2576 * uP into RESET. This can be useful if the firmware is hung or even
2577 * missing ... We'll have to take the risk of putting the uP into
2578 * RESET without the cooperation of firmware in that case.
2579 *
2580 * We also force the firmware's HALT flag to be on in case we bypassed
2581 * the firmware RESET command above or we're dealing with old firmware
2582 * which doesn't have the HALT capability. This will serve as a flag
2583 * for the incoming firmware to know that it's coming out of a HALT
2584 * rather than a RESET ... if it's new enough to understand that ...
2585 */
2586 if (ret == 0 || force) {
2587 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2588 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2589 FW_PCIE_FW_HALT);
2590 }
2591
2592 /*
2593 * And we always return the result of the firmware RESET command
2594 * even when we force the uP into RESET ...
2595 */
2596 return ret;
2597}
2598
2599/**
2600 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2601 * @adap: the adapter
2602 * @reset: if we want to do a RESET to restart things
2603 *
2604 * Restart firmware previously halted by t4_fw_halt(). On successful
2605 * return the previous PF Master remains as the new PF Master and there
2606 * is no need to issue a new HELLO command, etc.
2607 *
2608 * We do this in two ways:
2609 *
2610 * 1. If we're dealing with newer firmware we'll simply want to take
2611 * the chip's microprocessor out of RESET. This will cause the
2612 * firmware to start up from its start vector. And then we'll loop
2613 * until the firmware indicates it's started again (PCIE_FW.HALT
2614 * reset to 0) or we timeout.
2615 *
2616 * 2. If we're dealing with older firmware then we'll need to RESET
2617 * the chip since older firmware won't recognize the PCIE_FW.HALT
2618 * flag and automatically RESET itself on startup.
2619 */
2620int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2621{
2622 if (reset) {
2623 /*
2624 * Since we're directing the RESET instead of the firmware
2625 * doing it automatically, we need to clear the PCIE_FW.HALT
2626 * bit.
2627 */
2628 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2629
2630 /*
2631 * If we've been given a valid mailbox, first try to get the
2632 * firmware to do the RESET. If that works, great and we can
2633 * return success. Otherwise, if we haven't been given a
2634 * valid mailbox or the RESET command failed, fall back to
2635 * hitting the chip with a hammer.
2636 */
2637 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2638 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2639 msleep(100);
2640 if (t4_fw_reset(adap, mbox,
2641 PIORST | PIORSTMODE) == 0)
2642 return 0;
2643 }
2644
2645 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2646 msleep(2000);
2647 } else {
2648 int ms;
2649
2650 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2651 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2652 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2653 return 0;
2654 msleep(100);
2655 ms += 100;
2656 }
2657 return -ETIMEDOUT;
2658 }
2659 return 0;
2660}
2661
2662/**
2663 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
2664 * @adap: the adapter
2665 * @mbox: mailbox to use for the FW RESET command (if desired)
2666 * @fw_data: the firmware image to write
2667 * @size: image size
2668 * @force: force upgrade even if firmware doesn't cooperate
2669 *
2670 * Perform all of the steps necessary for upgrading an adapter's
2671 * firmware image. Normally this requires the cooperation of the
2672 * existing firmware in order to halt all existing activities
2673 * but if an invalid mailbox token is passed in we skip that step
2674 * (though we'll still put the adapter microprocessor into RESET in
2675 * that case).
2676 *
2677 * On successful return the new firmware will have been loaded and
2678 * the adapter will have been fully RESET losing all previous setup
2679 * state. On unsuccessful return the adapter may be completely hosed ...
2680 * positive errno indicates that the adapter is ~probably~ intact, a
2681 * negative errno indicates that things are looking bad ...
2682 */
2683int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
2684 const u8 *fw_data, unsigned int size, int force)
2685{
2686 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
2687 int reset, ret;
2688
2689 ret = t4_fw_halt(adap, mbox, force);
2690 if (ret < 0 && !force)
2691 return ret;
2692
2693 ret = t4_load_fw(adap, fw_data, size);
2694 if (ret < 0)
2695 return ret;
2696
2697 /*
2698 * Older versions of the firmware don't understand the new
2699 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
2700 * restart. So for newly loaded older firmware we'll have to do the
2701 * RESET for it so it starts up on a clean slate. We can tell if
2702 * the newly loaded firmware will handle this right by checking
2703 * its header flags to see if it advertises the capability.
2704 */
2705 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
2706 return t4_fw_restart(adap, mbox, reset);
2707}
2708
2709
2710/**
2711 * t4_fw_config_file - setup an adapter via a Configuration File
2712 * @adap: the adapter
2713 * @mbox: mailbox to use for the FW command
2714 * @mtype: the memory type where the Configuration File is located
2715 * @maddr: the memory address where the Configuration File is located
2716 * @finiver: return value for CF [fini] version
2717 * @finicsum: return value for CF [fini] checksum
2718 * @cfcsum: return value for CF computed checksum
2719 *
2720 * Issue a command to get the firmware to process the Configuration
2721 * File located at the specified mtype/maddress. If the Configuration
2722 * File is processed successfully and return value pointers are
2723 * provided, the Configuration File "[fini] section version and
2724 * checksum values will be returned along with the computed checksum.
2725 * It's up to the caller to decide how it wants to respond to the
2726 * checksums not matching but it recommended that a prominant warning
2727 * be emitted in order to help people rapidly identify changed or
2728 * corrupted Configuration Files.
2729 *
2730 * Also note that it's possible to modify things like "niccaps",
2731 * "toecaps",etc. between processing the Configuration File and telling
2732 * the firmware to use the new configuration. Callers which want to
2733 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
2734 * Configuration Files if they want to do this.
2735 */
2736int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
2737 unsigned int mtype, unsigned int maddr,
2738 u32 *finiver, u32 *finicsum, u32 *cfcsum)
2739{
2740 struct fw_caps_config_cmd caps_cmd;
2741 int ret;
2742
2743 /*
2744 * Tell the firmware to process the indicated Configuration File.
2745 * If there are no errors and the caller has provided return value
2746 * pointers for the [fini] section version, checksum and computed
2747 * checksum, pass those back to the caller.
2748 */
2749 memset(&caps_cmd, 0, sizeof(caps_cmd));
2750 caps_cmd.op_to_write =
2751 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2752 FW_CMD_REQUEST |
2753 FW_CMD_READ);
2754 caps_cmd.retval_len16 =
2755 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
2756 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2757 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
2758 FW_LEN16(caps_cmd));
2759 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
2760 if (ret < 0)
2761 return ret;
2762
2763 if (finiver)
2764 *finiver = ntohl(caps_cmd.finiver);
2765 if (finicsum)
2766 *finicsum = ntohl(caps_cmd.finicsum);
2767 if (cfcsum)
2768 *cfcsum = ntohl(caps_cmd.cfcsum);
2769
2770 /*
2771 * And now tell the firmware to use the configuration we just loaded.
2772 */
2773 caps_cmd.op_to_write =
2774 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2775 FW_CMD_REQUEST |
2776 FW_CMD_WRITE);
2777 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
2778 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
2779}
2780
2781/**
2782 * t4_fixup_host_params - fix up host-dependent parameters
2783 * @adap: the adapter
2784 * @page_size: the host's Base Page Size
2785 * @cache_line_size: the host's Cache Line Size
2786 *
2787 * Various registers in T4 contain values which are dependent on the
2788 * host's Base Page and Cache Line Sizes. This function will fix all of
2789 * those registers with the appropriate values as passed in ...
2790 */
2791int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2792 unsigned int cache_line_size)
2793{
2794 unsigned int page_shift = fls(page_size) - 1;
2795 unsigned int sge_hps = page_shift - 10;
2796 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2797 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2798 unsigned int fl_align_log = fls(fl_align) - 1;
2799
2800 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2801 HOSTPAGESIZEPF0(sge_hps) |
2802 HOSTPAGESIZEPF1(sge_hps) |
2803 HOSTPAGESIZEPF2(sge_hps) |
2804 HOSTPAGESIZEPF3(sge_hps) |
2805 HOSTPAGESIZEPF4(sge_hps) |
2806 HOSTPAGESIZEPF5(sge_hps) |
2807 HOSTPAGESIZEPF6(sge_hps) |
2808 HOSTPAGESIZEPF7(sge_hps));
2809
2810 t4_set_reg_field(adap, SGE_CONTROL,
2811 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
2812 EGRSTATUSPAGESIZE_MASK,
2813 INGPADBOUNDARY(fl_align_log - 5) |
2814 EGRSTATUSPAGESIZE(stat_len != 64));
2815
2816 /*
2817 * Adjust various SGE Free List Host Buffer Sizes.
2818 *
2819 * This is something of a crock since we're using fixed indices into
2820 * the array which are also known by the sge.c code and the T4
2821 * Firmware Configuration File. We need to come up with a much better
2822 * approach to managing this array. For now, the first four entries
2823 * are:
2824 *
2825 * 0: Host Page Size
2826 * 1: 64KB
2827 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2828 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2829 *
2830 * For the single-MTU buffers in unpacked mode we need to include
2831 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2832 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
2833 * Padding boundry. All of these are accommodated in the Factory
2834 * Default Firmware Configuration File but we need to adjust it for
2835 * this host's cache line size.
2836 */
2837 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2838 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2839 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2840 & ~(fl_align-1));
2841 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2842 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2843 & ~(fl_align-1));
2844
2845 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2846
2847 return 0;
2848}
2849
2850/**
2851 * t4_fw_initialize - ask FW to initialize the device
2852 * @adap: the adapter
2853 * @mbox: mailbox to use for the FW command
2854 *
2855 * Issues a command to FW to partially initialize the device. This
2856 * performs initialization that generally doesn't depend on user input.
2857 */
2858int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2859{
2860 struct fw_initialize_cmd c;
2861
2862 memset(&c, 0, sizeof(c));
2863 INIT_CMD(c, INITIALIZE, WRITE);
2864 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2865}
2866
2867/**
2189 * t4_query_params - query FW or device parameters 2868 * t4_query_params - query FW or device parameters
2190 * @adap: the adapter 2869 * @adap: the adapter
2191 * @mbox: mailbox to use for the FW command 2870 * @mbox: mailbox to use for the FW command
@@ -2835,10 +3514,6 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
2835 return ret; 3514 return ret;
2836 } 3515 }
2837 3516
2838 ret = get_vpd_params(adapter, &adapter->params.vpd);
2839 if (ret < 0)
2840 return ret;
2841
2842 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 3517 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2843 3518
2844 /* 3519 /*
@@ -2846,6 +3521,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
2846 */ 3521 */
2847 adapter->params.nports = 1; 3522 adapter->params.nports = 1;
2848 adapter->params.portvec = 1; 3523 adapter->params.portvec = 1;
3524 adapter->params.vpd.cclk = 50000;
2849 return 0; 3525 return 0;
2850} 3526}
2851 3527
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c26b455f37de..f534ed7e10e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -58,6 +58,7 @@ enum {
58 58
59enum { 59enum {
60 SF_PAGE_SIZE = 256, /* serial flash page size */ 60 SF_PAGE_SIZE = 256, /* serial flash page size */
61 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
61}; 62};
62 63
63enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ 64enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -137,4 +138,83 @@ struct rsp_ctrl {
137#define QINTR_CNT_EN 0x1 138#define QINTR_CNT_EN 0x1
138#define QINTR_TIMER_IDX(x) ((x) << 1) 139#define QINTR_TIMER_IDX(x) ((x) << 1)
139#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) 140#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
141
142/*
143 * Flash layout.
144 */
145#define FLASH_START(start) ((start) * SF_SEC_SIZE)
146#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
147
148enum {
149 /*
150 * Various Expansion-ROM boot images, etc.
151 */
152 FLASH_EXP_ROM_START_SEC = 0,
153 FLASH_EXP_ROM_NSECS = 6,
154 FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
155 FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
156
157 /*
158 * iSCSI Boot Firmware Table (iBFT) and other driver-related
159 * parameters ...
160 */
161 FLASH_IBFT_START_SEC = 6,
162 FLASH_IBFT_NSECS = 1,
163 FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
164 FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
165
166 /*
167 * Boot configuration data.
168 */
169 FLASH_BOOTCFG_START_SEC = 7,
170 FLASH_BOOTCFG_NSECS = 1,
171 FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
172 FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
173
174 /*
175 * Location of firmware image in FLASH.
176 */
177 FLASH_FW_START_SEC = 8,
178 FLASH_FW_NSECS = 8,
179 FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
180 FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
181
182 /*
183 * iSCSI persistent/crash information.
184 */
185 FLASH_ISCSI_CRASH_START_SEC = 29,
186 FLASH_ISCSI_CRASH_NSECS = 1,
187 FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
188 FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
189
190 /*
191 * FCoE persistent/crash information.
192 */
193 FLASH_FCOE_CRASH_START_SEC = 30,
194 FLASH_FCOE_CRASH_NSECS = 1,
195 FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
196 FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
197
198 /*
199 * Location of Firmware Configuration File in FLASH. Since the FPGA
200 * "FLASH" is smaller we need to store the Configuration File in a
201 * different location -- which will overlap the end of the firmware
202 * image if firmware ever gets that large ...
203 */
204 FLASH_CFG_START_SEC = 31,
205 FLASH_CFG_NSECS = 1,
206 FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
207 FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
208
209 FLASH_FPGA_CFG_START_SEC = 15,
210 FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
211
212 /*
213 * Sectors 32-63 are reserved for FLASH failover.
214 */
215};
216
217#undef FLASH_START
218#undef FLASH_MAX_SIZE
219
140#endif /* __T4_HW_H */ 220#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 111fc323f155..a1a8b57200f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -86,10 +86,17 @@
86#define CIDXINC_SHIFT 0 86#define CIDXINC_SHIFT 0
87#define CIDXINC(x) ((x) << CIDXINC_SHIFT) 87#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
88 88
89#define X_RXPKTCPLMODE_SPLIT 1
90#define X_INGPADBOUNDARY_SHIFT 5
91
89#define SGE_CONTROL 0x1008 92#define SGE_CONTROL 0x1008
90#define DCASYSTYPE 0x00080000U 93#define DCASYSTYPE 0x00080000U
91#define RXPKTCPLMODE 0x00040000U 94#define RXPKTCPLMODE_MASK 0x00040000U
92#define EGRSTATUSPAGESIZE 0x00020000U 95#define RXPKTCPLMODE_SHIFT 18
96#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT)
97#define EGRSTATUSPAGESIZE_MASK 0x00020000U
98#define EGRSTATUSPAGESIZE_SHIFT 17
99#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT)
93#define PKTSHIFT_MASK 0x00001c00U 100#define PKTSHIFT_MASK 0x00001c00U
94#define PKTSHIFT_SHIFT 10 101#define PKTSHIFT_SHIFT 10
95#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) 102#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
@@ -108,6 +115,35 @@
108#define GLOBALENABLE 0x00000001U 115#define GLOBALENABLE 0x00000001U
109 116
110#define SGE_HOST_PAGE_SIZE 0x100c 117#define SGE_HOST_PAGE_SIZE 0x100c
118
119#define HOSTPAGESIZEPF7_MASK 0x0000000fU
120#define HOSTPAGESIZEPF7_SHIFT 28
121#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT)
122
123#define HOSTPAGESIZEPF6_MASK 0x0000000fU
124#define HOSTPAGESIZEPF6_SHIFT 24
125#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT)
126
127#define HOSTPAGESIZEPF5_MASK 0x0000000fU
128#define HOSTPAGESIZEPF5_SHIFT 20
129#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT)
130
131#define HOSTPAGESIZEPF4_MASK 0x0000000fU
132#define HOSTPAGESIZEPF4_SHIFT 16
133#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT)
134
135#define HOSTPAGESIZEPF3_MASK 0x0000000fU
136#define HOSTPAGESIZEPF3_SHIFT 12
137#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT)
138
139#define HOSTPAGESIZEPF2_MASK 0x0000000fU
140#define HOSTPAGESIZEPF2_SHIFT 8
141#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT)
142
143#define HOSTPAGESIZEPF1_MASK 0x0000000fU
144#define HOSTPAGESIZEPF1_SHIFT 4
145#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT)
146
111#define HOSTPAGESIZEPF0_MASK 0x0000000fU 147#define HOSTPAGESIZEPF0_MASK 0x0000000fU
112#define HOSTPAGESIZEPF0_SHIFT 0 148#define HOSTPAGESIZEPF0_SHIFT 0
113#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) 149#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
@@ -155,6 +191,8 @@
155#define SGE_INT_ENABLE3 0x1040 191#define SGE_INT_ENABLE3 0x1040
156#define SGE_FL_BUFFER_SIZE0 0x1044 192#define SGE_FL_BUFFER_SIZE0 0x1044
157#define SGE_FL_BUFFER_SIZE1 0x1048 193#define SGE_FL_BUFFER_SIZE1 0x1048
194#define SGE_FL_BUFFER_SIZE2 0x104c
195#define SGE_FL_BUFFER_SIZE3 0x1050
158#define SGE_INGRESS_RX_THRESHOLD 0x10a0 196#define SGE_INGRESS_RX_THRESHOLD 0x10a0
159#define THRESHOLD_0_MASK 0x3f000000U 197#define THRESHOLD_0_MASK 0x3f000000U
160#define THRESHOLD_0_SHIFT 24 198#define THRESHOLD_0_SHIFT 24
@@ -173,6 +211,12 @@
173#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) 211#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
174#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) 212#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
175 213
214#define SGE_CONM_CTRL 0x1094
215#define EGRTHRESHOLD_MASK 0x00003f00U
216#define EGRTHRESHOLDshift 8
217#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
218#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
219
176#define SGE_TIMER_VALUE_0_AND_1 0x10b8 220#define SGE_TIMER_VALUE_0_AND_1 0x10b8
177#define TIMERVALUE0_MASK 0xffff0000U 221#define TIMERVALUE0_MASK 0xffff0000U
178#define TIMERVALUE0_SHIFT 16 222#define TIMERVALUE0_SHIFT 16
@@ -184,64 +228,54 @@
184#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) 228#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
185 229
186#define SGE_TIMER_VALUE_2_AND_3 0x10bc 230#define SGE_TIMER_VALUE_2_AND_3 0x10bc
231#define TIMERVALUE2_MASK 0xffff0000U
232#define TIMERVALUE2_SHIFT 16
233#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT)
234#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
235#define TIMERVALUE3_MASK 0x0000ffffU
236#define TIMERVALUE3_SHIFT 0
237#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT)
238#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
239
187#define SGE_TIMER_VALUE_4_AND_5 0x10c0 240#define SGE_TIMER_VALUE_4_AND_5 0x10c0
241#define TIMERVALUE4_MASK 0xffff0000U
242#define TIMERVALUE4_SHIFT 16
243#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT)
244#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
245#define TIMERVALUE5_MASK 0x0000ffffU
246#define TIMERVALUE5_SHIFT 0
247#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT)
248#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
249
188#define SGE_DEBUG_INDEX 0x10cc 250#define SGE_DEBUG_INDEX 0x10cc
189#define SGE_DEBUG_DATA_HIGH 0x10d0 251#define SGE_DEBUG_DATA_HIGH 0x10d0
190#define SGE_DEBUG_DATA_LOW 0x10d4 252#define SGE_DEBUG_DATA_LOW 0x10d4
191#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 253#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
192 254
193#define S_LP_INT_THRESH 12
194#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
195#define S_HP_INT_THRESH 28 255#define S_HP_INT_THRESH 28
256#define M_HP_INT_THRESH 0xfU
196#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) 257#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
258#define M_HP_COUNT 0x7ffU
259#define S_HP_COUNT 16
260#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
261#define S_LP_INT_THRESH 12
262#define M_LP_INT_THRESH 0xfU
263#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
264#define M_LP_COUNT 0x7ffU
265#define S_LP_COUNT 0
266#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
197#define A_SGE_DBFIFO_STATUS 0x10a4 267#define A_SGE_DBFIFO_STATUS 0x10a4
198 268
199#define S_ENABLE_DROP 13 269#define S_ENABLE_DROP 13
200#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) 270#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
201#define F_ENABLE_DROP V_ENABLE_DROP(1U) 271#define F_ENABLE_DROP V_ENABLE_DROP(1U)
202#define A_SGE_DOORBELL_CONTROL 0x10a8
203
204#define A_SGE_CTXT_CMD 0x11fc
205#define A_SGE_DBQ_CTXT_BADDR 0x1084
206
207#define A_SGE_PF_KDOORBELL 0x0
208
209#define S_QID 15
210#define V_QID(x) ((x) << S_QID)
211
212#define S_PIDX 0
213#define V_PIDX(x) ((x) << S_PIDX)
214
215#define M_LP_COUNT 0x7ffU
216#define S_LP_COUNT 0
217#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
218
219#define M_HP_COUNT 0x7ffU
220#define S_HP_COUNT 16
221#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
222
223#define A_SGE_INT_ENABLE3 0x1040
224
225#define S_DBFIFO_HP_INT 8
226#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
227#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
228
229#define S_DBFIFO_LP_INT 7
230#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
231#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
232
233#define S_DROPPED_DB 0 272#define S_DROPPED_DB 0
234#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB) 273#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
235#define F_DROPPED_DB V_DROPPED_DB(1U) 274#define F_DROPPED_DB V_DROPPED_DB(1U)
275#define A_SGE_DOORBELL_CONTROL 0x10a8
236 276
237#define S_ERR_DROPPED_DB 18 277#define A_SGE_CTXT_CMD 0x11fc
238#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) 278#define A_SGE_DBQ_CTXT_BADDR 0x1084
239#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
240
241#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
242
243#define M_HP_INT_THRESH 0xfU
244#define M_LP_INT_THRESH 0xfU
245 279
246#define PCIE_PF_CLI 0x44 280#define PCIE_PF_CLI 0x44
247#define PCIE_INT_CAUSE 0x3004 281#define PCIE_INT_CAUSE 0x3004
@@ -287,6 +321,8 @@
287#define WINDOW(x) ((x) << WINDOW_SHIFT) 321#define WINDOW(x) ((x) << WINDOW_SHIFT)
288#define PCIE_MEM_ACCESS_OFFSET 0x306c 322#define PCIE_MEM_ACCESS_OFFSET 0x306c
289 323
324#define PCIE_FW 0x30b8
325
290#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 326#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
291#define RNPP 0x80000000U 327#define RNPP 0x80000000U
292#define RPCP 0x20000000U 328#define RPCP 0x20000000U
@@ -364,7 +400,7 @@
364#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU 400#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
365#define MEM_WRAP_CLIENT_NUM_SHIFT 0 401#define MEM_WRAP_CLIENT_NUM_SHIFT 0
366#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) 402#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
367 403#define MA_PCIE_FW 0x30b8
368#define MA_PARITY_ERROR_STATUS 0x77f4 404#define MA_PARITY_ERROR_STATUS 0x77f4
369 405
370#define EDC_0_BASE_ADDR 0x7900 406#define EDC_0_BASE_ADDR 0x7900
@@ -385,6 +421,7 @@
385 421
386#define CIM_BOOT_CFG 0x7b00 422#define CIM_BOOT_CFG 0x7b00
387#define BOOTADDR_MASK 0xffffff00U 423#define BOOTADDR_MASK 0xffffff00U
424#define UPCRST 0x1U
388 425
389#define CIM_PF_MAILBOX_DATA 0x240 426#define CIM_PF_MAILBOX_DATA 0x240
390#define CIM_PF_MAILBOX_CTRL 0x280 427#define CIM_PF_MAILBOX_CTRL 0x280
@@ -457,6 +494,13 @@
457#define VLANEXTENABLE_MASK 0x0000f000U 494#define VLANEXTENABLE_MASK 0x0000f000U
458#define VLANEXTENABLE_SHIFT 12 495#define VLANEXTENABLE_SHIFT 12
459 496
497#define TP_GLOBAL_CONFIG 0x7d08
498#define FIVETUPLELOOKUP_SHIFT 17
499#define FIVETUPLELOOKUP_MASK 0x00060000U
500#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT)
501#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
502 FIVETUPLELOOKUP_SHIFT)
503
460#define TP_PARA_REG2 0x7d68 504#define TP_PARA_REG2 0x7d68
461#define MAXRXDATA_MASK 0xffff0000U 505#define MAXRXDATA_MASK 0xffff0000U
462#define MAXRXDATA_SHIFT 16 506#define MAXRXDATA_SHIFT 16
@@ -466,8 +510,47 @@
466#define TIMERRESOLUTION_MASK 0x00ff0000U 510#define TIMERRESOLUTION_MASK 0x00ff0000U
467#define TIMERRESOLUTION_SHIFT 16 511#define TIMERRESOLUTION_SHIFT 16
468#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) 512#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
513#define DELAYEDACKRESOLUTION_MASK 0x000000ffU
514#define DELAYEDACKRESOLUTION_SHIFT 0
515#define DELAYEDACKRESOLUTION_GET(x) \
516 (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
469 517
470#define TP_SHIFT_CNT 0x7dc0 518#define TP_SHIFT_CNT 0x7dc0
519#define SYNSHIFTMAX_SHIFT 24
520#define SYNSHIFTMAX_MASK 0xff000000U
521#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT)
522#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \
523 SYNSHIFTMAX_SHIFT)
524#define RXTSHIFTMAXR1_SHIFT 20
525#define RXTSHIFTMAXR1_MASK 0x00f00000U
526#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT)
527#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \
528 RXTSHIFTMAXR1_SHIFT)
529#define RXTSHIFTMAXR2_SHIFT 16
530#define RXTSHIFTMAXR2_MASK 0x000f0000U
531#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT)
532#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \
533 RXTSHIFTMAXR2_SHIFT)
534#define PERSHIFTBACKOFFMAX_SHIFT 12
535#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U
536#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT)
537#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
538 PERSHIFTBACKOFFMAX_SHIFT)
539#define PERSHIFTMAX_SHIFT 8
540#define PERSHIFTMAX_MASK 0x00000f00U
541#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT)
542#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \
543 PERSHIFTMAX_SHIFT)
544#define KEEPALIVEMAXR1_SHIFT 4
545#define KEEPALIVEMAXR1_MASK 0x000000f0U
546#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT)
547#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \
548 KEEPALIVEMAXR1_SHIFT)
549#define KEEPALIVEMAXR2_SHIFT 0
550#define KEEPALIVEMAXR2_MASK 0x0000000fU
551#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT)
552#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \
553 KEEPALIVEMAXR2_SHIFT)
471 554
472#define TP_CCTRL_TABLE 0x7ddc 555#define TP_CCTRL_TABLE 0x7ddc
473#define TP_MTU_TABLE 0x7de4 556#define TP_MTU_TABLE 0x7de4
@@ -501,6 +584,20 @@
501#define TP_INT_CAUSE 0x7e74 584#define TP_INT_CAUSE 0x7e74
502#define FLMTXFLSTEMPTY 0x40000000U 585#define FLMTXFLSTEMPTY 0x40000000U
503 586
587#define TP_VLAN_PRI_MAP 0x140
588#define FRAGMENTATION_SHIFT 9
589#define FRAGMENTATION_MASK 0x00000200U
590#define MPSHITTYPE_MASK 0x00000100U
591#define MACMATCH_MASK 0x00000080U
592#define ETHERTYPE_MASK 0x00000040U
593#define PROTOCOL_MASK 0x00000020U
594#define TOS_MASK 0x00000010U
595#define VLAN_MASK 0x00000008U
596#define VNIC_ID_MASK 0x00000004U
597#define PORT_MASK 0x00000002U
598#define FCOE_SHIFT 0
599#define FCOE_MASK 0x00000001U
600
504#define TP_INGRESS_CONFIG 0x141 601#define TP_INGRESS_CONFIG 0x141
505#define VNIC 0x00000800U 602#define VNIC 0x00000800U
506#define CSUM_HAS_PSEUDO_HDR 0x00000400U 603#define CSUM_HAS_PSEUDO_HDR 0x00000400U
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ad53f796b574..a6364632b490 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -79,6 +79,8 @@ struct fw_wr_hdr {
79#define FW_WR_FLOWID(x) ((x) << 8) 79#define FW_WR_FLOWID(x) ((x) << 8)
80#define FW_WR_LEN16(x) ((x) << 0) 80#define FW_WR_LEN16(x) ((x) << 0)
81 81
82#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
83
82struct fw_ulptx_wr { 84struct fw_ulptx_wr {
83 __be32 op_to_compl; 85 __be32 op_to_compl;
84 __be32 flowid_len16; 86 __be32 flowid_len16;
@@ -155,6 +157,17 @@ struct fw_eth_tx_pkt_vm_wr {
155 157
156#define FW_CMD_MAX_TIMEOUT 3000 158#define FW_CMD_MAX_TIMEOUT 3000
157 159
160/*
161 * If a host driver does a HELLO and discovers that there's already a MASTER
162 * selected, we may have to wait for that MASTER to finish issuing RESET,
163 * configuration and INITIALIZE commands. Also, there's a possibility that
 164 * our own HELLO may get lost if it happens right as the MASTER is issuing a
165 * RESET command, so we need to be willing to make a few retries of our HELLO.
166 */
167#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT)
168#define FW_CMD_HELLO_RETRIES 3
169
170
158enum fw_cmd_opcodes { 171enum fw_cmd_opcodes {
159 FW_LDST_CMD = 0x01, 172 FW_LDST_CMD = 0x01,
160 FW_RESET_CMD = 0x03, 173 FW_RESET_CMD = 0x03,
@@ -304,7 +317,17 @@ struct fw_reset_cmd {
304 __be32 op_to_write; 317 __be32 op_to_write;
305 __be32 retval_len16; 318 __be32 retval_len16;
306 __be32 val; 319 __be32 val;
307 __be32 r3; 320 __be32 halt_pkd;
321};
322
323#define FW_RESET_CMD_HALT_SHIFT 31
324#define FW_RESET_CMD_HALT_MASK 0x1
325#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT)
326#define FW_RESET_CMD_HALT_GET(x) \
327 (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK)
328
329enum fw_hellow_cmd {
330 fw_hello_cmd_stage_os = 0x0
308}; 331};
309 332
310struct fw_hello_cmd { 333struct fw_hello_cmd {
@@ -315,8 +338,14 @@ struct fw_hello_cmd {
315#define FW_HELLO_CMD_INIT (1U << 30) 338#define FW_HELLO_CMD_INIT (1U << 30)
316#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) 339#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
317#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) 340#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
318#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) 341#define FW_HELLO_CMD_MBMASTER_MASK 0xfU
342#define FW_HELLO_CMD_MBMASTER_SHIFT 24
343#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
344#define FW_HELLO_CMD_MBMASTER_GET(x) \
345 (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
319#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) 346#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
347#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
348#define FW_HELLO_CMD_CLEARINIT (1U << 16)
320 __be32 fwrev; 349 __be32 fwrev;
321}; 350};
322 351
@@ -401,6 +430,14 @@ enum fw_caps_config_fcoe {
401 FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, 430 FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
402}; 431};
403 432
433enum fw_memtype_cf {
434 FW_MEMTYPE_CF_EDC0 = 0x0,
435 FW_MEMTYPE_CF_EDC1 = 0x1,
436 FW_MEMTYPE_CF_EXTMEM = 0x2,
437 FW_MEMTYPE_CF_FLASH = 0x4,
438 FW_MEMTYPE_CF_INTERNAL = 0x5,
439};
440
404struct fw_caps_config_cmd { 441struct fw_caps_config_cmd {
405 __be32 op_to_write; 442 __be32 op_to_write;
406 __be32 retval_len16; 443 __be32 retval_len16;
@@ -416,10 +453,15 @@ struct fw_caps_config_cmd {
416 __be16 r4; 453 __be16 r4;
417 __be16 iscsicaps; 454 __be16 iscsicaps;
418 __be16 fcoecaps; 455 __be16 fcoecaps;
419 __be32 r5; 456 __be32 cfcsum;
420 __be64 r6; 457 __be32 finiver;
458 __be32 finicsum;
421}; 459};
422 460
461#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27)
462#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24)
463#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
464
423/* 465/*
424 * params command mnemonics 466 * params command mnemonics
425 */ 467 */
@@ -451,6 +493,7 @@ enum fw_params_param_dev {
451 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A, 493 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
452 FW_PARAMS_PARAM_DEV_FWREV = 0x0B, 494 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
453 FW_PARAMS_PARAM_DEV_TPREV = 0x0C, 495 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
496 FW_PARAMS_PARAM_DEV_CF = 0x0D,
454}; 497};
455 498
456/* 499/*
@@ -492,6 +535,8 @@ enum fw_params_param_pfvf {
492 FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, 535 FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
493 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, 536 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
494 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, 537 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
538 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
539 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E
495}; 540};
496 541
497/* 542/*
@@ -507,8 +552,16 @@ enum fw_params_param_dmaq {
507 552
508#define FW_PARAMS_MNEM(x) ((x) << 24) 553#define FW_PARAMS_MNEM(x) ((x) << 24)
509#define FW_PARAMS_PARAM_X(x) ((x) << 16) 554#define FW_PARAMS_PARAM_X(x) ((x) << 16)
510#define FW_PARAMS_PARAM_Y(x) ((x) << 8) 555#define FW_PARAMS_PARAM_Y_SHIFT 8
511#define FW_PARAMS_PARAM_Z(x) ((x) << 0) 556#define FW_PARAMS_PARAM_Y_MASK 0xffU
557#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT)
558#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
559 FW_PARAMS_PARAM_Y_MASK)
560#define FW_PARAMS_PARAM_Z_SHIFT 0
561#define FW_PARAMS_PARAM_Z_MASK 0xffu
562#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT)
563#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
564 FW_PARAMS_PARAM_Z_MASK)
512#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) 565#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
513#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) 566#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
514 567
@@ -1599,6 +1652,16 @@ struct fw_debug_cmd {
1599 } u; 1652 } u;
1600}; 1653};
1601 1654
1655#define FW_PCIE_FW_ERR (1U << 31)
1656#define FW_PCIE_FW_INIT (1U << 30)
1657#define FW_PCIE_FW_HALT (1U << 29)
1658#define FW_PCIE_FW_MASTER_VLD (1U << 15)
1659#define FW_PCIE_FW_MASTER_MASK 0x7
1660#define FW_PCIE_FW_MASTER_SHIFT 12
1661#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
1662#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
1663 FW_PCIE_FW_MASTER_MASK)
1664
1602struct fw_hdr { 1665struct fw_hdr {
1603 u8 ver; 1666 u8 ver;
1604 u8 reserved1; 1667 u8 reserved1;
@@ -1613,7 +1676,11 @@ struct fw_hdr {
1613 u8 intfver_iscsi; 1676 u8 intfver_iscsi;
1614 u8 intfver_fcoe; 1677 u8 intfver_fcoe;
1615 u8 reserved2; 1678 u8 reserved2;
1616 __be32 reserved3[27]; 1679 __u32 reserved3;
1680 __u32 reserved4;
1681 __u32 reserved5;
1682 __be32 flags;
1683 __be32 reserved6[23];
1617}; 1684};
1618 1685
1619#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) 1686#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
@@ -1621,18 +1688,8 @@ struct fw_hdr {
1621#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) 1688#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
1622#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) 1689#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
1623 1690
1624#define S_FW_CMD_OP 24 1691enum fw_hdr_flags {
1625#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) 1692 FW_HDR_FLAGS_RESET_HALT = 0x00000001,
1626 1693};
1627#define S_FW_CMD_REQUEST 23
1628#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
1629#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
1630
1631#define S_FW_CMD_WRITE 21
1632#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
1633#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
1634
1635#define S_FW_LDST_CMD_ADDRSPACE 0
1636#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
1637 1694
1638#endif /* _T4FW_INTERFACE_H_ */ 1695#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8877fbfefb63..f16745f4b36b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2421,7 +2421,7 @@ int t4vf_sge_init(struct adapter *adapter)
2421 fl0, fl1); 2421 fl0, fl1);
2422 return -EINVAL; 2422 return -EINVAL;
2423 } 2423 }
2424 if ((sge_params->sge_control & RXPKTCPLMODE) == 0) { 2424 if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
2425 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); 2425 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2426 return -EINVAL; 2426 return -EINVAL;
2427 } 2427 }
@@ -2431,7 +2431,8 @@ int t4vf_sge_init(struct adapter *adapter)
2431 */ 2431 */
2432 if (fl1) 2432 if (fl1)
2433 FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; 2433 FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
2434 STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64); 2434 STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
2435 ? 128 : 64);
2435 PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); 2436 PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
2436 FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + 2437 FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
2437 SGE_INGPADBOUNDARY_SHIFT); 2438 SGE_INGPADBOUNDARY_SHIFT);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d266c86a53f7..cf4c05bdf5fe 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
110#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 110#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
111#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 111#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
112 112
113#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
113#define FW_VER_LEN 32 114#define FW_VER_LEN 32
114 115
115struct be_dma_mem { 116struct be_dma_mem {
@@ -336,7 +337,6 @@ struct phy_info {
336 u16 auto_speeds_supported; 337 u16 auto_speeds_supported;
337 u16 fixed_speeds_supported; 338 u16 fixed_speeds_supported;
338 int link_speed; 339 int link_speed;
339 int forced_port_speed;
340 u32 dac_cable_len; 340 u32 dac_cable_len;
341 u32 advertising; 341 u32 advertising;
342 u32 supported; 342 u32 supported;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8c63d06ab12b..af60bb26e330 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
120 120
121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
122 dev_warn(&adapter->pdev->dev, 122 dev_warn(&adapter->pdev->dev,
123 "opcode %d-%d is not permitted\n", 123 "VF is not privileged to issue opcode %d-%d\n",
124 opcode, subsystem); 124 opcode, subsystem);
125 } else { 125 } else {
126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@@ -165,14 +165,13 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
165 } 165 }
166} 166}
167 167
168/* Grp5 QOS Speed evt */ 168/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
169static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 169static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
170 struct be_async_event_grp5_qos_link_speed *evt) 170 struct be_async_event_grp5_qos_link_speed *evt)
171{ 171{
172 if (evt->physical_port == adapter->port_num) { 172 if (adapter->phy.link_speed >= 0 &&
173 /* qos_link_speed is in units of 10 Mbps */ 173 evt->physical_port == adapter->port_num)
174 adapter->phy.link_speed = evt->qos_link_speed * 10; 174 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
175 }
176} 175}
177 176
178/*Grp5 PVID evt*/ 177/*Grp5 PVID evt*/
@@ -717,7 +716,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
717 716
718/* Use MCC */ 717/* Use MCC */
719int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 718int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
720 u8 type, bool permanent, u32 if_handle, u32 pmac_id) 719 bool permanent, u32 if_handle, u32 pmac_id)
721{ 720{
722 struct be_mcc_wrb *wrb; 721 struct be_mcc_wrb *wrb;
723 struct be_cmd_req_mac_query *req; 722 struct be_cmd_req_mac_query *req;
@@ -734,7 +733,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
734 733
735 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 734 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
736 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); 735 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
737 req->type = type; 736 req->type = MAC_ADDRESS_TYPE_NETWORK;
738 if (permanent) { 737 if (permanent) {
739 req->permanent = 1; 738 req->permanent = 1;
740 } else { 739 } else {
@@ -1326,9 +1325,28 @@ err:
1326 return status; 1325 return status;
1327} 1326}
1328 1327
1329/* Uses synchronous mcc */ 1328static int be_mac_to_link_speed(int mac_speed)
1330int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, 1329{
1331 u16 *link_speed, u8 *link_status, u32 dom) 1330 switch (mac_speed) {
1331 case PHY_LINK_SPEED_ZERO:
1332 return 0;
1333 case PHY_LINK_SPEED_10MBPS:
1334 return 10;
1335 case PHY_LINK_SPEED_100MBPS:
1336 return 100;
1337 case PHY_LINK_SPEED_1GBPS:
1338 return 1000;
1339 case PHY_LINK_SPEED_10GBPS:
1340 return 10000;
1341 }
1342 return 0;
1343}
1344
1345/* Uses synchronous mcc
1346 * Returns link_speed in Mbps
1347 */
1348int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1349 u8 *link_status, u32 dom)
1332{ 1350{
1333 struct be_mcc_wrb *wrb; 1351 struct be_mcc_wrb *wrb;
1334 struct be_cmd_req_link_status *req; 1352 struct be_cmd_req_link_status *req;
@@ -1357,11 +1375,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1357 status = be_mcc_notify_wait(adapter); 1375 status = be_mcc_notify_wait(adapter);
1358 if (!status) { 1376 if (!status) {
1359 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 1377 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1360 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { 1378 if (link_speed) {
1361 if (link_speed) 1379 *link_speed = resp->link_speed ?
1362 *link_speed = le16_to_cpu(resp->link_speed); 1380 le16_to_cpu(resp->link_speed) * 10 :
1363 if (mac_speed) 1381 be_mac_to_link_speed(resp->mac_speed);
1364 *mac_speed = resp->mac_speed; 1382
1383 if (!resp->logical_link_status)
1384 *link_speed = 0;
1365 } 1385 }
1366 if (link_status) 1386 if (link_status)
1367 *link_status = resp->logical_link_status; 1387 *link_status = resp->logical_link_status;
@@ -2405,6 +2425,9 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
2405 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2425 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2406 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2426 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2407 CAPABILITY_BE3_NATIVE_ERX_API; 2427 CAPABILITY_BE3_NATIVE_ERX_API;
2428 if (!adapter->be3_native)
2429 dev_warn(&adapter->pdev->dev,
2430 "adapter not in advanced mode\n");
2408 } 2431 }
2409err: 2432err:
2410 mutex_unlock(&adapter->mbox_lock); 2433 mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 250f19b5f7b6..0936e21e3cff 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1687,7 +1687,7 @@ struct be_cmd_req_set_ext_fat_caps {
1687extern int be_pci_fnum_get(struct be_adapter *adapter); 1687extern int be_pci_fnum_get(struct be_adapter *adapter);
1688extern int be_fw_wait_ready(struct be_adapter *adapter); 1688extern int be_fw_wait_ready(struct be_adapter *adapter);
1689extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1689extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1690 u8 type, bool permanent, u32 if_handle, u32 pmac_id); 1690 bool permanent, u32 if_handle, u32 pmac_id);
1691extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1691extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1692 u32 if_id, u32 *pmac_id, u32 domain); 1692 u32 if_id, u32 *pmac_id, u32 domain);
1693extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, 1693extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
@@ -1714,8 +1714,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1714 int type); 1714 int type);
1715extern int be_cmd_rxq_destroy(struct be_adapter *adapter, 1715extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
1716 struct be_queue_info *q); 1716 struct be_queue_info *q);
1717extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, 1717extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1718 u16 *link_speed, u8 *link_status, u32 dom); 1718 u8 *link_status, u32 dom);
1719extern int be_cmd_reset(struct be_adapter *adapter); 1719extern int be_cmd_reset(struct be_adapter *adapter);
1720extern int be_cmd_get_stats(struct be_adapter *adapter, 1720extern int be_cmd_get_stats(struct be_adapter *adapter,
1721 struct be_dma_mem *nonemb_cmd); 1721 struct be_dma_mem *nonemb_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c0e700653f96..8e6fb0ba6aa9 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -512,28 +512,6 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
512 return val; 512 return val;
513} 513}
514 514
515static int convert_to_et_speed(u32 be_speed)
516{
517 int et_speed = SPEED_10000;
518
519 switch (be_speed) {
520 case PHY_LINK_SPEED_10MBPS:
521 et_speed = SPEED_10;
522 break;
523 case PHY_LINK_SPEED_100MBPS:
524 et_speed = SPEED_100;
525 break;
526 case PHY_LINK_SPEED_1GBPS:
527 et_speed = SPEED_1000;
528 break;
529 case PHY_LINK_SPEED_10GBPS:
530 et_speed = SPEED_10000;
531 break;
532 }
533
534 return et_speed;
535}
536
537bool be_pause_supported(struct be_adapter *adapter) 515bool be_pause_supported(struct be_adapter *adapter)
538{ 516{
539 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB || 517 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
@@ -544,27 +522,16 @@ bool be_pause_supported(struct be_adapter *adapter)
544static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 522static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
545{ 523{
546 struct be_adapter *adapter = netdev_priv(netdev); 524 struct be_adapter *adapter = netdev_priv(netdev);
547 u8 port_speed = 0;
548 u16 link_speed = 0;
549 u8 link_status; 525 u8 link_status;
550 u32 et_speed = 0; 526 u16 link_speed = 0;
551 int status; 527 int status;
552 528
553 if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) { 529 if (adapter->phy.link_speed < 0) {
554 if (adapter->phy.forced_port_speed < 0) { 530 status = be_cmd_link_status_query(adapter, &link_speed,
555 status = be_cmd_link_status_query(adapter, &port_speed, 531 &link_status, 0);
556 &link_speed, &link_status, 0); 532 if (!status)
557 if (!status) 533 be_link_status_update(adapter, link_status);
558 be_link_status_update(adapter, link_status); 534 ethtool_cmd_speed_set(ecmd, link_speed);
559 if (link_speed)
560 et_speed = link_speed * 10;
561 else if (link_status)
562 et_speed = convert_to_et_speed(port_speed);
563 } else {
564 et_speed = adapter->phy.forced_port_speed;
565 }
566
567 ethtool_cmd_speed_set(ecmd, et_speed);
568 535
569 status = be_cmd_get_phy_info(adapter); 536 status = be_cmd_get_phy_info(adapter);
570 if (status) 537 if (status)
@@ -773,8 +740,8 @@ static void
773be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 740be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
774{ 741{
775 struct be_adapter *adapter = netdev_priv(netdev); 742 struct be_adapter *adapter = netdev_priv(netdev);
776 u8 mac_speed = 0; 743 int status;
777 u16 qos_link_speed = 0; 744 u8 link_status = 0;
778 745
779 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 746 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
780 747
@@ -798,11 +765,11 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
798 test->flags |= ETH_TEST_FL_FAILED; 765 test->flags |= ETH_TEST_FL_FAILED;
799 } 766 }
800 767
801 if (be_cmd_link_status_query(adapter, &mac_speed, 768 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
802 &qos_link_speed, NULL, 0) != 0) { 769 if (status) {
803 test->flags |= ETH_TEST_FL_FAILED; 770 test->flags |= ETH_TEST_FL_FAILED;
804 data[4] = -1; 771 data[4] = -1;
805 } else if (!mac_speed) { 772 } else if (!link_status) {
806 test->flags |= ETH_TEST_FL_FAILED; 773 test->flags |= ETH_TEST_FL_FAILED;
807 data[4] = 1; 774 data[4] = 1;
808 } 775 }
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 95d10472f236..eb3f2cb3b93b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -20,6 +20,7 @@
20#include "be.h" 20#include "be.h"
21#include "be_cmds.h" 21#include "be_cmds.h"
22#include <asm/div64.h> 22#include <asm/div64.h>
23#include <linux/aer.h>
23 24
24MODULE_VERSION(DRV_VER); 25MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids); 26MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -240,9 +241,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
240 if (!is_valid_ether_addr(addr->sa_data)) 241 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL; 242 return -EADDRNOTAVAIL;
242 243
243 status = be_cmd_mac_addr_query(adapter, current_mac, 244 status = be_cmd_mac_addr_query(adapter, current_mac, false,
244 MAC_ADDRESS_TYPE_NETWORK, false, 245 adapter->if_handle, 0);
245 adapter->if_handle, 0);
246 if (status) 246 if (status)
247 goto err; 247 goto err;
248 248
@@ -1075,7 +1075,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1075static int be_find_vfs(struct be_adapter *adapter, int vf_state) 1075static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1076{ 1076{
1077 struct pci_dev *dev, *pdev = adapter->pdev; 1077 struct pci_dev *dev, *pdev = adapter->pdev;
1078 int vfs = 0, assigned_vfs = 0, pos, vf_fn; 1078 int vfs = 0, assigned_vfs = 0, pos;
1079 u16 offset, stride; 1079 u16 offset, stride;
1080 1080
1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -1086,9 +1086,7 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1086 1086
1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL); 1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1088 while (dev) { 1088 while (dev) {
1089 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF; 1089 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1090 if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 dev->bus->number == pdev->bus->number) {
1092 vfs++; 1090 vfs++;
1093 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) 1091 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1094 assigned_vfs++; 1092 assigned_vfs++;
@@ -1896,6 +1894,8 @@ static int be_tx_qs_create(struct be_adapter *adapter)
1896 return status; 1894 return status;
1897 } 1895 }
1898 1896
1897 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1898 adapter->num_tx_qs);
1899 return 0; 1899 return 0;
1900} 1900}
1901 1901
@@ -1946,10 +1946,9 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
1946 return rc; 1946 return rc;
1947 } 1947 }
1948 1948
1949 if (adapter->num_rx_qs != MAX_RX_QS) 1949 dev_info(&adapter->pdev->dev,
1950 dev_info(&adapter->pdev->dev, 1950 "created %d RSS queue(s) and 1 default RX queue\n",
1951 "Created only %d receive queues\n", adapter->num_rx_qs); 1951 adapter->num_rx_qs - 1);
1952
1953 return 0; 1952 return 0;
1954} 1953}
1955 1954
@@ -2176,8 +2175,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
2176{ 2175{
2177 u32 num = 0; 2176 u32 num = 0;
2178 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 2177 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2179 !sriov_want(adapter) && be_physfn(adapter) && 2178 !sriov_want(adapter) && be_physfn(adapter)) {
2180 !be_is_mc(adapter)) {
2181 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 2179 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2182 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); 2180 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2183 } 2181 }
@@ -2188,6 +2186,7 @@ static void be_msix_enable(struct be_adapter *adapter)
2188{ 2186{
2189#define BE_MIN_MSIX_VECTORS 1 2187#define BE_MIN_MSIX_VECTORS 1
2190 int i, status, num_vec, num_roce_vec = 0; 2188 int i, status, num_vec, num_roce_vec = 0;
2189 struct device *dev = &adapter->pdev->dev;
2191 2190
2192 /* If RSS queues are not used, need a vec for default RX Q */ 2191 /* If RSS queues are not used, need a vec for default RX Q */
2193 num_vec = min(be_num_rss_want(adapter), num_online_cpus()); 2192 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
@@ -2212,6 +2211,8 @@ static void be_msix_enable(struct be_adapter *adapter)
2212 num_vec) == 0) 2211 num_vec) == 0)
2213 goto done; 2212 goto done;
2214 } 2213 }
2214
2215 dev_warn(dev, "MSIx enable failed\n");
2215 return; 2216 return;
2216done: 2217done:
2217 if (be_roce_supported(adapter)) { 2218 if (be_roce_supported(adapter)) {
@@ -2225,6 +2226,7 @@ done:
2225 } 2226 }
2226 } else 2227 } else
2227 adapter->num_msix_vec = num_vec; 2228 adapter->num_msix_vec = num_vec;
2229 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2228 return; 2230 return;
2229} 2231}
2230 2232
@@ -2441,8 +2443,7 @@ static int be_open(struct net_device *netdev)
2441 be_eq_notify(adapter, eqo->q.id, true, false, 0); 2443 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2442 } 2444 }
2443 2445
2444 status = be_cmd_link_status_query(adapter, NULL, NULL, 2446 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2445 &link_status, 0);
2446 if (!status) 2447 if (!status)
2447 be_link_status_update(adapter, link_status); 2448 be_link_status_update(adapter, link_status);
2448 2449
@@ -2646,8 +2647,8 @@ static int be_vf_setup(struct be_adapter *adapter)
2646 } 2647 }
2647 2648
2648 for_all_vfs(adapter, vf_cfg, vf) { 2649 for_all_vfs(adapter, vf_cfg, vf) {
2649 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, 2650 lnk_speed = 1000;
2650 NULL, vf + 1); 2651 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2651 if (status) 2652 if (status)
2652 goto err; 2653 goto err;
2653 vf_cfg->tx_rate = lnk_speed * 10; 2654 vf_cfg->tx_rate = lnk_speed * 10;
@@ -2671,7 +2672,6 @@ static void be_setup_init(struct be_adapter *adapter)
2671 adapter->be3_native = false; 2672 adapter->be3_native = false;
2672 adapter->promiscuous = false; 2673 adapter->promiscuous = false;
2673 adapter->eq_next_idx = 0; 2674 adapter->eq_next_idx = 0;
2674 adapter->phy.forced_port_speed = -1;
2675} 2675}
2676 2676
2677static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, 2677static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2693,21 +2693,16 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2693 status = be_cmd_get_mac_from_list(adapter, mac, 2693 status = be_cmd_get_mac_from_list(adapter, mac,
2694 active_mac, pmac_id, 0); 2694 active_mac, pmac_id, 0);
2695 if (*active_mac) { 2695 if (*active_mac) {
2696 status = be_cmd_mac_addr_query(adapter, mac, 2696 status = be_cmd_mac_addr_query(adapter, mac, false,
2697 MAC_ADDRESS_TYPE_NETWORK, 2697 if_handle, *pmac_id);
2698 false, if_handle,
2699 *pmac_id);
2700 } 2698 }
2701 } else if (be_physfn(adapter)) { 2699 } else if (be_physfn(adapter)) {
2702 /* For BE3, for PF get permanent MAC */ 2700 /* For BE3, for PF get permanent MAC */
2703 status = be_cmd_mac_addr_query(adapter, mac, 2701 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2704 MAC_ADDRESS_TYPE_NETWORK, true,
2705 0, 0);
2706 *active_mac = false; 2702 *active_mac = false;
2707 } else { 2703 } else {
2708 /* For BE3, for VF get soft MAC assigned by PF*/ 2704 /* For BE3, for VF get soft MAC assigned by PF*/
2709 status = be_cmd_mac_addr_query(adapter, mac, 2705 status = be_cmd_mac_addr_query(adapter, mac, false,
2710 MAC_ADDRESS_TYPE_NETWORK, false,
2711 if_handle, 0); 2706 if_handle, 0);
2712 *active_mac = true; 2707 *active_mac = true;
2713 } 2708 }
@@ -2724,6 +2719,8 @@ static int be_get_config(struct be_adapter *adapter)
2724 if (pos) { 2719 if (pos) {
2725 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, 2720 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2726 &dev_num_vfs); 2721 &dev_num_vfs);
2722 if (!lancer_chip(adapter))
2723 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2727 adapter->dev_num_vfs = dev_num_vfs; 2724 adapter->dev_num_vfs = dev_num_vfs;
2728 } 2725 }
2729 return 0; 2726 return 0;
@@ -3437,6 +3434,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
3437 if (mem->va) 3434 if (mem->va)
3438 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, 3435 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439 mem->dma); 3436 mem->dma);
3437 kfree(adapter->pmac_id);
3440} 3438}
3441 3439
3442static int be_ctrl_init(struct be_adapter *adapter) 3440static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3471,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
3473 } 3471 }
3474 memset(rx_filter->va, 0, rx_filter->size); 3472 memset(rx_filter->va, 0, rx_filter->size);
3475 3473
3474 /* primary mac needs 1 pmac entry */
3475 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3476 sizeof(*adapter->pmac_id), GFP_KERNEL);
3477 if (!adapter->pmac_id)
3478 return -ENOMEM;
3479
3476 mutex_init(&adapter->mbox_lock); 3480 mutex_init(&adapter->mbox_lock);
3477 spin_lock_init(&adapter->mcc_lock); 3481 spin_lock_init(&adapter->mcc_lock);
3478 spin_lock_init(&adapter->mcc_cq_lock); 3482 spin_lock_init(&adapter->mcc_cq_lock);
@@ -3543,6 +3547,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
3543 3547
3544 be_ctrl_cleanup(adapter); 3548 be_ctrl_cleanup(adapter);
3545 3549
3550 pci_disable_pcie_error_reporting(pdev);
3551
3546 pci_set_drvdata(pdev, NULL); 3552 pci_set_drvdata(pdev, NULL);
3547 pci_release_regions(pdev); 3553 pci_release_regions(pdev);
3548 pci_disable_device(pdev); 3554 pci_disable_device(pdev);
@@ -3609,12 +3615,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
3609 else 3615 else
3610 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT; 3616 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3611 3617
3612 /* primary mac needs 1 pmac entry */
3613 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3614 sizeof(u32), GFP_KERNEL);
3615 if (!adapter->pmac_id)
3616 return -ENOMEM;
3617
3618 status = be_cmd_get_cntl_attributes(adapter); 3618 status = be_cmd_get_cntl_attributes(adapter);
3619 if (status) 3619 if (status)
3620 return status; 3620 return status;
@@ -3800,6 +3800,23 @@ static bool be_reset_required(struct be_adapter *adapter)
3800 return be_find_vfs(adapter, ENABLED) > 0 ? false : true; 3800 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
3801} 3801}
3802 3802
3803static char *mc_name(struct be_adapter *adapter)
3804{
3805 if (adapter->function_mode & FLEX10_MODE)
3806 return "FLEX10";
3807 else if (adapter->function_mode & VNIC_MODE)
3808 return "vNIC";
3809 else if (adapter->function_mode & UMC_ENABLED)
3810 return "UMC";
3811 else
3812 return "";
3813}
3814
3815static inline char *func_name(struct be_adapter *adapter)
3816{
3817 return be_physfn(adapter) ? "PF" : "VF";
3818}
3819
3803static int __devinit be_probe(struct pci_dev *pdev, 3820static int __devinit be_probe(struct pci_dev *pdev,
3804 const struct pci_device_id *pdev_id) 3821 const struct pci_device_id *pdev_id)
3805{ 3822{
@@ -3844,6 +3861,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
3844 } 3861 }
3845 } 3862 }
3846 3863
3864 status = pci_enable_pcie_error_reporting(pdev);
3865 if (status)
3866 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
3867
3847 status = be_ctrl_init(adapter); 3868 status = be_ctrl_init(adapter);
3848 if (status) 3869 if (status)
3849 goto free_netdev; 3870 goto free_netdev;
@@ -3886,7 +3907,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3886 3907
3887 status = be_setup(adapter); 3908 status = be_setup(adapter);
3888 if (status) 3909 if (status)
3889 goto msix_disable; 3910 goto stats_clean;
3890 3911
3891 be_netdev_init(netdev); 3912 be_netdev_init(netdev);
3892 status = register_netdev(netdev); 3913 status = register_netdev(netdev);
@@ -3900,15 +3921,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
3900 3921
3901 be_cmd_query_port_name(adapter, &port_name); 3922 be_cmd_query_port_name(adapter, &port_name);
3902 3923
3903 dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev), 3924 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
3904 port_name); 3925 func_name(adapter), mc_name(adapter), port_name);
3905 3926
3906 return 0; 3927 return 0;
3907 3928
3908unsetup: 3929unsetup:
3909 be_clear(adapter); 3930 be_clear(adapter);
3910msix_disable:
3911 be_msix_disable(adapter);
3912stats_clean: 3931stats_clean:
3913 be_stats_cleanup(adapter); 3932 be_stats_cleanup(adapter);
3914ctrl_clean: 3933ctrl_clean:
@@ -4066,6 +4085,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4066 if (status) 4085 if (status)
4067 return PCI_ERS_RESULT_DISCONNECT; 4086 return PCI_ERS_RESULT_DISCONNECT;
4068 4087
4088 pci_cleanup_aer_uncorrect_error_status(pdev);
4069 return PCI_ERS_RESULT_RECOVERED; 4089 return PCI_ERS_RESULT_RECOVERED;
4070} 4090}
4071 4091
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 3574e1499dfc..feff51664dcf 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
62 ---help--- 62 ---help---
63 This driver supports the MDIO bus used by the gianfar and UCC drivers. 63 This driver supports the MDIO bus used by the gianfar and UCC drivers.
64 64
65config FSL_XGMAC_MDIO
66 tristate "Freescale XGMAC MDIO"
67 depends on FSL_SOC
68 select PHYLIB
69 ---help---
70 This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
71
65config UCC_GETH 72config UCC_GETH
66 tristate "Freescale QE Gigabit Ethernet" 73 tristate "Freescale QE Gigabit Ethernet"
67 depends on QUICC_ENGINE 74 depends on QUICC_ENGINE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 1752488c9ee5..3d1839afff65 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
9endif 9endif
10obj-$(CONFIG_FS_ENET) += fs_enet/ 10obj-$(CONFIG_FS_ENET) += fs_enet/
11obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o 11obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
12obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
12obj-$(CONFIG_GIANFAR) += gianfar_driver.o 13obj-$(CONFIG_GIANFAR) += gianfar_driver.o
13obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o 14obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
14gianfar_driver-objs := gianfar.o \ 15gianfar_driver-objs := gianfar.o \
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9527b28d70d1..c93a05654b46 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -19,54 +19,90 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h> 23#include <linux/init.h>
26#include <linux/delay.h> 24#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h> 25#include <linux/module.h>
33#include <linux/platform_device.h>
34#include <linux/crc32.h>
35#include <linux/mii.h> 26#include <linux/mii.h>
36#include <linux/phy.h>
37#include <linux/of.h>
38#include <linux/of_address.h> 27#include <linux/of_address.h>
39#include <linux/of_mdio.h> 28#include <linux/of_mdio.h>
40#include <linux/of_platform.h> 29#include <linux/of_device.h>
41 30
42#include <asm/io.h> 31#include <asm/io.h>
43#include <asm/irq.h> 32#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
44#include <asm/uaccess.h>
45#include <asm/ucc.h>
46 33
47#include "gianfar.h" 34#include "gianfar.h"
48#include "fsl_pq_mdio.h" 35
36#define MIIMIND_BUSY 0x00000001
37#define MIIMIND_NOTVALID 0x00000004
38#define MIIMCFG_INIT_VALUE 0x00000007
39#define MIIMCFG_RESET 0x80000000
40
41#define MII_READ_COMMAND 0x00000001
42
43struct fsl_pq_mii {
44 u32 miimcfg; /* MII management configuration reg */
45 u32 miimcom; /* MII management command reg */
46 u32 miimadd; /* MII management address reg */
47 u32 miimcon; /* MII management control reg */
48 u32 miimstat; /* MII management status reg */
49 u32 miimind; /* MII management indication reg */
50};
51
52struct fsl_pq_mdio {
53 u8 res1[16];
54 u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
55 u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
56 u8 res2[4];
57 u32 emapm; /* MDIO Event mapping register (for etsec2)*/
58 u8 res3[1280];
59 struct fsl_pq_mii mii;
60 u8 res4[28];
61 u32 utbipar; /* TBI phy address reg (only on UCC) */
62 u8 res5[2728];
63} __packed;
49 64
50/* Number of microseconds to wait for an MII register to respond */ 65/* Number of microseconds to wait for an MII register to respond */
51#define MII_TIMEOUT 1000 66#define MII_TIMEOUT 1000
52 67
53struct fsl_pq_mdio_priv { 68struct fsl_pq_mdio_priv {
54 void __iomem *map; 69 void __iomem *map;
55 struct fsl_pq_mdio __iomem *regs; 70 struct fsl_pq_mii __iomem *regs;
71 int irqs[PHY_MAX_ADDR];
72};
73
74/*
75 * Per-device-type data. Each type of device tree node that we support gets
76 * one of these.
77 *
78 * @mii_offset: the offset of the MII registers within the memory map of the
79 * node. Some nodes define only the MII registers, and some define the whole
80 * MAC (which includes the MII registers).
81 *
82 * @get_tbipa: determines the address of the TBIPA register
83 *
84 * @ucc_configure: a special function for extra QE configuration
85 */
86struct fsl_pq_mdio_data {
87 unsigned int mii_offset; /* offset of the MII registers */
88 uint32_t __iomem * (*get_tbipa)(void __iomem *p);
89 void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
56}; 90};
57 91
58/* 92/*
59 * Write value to the PHY at mii_id at register regnum, 93 * Write value to the PHY at mii_id at register regnum, on the bus attached
60 * on the bus attached to the local interface, which may be different from the 94 * to the local interface, which may be different from the generic mdio bus
61 * generic mdio bus (tied to a single interface), waiting until the write is 95 * (tied to a single interface), waiting until the write is done before
62 * done before returning. This is helpful in programming interfaces like 96 * returning. This is helpful in programming interfaces like the TBI which
63 * the TBI which control interfaces like onchip SERDES and are always tied to 97 * control interfaces like onchip SERDES and are always tied to the local
64 * the local mdio pins, which may not be the same as system mdio bus, used for 98 * mdio pins, which may not be the same as system mdio bus, used for
65 * controlling the external PHYs, for example. 99 * controlling the external PHYs, for example.
66 */ 100 */
67int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, 101static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
68 int regnum, u16 value) 102 u16 value)
69{ 103{
104 struct fsl_pq_mdio_priv *priv = bus->priv;
105 struct fsl_pq_mii __iomem *regs = priv->regs;
70 u32 status; 106 u32 status;
71 107
72 /* Set the PHY address and the register address we want to write */ 108 /* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
83} 119}
84 120
85/* 121/*
86 * Read the bus for PHY at addr mii_id, register regnum, and 122 * Read the bus for PHY at addr mii_id, register regnum, and return the value.
87 * return the value. Clears miimcom first. All PHY operation 123 * Clears miimcom first.
88 * done on the bus attached to the local interface, 124 *
89 * which may be different from the generic mdio bus 125 * All PHY operation done on the bus attached to the local interface, which
90 * This is helpful in programming interfaces like 126 * may be different from the generic mdio bus. This is helpful in programming
91 * the TBI which, in turn, control interfaces like onchip SERDES 127 * interfaces like the TBI which, in turn, control interfaces like on-chip
92 * and are always tied to the local mdio pins, which may not be the 128 * SERDES and are always tied to the local mdio pins, which may not be the
93 * same as system mdio bus, used for controlling the external PHYs, for eg. 129 * same as system mdio bus, used for controlling the external PHYs, for eg.
94 */ 130 */
95int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, 131static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
96 int mii_id, int regnum)
97{ 132{
98 u16 value; 133 struct fsl_pq_mdio_priv *priv = bus->priv;
134 struct fsl_pq_mii __iomem *regs = priv->regs;
99 u32 status; 135 u32 status;
136 u16 value;
100 137
101 /* Set the PHY address and the register address we want to read */ 138 /* Set the PHY address and the register address we want to read */
102 out_be32(&regs->miimadd, (mii_id << 8) | regnum); 139 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
115 /* Grab the value of the register from miimstat */ 152 /* Grab the value of the register from miimstat */
116 value = in_be32(&regs->miimstat); 153 value = in_be32(&regs->miimstat);
117 154
155 dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
118 return value; 156 return value;
119} 157}
120 158
121static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
122{
123 struct fsl_pq_mdio_priv *priv = bus->priv;
124
125 return priv->regs;
126}
127
128/*
129 * Write value to the PHY at mii_id at register regnum,
130 * on the bus, waiting until the write is done before returning.
131 */
132int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
133{
134 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
135
136 /* Write to the local MII regs */
137 return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
138}
139
140/*
141 * Read the bus for PHY at addr mii_id, register regnum, and
142 * return the value. Clears miimcom first.
143 */
144int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
145{
146 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
147
148 /* Read the local MII regs */
149 return fsl_pq_local_mdio_read(regs, mii_id, regnum);
150}
151
152/* Reset the MIIM registers, and wait for the bus to free */ 159/* Reset the MIIM registers, and wait for the bus to free */
153static int fsl_pq_mdio_reset(struct mii_bus *bus) 160static int fsl_pq_mdio_reset(struct mii_bus *bus)
154{ 161{
155 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 162 struct fsl_pq_mdio_priv *priv = bus->priv;
163 struct fsl_pq_mii __iomem *regs = priv->regs;
156 u32 status; 164 u32 status;
157 165
158 mutex_lock(&bus->mdio_lock); 166 mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
170 mutex_unlock(&bus->mdio_lock); 178 mutex_unlock(&bus->mdio_lock);
171 179
172 if (!status) { 180 if (!status) {
173 printk(KERN_ERR "%s: The MII Bus is stuck!\n", 181 dev_err(&bus->dev, "timeout waiting for MII bus\n");
174 bus->name);
175 return -EBUSY; 182 return -EBUSY;
176 } 183 }
177 184
178 return 0; 185 return 0;
179} 186}
180 187
181void fsl_pq_mdio_bus_name(char *name, struct device_node *np) 188#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
189/*
190 * This is mildly evil, but so is our hardware for doing this.
191 * Also, we have to cast back to struct gfar because of
192 * definition weirdness done in gianfar.h.
193 */
194static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
182{ 195{
183 const u32 *addr; 196 struct gfar __iomem *enet_regs = p;
184 u64 taddr = OF_BAD_ADDR;
185
186 addr = of_get_address(np, 0, NULL, NULL);
187 if (addr)
188 taddr = of_translate_address(np, addr);
189 197
190 snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name, 198 return &enet_regs->tbipa;
191 (unsigned long long)taddr);
192} 199}
193EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
194 200
201/*
202 * Return the TBIPAR address for an eTSEC2 node
203 */
204static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
205{
206 return p;
207}
208#endif
195 209
196static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) 210#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
211/*
212 * Return the TBIPAR address for a QE MDIO node
213 */
214static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
197{ 215{
198#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 216 struct fsl_pq_mdio __iomem *mdio = p;
199 struct gfar __iomem *enet_regs;
200 217
201 /* 218 return &mdio->utbipar;
202 * This is mildly evil, but so is our hardware for doing this.
203 * Also, we have to cast back to struct gfar because of
204 * definition weirdness done in gianfar.h.
205 */
206 if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
207 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
208 of_device_is_compatible(np, "gianfar")) {
209 enet_regs = (struct gfar __iomem *)regs;
210 return &enet_regs->tbipa;
211 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
212 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
213 return of_iomap(np, 1);
214 }
215#endif
216 return NULL;
217} 219}
218 220
219 221/*
220static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) 222 * Find the UCC node that controls the given MDIO node
223 *
224 * For some reason, the QE MDIO nodes are not children of the UCC devices
225 * that control them. Therefore, we need to scan all UCC nodes looking for
226 * the one that encompases the given MDIO node. We do this by comparing
227 * physical addresses. The 'start' and 'end' addresses of the MDIO node are
228 * passed, and the correct UCC node will cover the entire address range.
229 *
230 * This assumes that there is only one QE MDIO node in the entire device tree.
231 */
232static void ucc_configure(phys_addr_t start, phys_addr_t end)
221{ 233{
222#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) 234 static bool found_mii_master;
223 struct device_node *np = NULL; 235 struct device_node *np = NULL;
224 int err = 0;
225 236
226 for_each_compatible_node(np, NULL, "ucc_geth") { 237 if (found_mii_master)
227 struct resource tempres; 238 return;
228 239
229 err = of_address_to_resource(np, 0, &tempres); 240 for_each_compatible_node(np, NULL, "ucc_geth") {
230 if (err) 241 struct resource res;
242 const uint32_t *iprop;
243 uint32_t id;
244 int ret;
245
246 ret = of_address_to_resource(np, 0, &res);
247 if (ret < 0) {
248 pr_debug("fsl-pq-mdio: no address range in node %s\n",
249 np->full_name);
231 continue; 250 continue;
251 }
232 252
233 /* if our mdio regs fall within this UCC regs range */ 253 /* if our mdio regs fall within this UCC regs range */
234 if ((start >= tempres.start) && (end <= tempres.end)) { 254 if ((start < res.start) || (end > res.end))
235 /* Find the id of the UCC */ 255 continue;
236 const u32 *id; 256
237 257 iprop = of_get_property(np, "cell-index", NULL);
238 id = of_get_property(np, "cell-index", NULL); 258 if (!iprop) {
239 if (!id) { 259 iprop = of_get_property(np, "device-id", NULL);
240 id = of_get_property(np, "device-id", NULL); 260 if (!iprop) {
241 if (!id) 261 pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
242 continue; 262 np->full_name);
263 continue;
243 } 264 }
265 }
244 266
245 *ucc_id = *id; 267 id = be32_to_cpup(iprop);
246 268
247 return 0; 269 /*
270 * cell-index and device-id for QE nodes are
271 * numbered from 1, not 0.
272 */
273 if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
274 pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
275 np->full_name);
276 continue;
248 } 277 }
278
279 pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
280 found_mii_master = true;
249 } 281 }
282}
250 283
251 if (err)
252 return err;
253 else
254 return -EINVAL;
255#else
256 return -ENODEV;
257#endif 284#endif
258}
259 285
260static int fsl_pq_mdio_probe(struct platform_device *ofdev) 286static struct of_device_id fsl_pq_mdio_match[] = {
287#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
288 {
289 .compatible = "fsl,gianfar-tbi",
290 .data = &(struct fsl_pq_mdio_data) {
291 .mii_offset = 0,
292 .get_tbipa = get_gfar_tbipa,
293 },
294 },
295 {
296 .compatible = "fsl,gianfar-mdio",
297 .data = &(struct fsl_pq_mdio_data) {
298 .mii_offset = 0,
299 .get_tbipa = get_gfar_tbipa,
300 },
301 },
302 {
303 .type = "mdio",
304 .compatible = "gianfar",
305 .data = &(struct fsl_pq_mdio_data) {
306 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
307 .get_tbipa = get_gfar_tbipa,
308 },
309 },
310 {
311 .compatible = "fsl,etsec2-tbi",
312 .data = &(struct fsl_pq_mdio_data) {
313 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
314 .get_tbipa = get_etsec_tbipa,
315 },
316 },
317 {
318 .compatible = "fsl,etsec2-mdio",
319 .data = &(struct fsl_pq_mdio_data) {
320 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
321 .get_tbipa = get_etsec_tbipa,
322 },
323 },
324#endif
325#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
326 {
327 .compatible = "fsl,ucc-mdio",
328 .data = &(struct fsl_pq_mdio_data) {
329 .mii_offset = 0,
330 .get_tbipa = get_ucc_tbipa,
331 .ucc_configure = ucc_configure,
332 },
333 },
334 {
335 /* Legacy UCC MDIO node */
336 .type = "mdio",
337 .compatible = "ucc_geth_phy",
338 .data = &(struct fsl_pq_mdio_data) {
339 .mii_offset = 0,
340 .get_tbipa = get_ucc_tbipa,
341 .ucc_configure = ucc_configure,
342 },
343 },
344#endif
345 /* No Kconfig option for Fman support yet */
346 {
347 .compatible = "fsl,fman-mdio",
348 .data = &(struct fsl_pq_mdio_data) {
349 .mii_offset = 0,
350 /* Fman TBI operations are handled elsewhere */
351 },
352 },
353
354 {},
355};
356MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
357
358static int fsl_pq_mdio_probe(struct platform_device *pdev)
261{ 359{
262 struct device_node *np = ofdev->dev.of_node; 360 const struct of_device_id *id =
361 of_match_device(fsl_pq_mdio_match, &pdev->dev);
362 const struct fsl_pq_mdio_data *data = id->data;
363 struct device_node *np = pdev->dev.of_node;
364 struct resource res;
263 struct device_node *tbi; 365 struct device_node *tbi;
264 struct fsl_pq_mdio_priv *priv; 366 struct fsl_pq_mdio_priv *priv;
265 struct fsl_pq_mdio __iomem *regs = NULL;
266 void __iomem *map;
267 u32 __iomem *tbipa;
268 struct mii_bus *new_bus; 367 struct mii_bus *new_bus;
269 int tbiaddr = -1;
270 const u32 *addrp;
271 u64 addr = 0, size = 0;
272 int err; 368 int err;
273 369
274 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 370 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
275 if (!priv)
276 return -ENOMEM;
277 371
278 new_bus = mdiobus_alloc(); 372 new_bus = mdiobus_alloc_size(sizeof(*priv));
279 if (!new_bus) { 373 if (!new_bus)
280 err = -ENOMEM; 374 return -ENOMEM;
281 goto err_free_priv;
282 }
283 375
376 priv = new_bus->priv;
284 new_bus->name = "Freescale PowerQUICC MII Bus", 377 new_bus->name = "Freescale PowerQUICC MII Bus",
285 new_bus->read = &fsl_pq_mdio_read, 378 new_bus->read = &fsl_pq_mdio_read;
286 new_bus->write = &fsl_pq_mdio_write, 379 new_bus->write = &fsl_pq_mdio_write;
287 new_bus->reset = &fsl_pq_mdio_reset, 380 new_bus->reset = &fsl_pq_mdio_reset;
288 new_bus->priv = priv; 381 new_bus->irq = priv->irqs;
289 fsl_pq_mdio_bus_name(new_bus->id, np); 382
290 383 err = of_address_to_resource(np, 0, &res);
291 addrp = of_get_address(np, 0, &size, NULL); 384 if (err < 0) {
292 if (!addrp) { 385 dev_err(&pdev->dev, "could not obtain address information\n");
293 err = -EINVAL; 386 goto error;
294 goto err_free_bus;
295 } 387 }
296 388
297 /* Set the PHY base address */ 389 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
298 addr = of_translate_address(np, addrp); 390 (unsigned long long)res.start);
299 if (addr == OF_BAD_ADDR) {
300 err = -EINVAL;
301 goto err_free_bus;
302 }
303 391
304 map = ioremap(addr, size); 392 priv->map = of_iomap(np, 0);
305 if (!map) { 393 if (!priv->map) {
306 err = -ENOMEM; 394 err = -ENOMEM;
307 goto err_free_bus; 395 goto error;
308 } 396 }
309 priv->map = map;
310
311 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
312 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
313 of_device_is_compatible(np, "fsl,ucc-mdio") ||
314 of_device_is_compatible(np, "ucc_geth_phy"))
315 map -= offsetof(struct fsl_pq_mdio, miimcfg);
316 regs = map;
317 priv->regs = regs;
318
319 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
320 397
321 if (NULL == new_bus->irq) { 398 /*
322 err = -ENOMEM; 399 * Some device tree nodes represent only the MII registers, and
323 goto err_unmap_regs; 400 * others represent the MAC and MII registers. The 'mii_offset' field
401 * contains the offset of the MII registers inside the mapped register
402 * space.
403 */
404 if (data->mii_offset > resource_size(&res)) {
405 dev_err(&pdev->dev, "invalid register map\n");
406 err = -EINVAL;
407 goto error;
324 } 408 }
409 priv->regs = priv->map + data->mii_offset;
325 410
326 new_bus->parent = &ofdev->dev; 411 new_bus->parent = &pdev->dev;
327 dev_set_drvdata(&ofdev->dev, new_bus); 412 dev_set_drvdata(&pdev->dev, new_bus);
328
329 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
330 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
331 of_device_is_compatible(np, "fsl,etsec2-mdio") ||
332 of_device_is_compatible(np, "fsl,etsec2-tbi") ||
333 of_device_is_compatible(np, "gianfar")) {
334 tbipa = get_gfar_tbipa(regs, np);
335 if (!tbipa) {
336 err = -EINVAL;
337 goto err_free_irqs;
338 }
339 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
340 of_device_is_compatible(np, "ucc_geth_phy")) {
341 u32 id;
342 static u32 mii_mng_master;
343
344 tbipa = &regs->utbipar;
345
346 if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
347 goto err_free_irqs;
348 413
349 if (!mii_mng_master) { 414 if (data->get_tbipa) {
350 mii_mng_master = id; 415 for_each_child_of_node(np, tbi) {
351 ucc_set_qe_mux_mii_mng(id - 1); 416 if (strcmp(tbi->type, "tbi-phy") == 0) {
417 dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
418 strrchr(tbi->full_name, '/') + 1);
419 break;
420 }
352 } 421 }
353 } else {
354 err = -ENODEV;
355 goto err_free_irqs;
356 }
357 422
358 for_each_child_of_node(np, tbi) { 423 if (tbi) {
359 if (!strncmp(tbi->type, "tbi-phy", 8)) 424 const u32 *prop = of_get_property(tbi, "reg", NULL);
360 break; 425 uint32_t __iomem *tbipa;
361 }
362 426
363 if (tbi) { 427 if (!prop) {
364 const u32 *prop = of_get_property(tbi, "reg", NULL); 428 dev_err(&pdev->dev,
429 "missing 'reg' property in node %s\n",
430 tbi->full_name);
431 err = -EBUSY;
432 goto error;
433 }
365 434
366 if (prop) 435 tbipa = data->get_tbipa(priv->map);
367 tbiaddr = *prop;
368 436
369 if (tbiaddr == -1) { 437 out_be32(tbipa, be32_to_cpup(prop));
370 err = -EBUSY;
371 goto err_free_irqs;
372 } else {
373 out_be32(tbipa, tbiaddr);
374 } 438 }
375 } 439 }
376 440
441 if (data->ucc_configure)
442 data->ucc_configure(res.start, res.end);
443
377 err = of_mdiobus_register(new_bus, np); 444 err = of_mdiobus_register(new_bus, np);
378 if (err) { 445 if (err) {
379 printk (KERN_ERR "%s: Cannot register as MDIO bus\n", 446 dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
380 new_bus->name); 447 new_bus->name);
381 goto err_free_irqs; 448 goto error;
382 } 449 }
383 450
384 return 0; 451 return 0;
385 452
386err_free_irqs: 453error:
387 kfree(new_bus->irq); 454 if (priv->map)
388err_unmap_regs: 455 iounmap(priv->map);
389 iounmap(priv->map); 456
390err_free_bus:
391 kfree(new_bus); 457 kfree(new_bus);
392err_free_priv: 458
393 kfree(priv);
394 return err; 459 return err;
395} 460}
396 461
397 462
398static int fsl_pq_mdio_remove(struct platform_device *ofdev) 463static int fsl_pq_mdio_remove(struct platform_device *pdev)
399{ 464{
400 struct device *device = &ofdev->dev; 465 struct device *device = &pdev->dev;
401 struct mii_bus *bus = dev_get_drvdata(device); 466 struct mii_bus *bus = dev_get_drvdata(device);
402 struct fsl_pq_mdio_priv *priv = bus->priv; 467 struct fsl_pq_mdio_priv *priv = bus->priv;
403 468
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
406 dev_set_drvdata(device, NULL); 471 dev_set_drvdata(device, NULL);
407 472
408 iounmap(priv->map); 473 iounmap(priv->map);
409 bus->priv = NULL;
410 mdiobus_free(bus); 474 mdiobus_free(bus);
411 kfree(priv);
412 475
413 return 0; 476 return 0;
414} 477}
415 478
416static struct of_device_id fsl_pq_mdio_match[] = {
417 {
418 .type = "mdio",
419 .compatible = "ucc_geth_phy",
420 },
421 {
422 .type = "mdio",
423 .compatible = "gianfar",
424 },
425 {
426 .compatible = "fsl,ucc-mdio",
427 },
428 {
429 .compatible = "fsl,gianfar-tbi",
430 },
431 {
432 .compatible = "fsl,gianfar-mdio",
433 },
434 {
435 .compatible = "fsl,etsec2-tbi",
436 },
437 {
438 .compatible = "fsl,etsec2-mdio",
439 },
440 {},
441};
442MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
443
444static struct platform_driver fsl_pq_mdio_driver = { 479static struct platform_driver fsl_pq_mdio_driver = {
445 .driver = { 480 .driver = {
446 .name = "fsl-pq_mdio", 481 .name = "fsl-pq_mdio",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
deleted file mode 100644
index bd17a2a0139b..000000000000
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors
4 *
5 * Author: Andy Fleming
6 * Modifier: Sandeep Gopalpet
7 *
8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#ifndef __FSL_PQ_MDIO_H
17#define __FSL_PQ_MDIO_H
18
19#define MIIMIND_BUSY 0x00000001
20#define MIIMIND_NOTVALID 0x00000004
21#define MIIMCFG_INIT_VALUE 0x00000007
22#define MIIMCFG_RESET 0x80000000
23
24#define MII_READ_COMMAND 0x00000001
25
26struct fsl_pq_mdio {
27 u8 res1[16];
28 u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
29 u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
30 u8 res2[4];
31 u32 emapm; /* MDIO Event mapping register (for etsec2)*/
32 u8 res3[1280];
33 u32 miimcfg; /* MII management configuration reg */
34 u32 miimcom; /* MII management command reg */
35 u32 miimadd; /* MII management address reg */
36 u32 miimcon; /* MII management control reg */
37 u32 miimstat; /* MII management status reg */
38 u32 miimind; /* MII management indication reg */
39 u8 reserved[28]; /* Space holder */
40 u32 utbipar; /* TBI phy address reg (only on UCC) */
41 u8 res4[2728];
42} __packed;
43
44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
46int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
47 int regnum, u16 value);
48int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
49int __init fsl_pq_mdio_init(void);
50void fsl_pq_mdio_exit(void);
51void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
52#endif /* FSL_PQ_MDIO_H */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d3233f59a82e..a1b52ec3b930 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -100,7 +100,6 @@
100#include <linux/of_net.h> 100#include <linux/of_net.h>
101 101
102#include "gianfar.h" 102#include "gianfar.h"
103#include "fsl_pq_mdio.h"
104 103
105#define TX_TIMEOUT (1*HZ) 104#define TX_TIMEOUT (1*HZ)
106 105
@@ -395,7 +394,13 @@ static void gfar_init_mac(struct net_device *ndev)
395 if (ndev->features & NETIF_F_IP_CSUM) 394 if (ndev->features & NETIF_F_IP_CSUM)
396 tctrl |= TCTRL_INIT_CSUM; 395 tctrl |= TCTRL_INIT_CSUM;
397 396
398 tctrl |= TCTRL_TXSCHED_PRIO; 397 if (priv->prio_sched_en)
398 tctrl |= TCTRL_TXSCHED_PRIO;
399 else {
400 tctrl |= TCTRL_TXSCHED_WRRS;
401 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
402 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
403 }
399 404
400 gfar_write(&regs->tctrl, tctrl); 405 gfar_write(&regs->tctrl, tctrl);
401 406
@@ -1161,6 +1166,9 @@ static int gfar_probe(struct platform_device *ofdev)
1161 priv->rx_filer_enable = 1; 1166 priv->rx_filer_enable = 1;
1162 /* Enable most messages by default */ 1167 /* Enable most messages by default */
1163 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1168 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1169 /* use pritority h/w tx queue scheduling for single queue devices */
1170 if (priv->num_tx_queues == 1)
1171 priv->prio_sched_en = 1;
1164 1172
1165 /* Carrier starts down, phylib will bring it up */ 1173 /* Carrier starts down, phylib will bring it up */
1166 netif_carrier_off(dev); 1174 netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 2136c7ff5e6d..4141ef2ddafc 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -301,8 +301,16 @@ extern const char gfar_driver_version[];
301#define TCTRL_TFCPAUSE 0x00000008 301#define TCTRL_TFCPAUSE 0x00000008
302#define TCTRL_TXSCHED_MASK 0x00000006 302#define TCTRL_TXSCHED_MASK 0x00000006
303#define TCTRL_TXSCHED_INIT 0x00000000 303#define TCTRL_TXSCHED_INIT 0x00000000
304/* priority scheduling */
304#define TCTRL_TXSCHED_PRIO 0x00000002 305#define TCTRL_TXSCHED_PRIO 0x00000002
306/* weighted round-robin scheduling (WRRS) */
305#define TCTRL_TXSCHED_WRRS 0x00000004 307#define TCTRL_TXSCHED_WRRS 0x00000004
308/* default WRRS weight and policy setting,
309 * tailored to the tr03wt and tr47wt registers:
310 * equal weight for all Tx Qs, measured in 64byte units
311 */
312#define DEFAULT_WRRS_WEIGHT 0x18181818
313
306#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) 314#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
307 315
308#define IEVENT_INIT_CLEAR 0xffffffff 316#define IEVENT_INIT_CLEAR 0xffffffff
@@ -1098,7 +1106,8 @@ struct gfar_private {
1098 extended_hash:1, 1106 extended_hash:1,
1099 bd_stash_en:1, 1107 bd_stash_en:1,
1100 rx_filer_enable:1, 1108 rx_filer_enable:1,
1101 wol_en:1; /* Wake-on-LAN enabled */ 1109 wol_en:1, /* Wake-on-LAN enabled */
1110 prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
1102 unsigned short padding; 1111 unsigned short padding;
1103 1112
1104 /* PHY stuff */ 1113 /* PHY stuff */
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 0daa66b8eca0..b9db0e040563 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -510,7 +510,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
510 510
511 spin_unlock_irqrestore(&etsects->lock, flags); 511 spin_unlock_irqrestore(&etsects->lock, flags);
512 512
513 etsects->clock = ptp_clock_register(&etsects->caps); 513 etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
514 if (IS_ERR(etsects->clock)) { 514 if (IS_ERR(etsects->clock)) {
515 err = PTR_ERR(etsects->clock); 515 err = PTR_ERR(etsects->clock);
516 goto no_clock; 516 goto no_clock;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 21c6574c5f15..164288439220 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -42,7 +42,6 @@
42#include <asm/machdep.h> 42#include <asm/machdep.h>
43 43
44#include "ucc_geth.h" 44#include "ucc_geth.h"
45#include "fsl_pq_mdio.h"
46 45
47#undef DEBUG 46#undef DEBUG
48 47
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644
index 000000000000..1afb5ea2a984
--- /dev/null
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -0,0 +1,274 @@
1/*
2 * QorIQ 10G MDIO Controller
3 *
4 * Copyright 2012 Freescale Semiconductor, Inc.
5 *
6 * Authors: Andy Fleming <afleming@freescale.com>
7 * Timur Tabi <timur@freescale.com>
8 *
9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/interrupt.h>
17#include <linux/module.h>
18#include <linux/phy.h>
19#include <linux/mdio.h>
20#include <linux/of_platform.h>
21#include <linux/of_mdio.h>
22
23/* Number of microseconds to wait for a register to respond */
24#define TIMEOUT 1000
25
26struct tgec_mdio_controller {
27 __be32 reserved[12];
28 __be32 mdio_stat; /* MDIO configuration and status */
29 __be32 mdio_ctl; /* MDIO control */
30 __be32 mdio_data; /* MDIO data */
31 __be32 mdio_addr; /* MDIO address */
32} __packed;
33
34#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
35#define MDIO_STAT_BSY (1 << 0)
36#define MDIO_STAT_RD_ER (1 << 1)
37#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
38#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
39#define MDIO_CTL_PRE_DIS (1 << 10)
40#define MDIO_CTL_SCAN_EN (1 << 11)
41#define MDIO_CTL_POST_INC (1 << 14)
42#define MDIO_CTL_READ (1 << 15)
43
44#define MDIO_DATA(x) (x & 0xffff)
45#define MDIO_DATA_BSY (1 << 31)
46
47/*
48 * Wait untill the MDIO bus is free
49 */
50static int xgmac_wait_until_free(struct device *dev,
51 struct tgec_mdio_controller __iomem *regs)
52{
53 uint32_t status;
54
55 /* Wait till the bus is free */
56 status = spin_event_timeout(
57 !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
58 if (!status) {
59 dev_err(dev, "timeout waiting for bus to be free\n");
60 return -ETIMEDOUT;
61 }
62
63 return 0;
64}
65
66/*
67 * Wait till the MDIO read or write operation is complete
68 */
69static int xgmac_wait_until_done(struct device *dev,
70 struct tgec_mdio_controller __iomem *regs)
71{
72 uint32_t status;
73
74 /* Wait till the MDIO write is complete */
75 status = spin_event_timeout(
76 !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
77 if (!status) {
78 dev_err(dev, "timeout waiting for operation to complete\n");
79 return -ETIMEDOUT;
80 }
81
82 return 0;
83}
84
85/*
86 * Write value to the PHY for this device to the register at regnum,waiting
87 * until the write is done before it returns. All PHY configuration has to be
88 * done through the TSEC1 MIIM regs.
89 */
90static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
91{
92 struct tgec_mdio_controller __iomem *regs = bus->priv;
93 uint16_t dev_addr = regnum >> 16;
94 int ret;
95
96 /* Setup the MII Mgmt clock speed */
97 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
98
99 ret = xgmac_wait_until_free(&bus->dev, regs);
100 if (ret)
101 return ret;
102
103 /* Set the port and dev addr */
104 out_be32(&regs->mdio_ctl,
105 MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
106
107 /* Set the register address */
108 out_be32(&regs->mdio_addr, regnum & 0xffff);
109
110 ret = xgmac_wait_until_free(&bus->dev, regs);
111 if (ret)
112 return ret;
113
114 /* Write the value to the register */
115 out_be32(&regs->mdio_data, MDIO_DATA(value));
116
117 ret = xgmac_wait_until_done(&bus->dev, regs);
118 if (ret)
119 return ret;
120
121 return 0;
122}
123
124/*
125 * Reads from register regnum in the PHY for device dev, returning the value.
126 * Clears miimcom first. All PHY configuration has to be done through the
127 * TSEC1 MIIM regs.
128 */
129static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
130{
131 struct tgec_mdio_controller __iomem *regs = bus->priv;
132 uint16_t dev_addr = regnum >> 16;
133 uint32_t mdio_ctl;
134 uint16_t value;
135 int ret;
136
137 /* Setup the MII Mgmt clock speed */
138 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
139
140 ret = xgmac_wait_until_free(&bus->dev, regs);
141 if (ret)
142 return ret;
143
144 /* Set the Port and Device Addrs */
145 mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
146 out_be32(&regs->mdio_ctl, mdio_ctl);
147
148 /* Set the register address */
149 out_be32(&regs->mdio_addr, regnum & 0xffff);
150
151 ret = xgmac_wait_until_free(&bus->dev, regs);
152 if (ret)
153 return ret;
154
155 /* Initiate the read */
156 out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
157
158 ret = xgmac_wait_until_done(&bus->dev, regs);
159 if (ret)
160 return ret;
161
162 /* Return all Fs if nothing was there */
163 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
164 dev_err(&bus->dev, "MDIO read error\n");
165 return 0xffff;
166 }
167
168 value = in_be32(&regs->mdio_data) & 0xffff;
169 dev_dbg(&bus->dev, "read %04x\n", value);
170
171 return value;
172}
173
174/* Reset the MIIM registers, and wait for the bus to free */
175static int xgmac_mdio_reset(struct mii_bus *bus)
176{
177 struct tgec_mdio_controller __iomem *regs = bus->priv;
178 int ret;
179
180 mutex_lock(&bus->mdio_lock);
181
182 /* Setup the MII Mgmt clock speed */
183 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
184
185 ret = xgmac_wait_until_free(&bus->dev, regs);
186
187 mutex_unlock(&bus->mdio_lock);
188
189 return ret;
190}
191
192static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
193{
194 struct device_node *np = pdev->dev.of_node;
195 struct mii_bus *bus;
196 struct resource res;
197 int ret;
198
199 ret = of_address_to_resource(np, 0, &res);
200 if (ret) {
201 dev_err(&pdev->dev, "could not obtain address\n");
202 return ret;
203 }
204
205 bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
206 if (!bus)
207 return -ENOMEM;
208
209 bus->name = "Freescale XGMAC MDIO Bus";
210 bus->read = xgmac_mdio_read;
211 bus->write = xgmac_mdio_write;
212 bus->reset = xgmac_mdio_reset;
213 bus->irq = bus->priv;
214 bus->parent = &pdev->dev;
215 snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
216
217 /* Set the PHY base address */
218 bus->priv = of_iomap(np, 0);
219 if (!bus->priv) {
220 ret = -ENOMEM;
221 goto err_ioremap;
222 }
223
224 ret = of_mdiobus_register(bus, np);
225 if (ret) {
226 dev_err(&pdev->dev, "cannot register MDIO bus\n");
227 goto err_registration;
228 }
229
230 dev_set_drvdata(&pdev->dev, bus);
231
232 return 0;
233
234err_registration:
235 iounmap(bus->priv);
236
237err_ioremap:
238 mdiobus_free(bus);
239
240 return ret;
241}
242
243static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
244{
245 struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
246
247 mdiobus_unregister(bus);
248 iounmap(bus->priv);
249 mdiobus_free(bus);
250
251 return 0;
252}
253
254static struct of_device_id xgmac_mdio_match[] = {
255 {
256 .compatible = "fsl,fman-xmdio",
257 },
258 {},
259};
260MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
261
262static struct platform_driver xgmac_mdio_driver = {
263 .driver = {
264 .name = "fsl-fman_xmdio",
265 .of_match_table = xgmac_mdio_match,
266 },
267 .probe = xgmac_mdio_probe,
268 .remove = xgmac_mdio_remove,
269};
270
271module_platform_driver(xgmac_mdio_driver);
272
273MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
274MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index fed5080a6b62..959faf7388e2 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -150,7 +150,7 @@ config SUN3_82586
150 150
151config ZNET 151config ZNET
152 tristate "Zenith Z-Note support (EXPERIMENTAL)" 152 tristate "Zenith Z-Note support (EXPERIMENTAL)"
153 depends on EXPERIMENTAL && ISA_DMA_API 153 depends on EXPERIMENTAL && ISA_DMA_API && X86
154 ---help--- 154 ---help---
155 The Zenith Z-Note notebook computer has a built-in network 155 The Zenith Z-Note notebook computer has a built-in network
156 (Ethernet) card, and this is the Linux driver for it. Note that the 156 (Ethernet) card, and this is the Linux driver for it. Note that the
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
index ba4e0cea3506..c9479e081b8a 100644
--- a/drivers/net/ethernet/i825xx/znet.c
+++ b/drivers/net/ethernet/i825xx/znet.c
@@ -865,14 +865,14 @@ static void hardware_init(struct net_device *dev)
865 disable_dma(znet->rx_dma); /* reset by an interrupting task. */ 865 disable_dma(znet->rx_dma); /* reset by an interrupting task. */
866 clear_dma_ff(znet->rx_dma); 866 clear_dma_ff(znet->rx_dma);
867 set_dma_mode(znet->rx_dma, DMA_RX_MODE); 867 set_dma_mode(znet->rx_dma, DMA_RX_MODE);
868 set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start); 868 set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
869 set_dma_count(znet->rx_dma, RX_BUF_SIZE); 869 set_dma_count(znet->rx_dma, RX_BUF_SIZE);
870 enable_dma(znet->rx_dma); 870 enable_dma(znet->rx_dma);
871 /* Now set up the Tx channel. */ 871 /* Now set up the Tx channel. */
872 disable_dma(znet->tx_dma); 872 disable_dma(znet->tx_dma);
873 clear_dma_ff(znet->tx_dma); 873 clear_dma_ff(znet->tx_dma);
874 set_dma_mode(znet->tx_dma, DMA_TX_MODE); 874 set_dma_mode(znet->tx_dma, DMA_TX_MODE);
875 set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start); 875 set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
876 set_dma_count(znet->tx_dma, znet->tx_buf_len<<1); 876 set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
877 enable_dma(znet->tx_dma); 877 enable_dma(znet->tx_dma);
878 release_dma_lock(flags); 878 release_dma_lock(flags);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 736a7d987db5..9089d00f1421 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev,
174 174
175 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || 175 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
176 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 176 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
177
178 /* MDI-X => 1; MDI => 0 */
179 if ((hw->media_type == e1000_media_type_copper) &&
180 netif_carrier_ok(netdev))
181 ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
182 ETH_TP_MDI_X :
183 ETH_TP_MDI);
184 else
185 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
186
187 if (hw->mdix == AUTO_ALL_MODES)
188 ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
189 else
190 ecmd->eth_tp_mdix_ctrl = hw->mdix;
177 return 0; 191 return 0;
178} 192}
179 193
@@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev,
183 struct e1000_adapter *adapter = netdev_priv(netdev); 197 struct e1000_adapter *adapter = netdev_priv(netdev);
184 struct e1000_hw *hw = &adapter->hw; 198 struct e1000_hw *hw = &adapter->hw;
185 199
200 /*
201 * MDI setting is only allowed when autoneg enabled because
202 * some hardware doesn't allow MDI setting when speed or
203 * duplex is forced.
204 */
205 if (ecmd->eth_tp_mdix_ctrl) {
206 if (hw->media_type != e1000_media_type_copper)
207 return -EOPNOTSUPP;
208
209 if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
210 (ecmd->autoneg != AUTONEG_ENABLE)) {
211 e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
212 return -EINVAL;
213 }
214 }
215
186 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 216 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
187 msleep(1); 217 msleep(1);
188 218
@@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev,
199 ecmd->advertising = hw->autoneg_advertised; 229 ecmd->advertising = hw->autoneg_advertised;
200 } else { 230 } else {
201 u32 speed = ethtool_cmd_speed(ecmd); 231 u32 speed = ethtool_cmd_speed(ecmd);
232 /* calling this overrides forced MDI setting */
202 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { 233 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
203 clear_bit(__E1000_RESETTING, &adapter->flags); 234 clear_bit(__E1000_RESETTING, &adapter->flags);
204 return -EINVAL; 235 return -EINVAL;
205 } 236 }
206 } 237 }
207 238
239 /* MDI-X => 2; MDI => 1; Auto => 3 */
240 if (ecmd->eth_tp_mdix_ctrl) {
241 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
242 hw->mdix = AUTO_ALL_MODES;
243 else
244 hw->mdix = ecmd->eth_tp_mdix_ctrl;
245 }
246
208 /* reset the link */ 247 /* reset the link */
209 248
210 if (netif_running(adapter->netdev)) { 249 if (netif_running(adapter->netdev)) {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f3f9aeb7d1e1..222bfaff4622 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2014,6 +2014,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2014 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2014 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2015 } 2015 }
2016 2016
2017 netdev_reset_queue(adapter->netdev);
2017 size = sizeof(struct e1000_buffer) * tx_ring->count; 2018 size = sizeof(struct e1000_buffer) * tx_ring->count;
2018 memset(tx_ring->buffer_info, 0, size); 2019 memset(tx_ring->buffer_info, 0, size);
2019 2020
@@ -3273,6 +3274,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3273 nr_frags, mss); 3274 nr_frags, mss);
3274 3275
3275 if (count) { 3276 if (count) {
3277 netdev_sent_queue(netdev, skb->len);
3276 skb_tx_timestamp(skb); 3278 skb_tx_timestamp(skb);
3277 3279
3278 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3280 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
@@ -3860,6 +3862,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3860 unsigned int i, eop; 3862 unsigned int i, eop;
3861 unsigned int count = 0; 3863 unsigned int count = 0;
3862 unsigned int total_tx_bytes=0, total_tx_packets=0; 3864 unsigned int total_tx_bytes=0, total_tx_packets=0;
3865 unsigned int bytes_compl = 0, pkts_compl = 0;
3863 3866
3864 i = tx_ring->next_to_clean; 3867 i = tx_ring->next_to_clean;
3865 eop = tx_ring->buffer_info[i].next_to_watch; 3868 eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3877,6 +3880,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3877 if (cleaned) { 3880 if (cleaned) {
3878 total_tx_packets += buffer_info->segs; 3881 total_tx_packets += buffer_info->segs;
3879 total_tx_bytes += buffer_info->bytecount; 3882 total_tx_bytes += buffer_info->bytecount;
3883 if (buffer_info->skb) {
3884 bytes_compl += buffer_info->skb->len;
3885 pkts_compl++;
3886 }
3887
3880 } 3888 }
3881 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3889 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3882 tx_desc->upper.data = 0; 3890 tx_desc->upper.data = 0;
@@ -3890,6 +3898,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3890 3898
3891 tx_ring->next_to_clean = i; 3899 tx_ring->next_to_clean = i;
3892 3900
3901 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3902
3893#define TX_WAKE_THRESHOLD 32 3903#define TX_WAKE_THRESHOLD 32
3894 if (unlikely(count && netif_carrier_ok(netdev) && 3904 if (unlikely(count && netif_carrier_ok(netdev) &&
3895 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3905 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
@@ -4950,6 +4960,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4950 default: 4960 default:
4951 goto err_inval; 4961 goto err_inval;
4952 } 4962 }
4963
4964 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4965 hw->mdix = AUTO_ALL_MODES;
4966
4953 return 0; 4967 return 0;
4954 4968
4955err_inval: 4969err_inval:
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 080c89093feb..c98586408005 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
653 **/ 653 **/
654static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) 654static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
655{ 655{
656 u16 data = er32(POEMB); 656 u32 data = er32(POEMB);
657 657
658 if (active) 658 if (active)
659 data |= E1000_PHY_CTRL_D0A_LPLU; 659 data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
677 **/ 677 **/
678static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) 678static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
679{ 679{
680 u16 data = er32(POEMB); 680 u32 data = er32(POEMB);
681 681
682 if (!active) { 682 if (!active) {
683 data &= ~E1000_PHY_CTRL_NOND0A_LPLU; 683 data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 0349e2478df8..c11ac2756667 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev,
199 else 199 else
200 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; 200 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
201 201
202 if (hw->phy.mdix == AUTO_ALL_MODES)
203 ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
204 else
205 ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
206
202 return 0; 207 return 0;
203} 208}
204 209
@@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
241 default: 246 default:
242 goto err_inval; 247 goto err_inval;
243 } 248 }
249
250 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
251 adapter->hw.phy.mdix = AUTO_ALL_MODES;
252
244 return 0; 253 return 0;
245 254
246err_inval: 255err_inval:
@@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev,
264 return -EINVAL; 273 return -EINVAL;
265 } 274 }
266 275
276 /*
277 * MDI setting is only allowed when autoneg enabled because
278 * some hardware doesn't allow MDI setting when speed or
279 * duplex is forced.
280 */
281 if (ecmd->eth_tp_mdix_ctrl) {
282 if (hw->phy.media_type != e1000_media_type_copper)
283 return -EOPNOTSUPP;
284
285 if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
286 (ecmd->autoneg != AUTONEG_ENABLE)) {
287 e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
288 return -EINVAL;
289 }
290 }
291
267 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 292 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
268 usleep_range(1000, 2000); 293 usleep_range(1000, 2000);
269 294
@@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev,
282 hw->fc.requested_mode = e1000_fc_default; 307 hw->fc.requested_mode = e1000_fc_default;
283 } else { 308 } else {
284 u32 speed = ethtool_cmd_speed(ecmd); 309 u32 speed = ethtool_cmd_speed(ecmd);
310 /* calling this overrides forced MDI setting */
285 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { 311 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
286 clear_bit(__E1000_RESETTING, &adapter->state); 312 clear_bit(__E1000_RESETTING, &adapter->state);
287 return -EINVAL; 313 return -EINVAL;
288 } 314 }
289 } 315 }
290 316
317 /* MDI-X => 2; MDI => 1; Auto => 3 */
318 if (ecmd->eth_tp_mdix_ctrl) {
319 /*
320 * fix up the value for auto (3 => 0) as zero is mapped
321 * internally to auto
322 */
323 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
324 hw->phy.mdix = AUTO_ALL_MODES;
325 else
326 hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
327 }
328
291 /* reset the link */ 329 /* reset the link */
292 330
293 if (netif_running(adapter->netdev)) { 331 if (netif_running(adapter->netdev)) {
294 e1000e_down(adapter); 332 e1000e_down(adapter);
295 e1000e_up(adapter); 333 e1000e_up(adapter);
296 } else { 334 } else
297 e1000e_reset(adapter); 335 e1000e_reset(adapter);
298 }
299 336
300 clear_bit(__E1000_RESETTING, &adapter->state); 337 clear_bit(__E1000_RESETTING, &adapter->state);
301 return 0; 338 return 0;
@@ -1905,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
1905 return -EINVAL; 1942 return -EINVAL;
1906 1943
1907 if (ec->rx_coalesce_usecs == 4) { 1944 if (ec->rx_coalesce_usecs == 4) {
1908 adapter->itr = adapter->itr_setting = 4; 1945 adapter->itr_setting = 4;
1946 adapter->itr = adapter->itr_setting;
1909 } else if (ec->rx_coalesce_usecs <= 3) { 1947 } else if (ec->rx_coalesce_usecs <= 3) {
1910 adapter->itr = 20000; 1948 adapter->itr = 20000;
1911 adapter->itr_setting = ec->rx_coalesce_usecs; 1949 adapter->itr_setting = ec->rx_coalesce_usecs;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3f0223ac4c7c..fb659dd8db03 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION 59#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3446 3446
3447 /* 3447 /*
3448 * if short on Rx space, Rx wins and must trump Tx 3448 * if short on Rx space, Rx wins and must trump Tx
3449 * adjustment or use Early Receive if available 3449 * adjustment
3450 */ 3450 */
3451 if (pba < min_rx_space) 3451 if (pba < min_rx_space)
3452 pba = min_rx_space; 3452 pba = min_rx_space;
@@ -3755,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3755 e_dbg("icr is %08X\n", icr); 3755 e_dbg("icr is %08X\n", icr);
3756 if (icr & E1000_ICR_RXSEQ) { 3756 if (icr & E1000_ICR_RXSEQ) {
3757 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 3757 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3758 /*
3759 * Force memory writes to complete before acknowledging the
3760 * interrupt is handled.
3761 */
3758 wmb(); 3762 wmb();
3759 } 3763 }
3760 3764
@@ -3796,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3796 goto msi_test_failed; 3800 goto msi_test_failed;
3797 } 3801 }
3798 3802
3803 /*
3804 * Force memory writes to complete before enabling and firing an
3805 * interrupt.
3806 */
3799 wmb(); 3807 wmb();
3800 3808
3801 e1000_irq_enable(adapter); 3809 e1000_irq_enable(adapter);
@@ -3807,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3807 3815
3808 e1000_irq_disable(adapter); 3816 e1000_irq_disable(adapter);
3809 3817
3810 rmb(); 3818 rmb(); /* read flags after interrupt has been fired */
3811 3819
3812 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 3820 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3813 adapter->int_mode = E1000E_INT_MODE_LEGACY; 3821 adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4670,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4670 struct e1000_buffer *buffer_info; 4678 struct e1000_buffer *buffer_info;
4671 unsigned int i; 4679 unsigned int i;
4672 u32 cmd_length = 0; 4680 u32 cmd_length = 0;
4673 u16 ipcse = 0, tucse, mss; 4681 u16 ipcse = 0, mss;
4674 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4682 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4675 4683
4676 if (!skb_is_gso(skb)) 4684 if (!skb_is_gso(skb))
@@ -4704,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4704 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 4712 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4705 tucss = skb_transport_offset(skb); 4713 tucss = skb_transport_offset(skb);
4706 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 4714 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4707 tucse = 0;
4708 4715
4709 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 4716 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4710 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 4717 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -4718,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4718 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 4725 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4719 context_desc->upper_setup.tcp_fields.tucss = tucss; 4726 context_desc->upper_setup.tcp_fields.tucss = tucss;
4720 context_desc->upper_setup.tcp_fields.tucso = tucso; 4727 context_desc->upper_setup.tcp_fields.tucso = tucso;
4721 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 4728 context_desc->upper_setup.tcp_fields.tucse = 0;
4722 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 4729 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4723 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 4730 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4724 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 4731 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b860d4f7ea2a..fc62a3f3a5be 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
84#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 84#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
85 85
86/* I82577 PHY Control 2 */ 86/* I82577 PHY Control 2 */
87#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 87#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
88#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 88#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
89#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
89 90
90/* I82577 PHY Diagnostics Status */ 91/* I82577 PHY Diagnostics Status */
91#define I82577_DSTATUS_CABLE_LENGTH 0x03FC 92#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
@@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
702 if (ret_val) 703 if (ret_val)
703 return ret_val; 704 return ret_val;
704 705
706 /* Set MDI/MDIX mode */
707 ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
708 if (ret_val)
709 return ret_val;
710 phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
711 /*
712 * Options:
713 * 0 - Auto (default)
714 * 1 - MDI mode
715 * 2 - MDI-X mode
716 */
717 switch (hw->phy.mdix) {
718 case 1:
719 break;
720 case 2:
721 phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
722 break;
723 case 0:
724 default:
725 phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
726 break;
727 }
728 ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
729 if (ret_val)
730 return ret_val;
731
705 return e1000_set_master_slave_mode(hw); 732 return e1000_set_master_slave_mode(hw);
706} 733}
707 734
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ba994fb4cec6..ca4641e2f748 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2223,11 +2223,10 @@ out:
2223s32 igb_set_eee_i350(struct e1000_hw *hw) 2223s32 igb_set_eee_i350(struct e1000_hw *hw)
2224{ 2224{
2225 s32 ret_val = 0; 2225 s32 ret_val = 0;
2226 u32 ipcnfg, eeer, ctrl_ext; 2226 u32 ipcnfg, eeer;
2227 2227
2228 ctrl_ext = rd32(E1000_CTRL_EXT); 2228 if ((hw->mac.type < e1000_i350) ||
2229 if ((hw->mac.type != e1000_i350) || 2229 (hw->phy.media_type != e1000_media_type_copper))
2230 (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
2231 goto out; 2230 goto out;
2232 ipcnfg = rd32(E1000_IPCNFG); 2231 ipcnfg = rd32(E1000_IPCNFG);
2233 eeer = rd32(E1000_EEER); 2232 eeer = rd32(E1000_EEER);
@@ -2240,6 +2239,14 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2240 E1000_EEER_RX_LPI_EN | 2239 E1000_EEER_RX_LPI_EN |
2241 E1000_EEER_LPI_FC); 2240 E1000_EEER_LPI_FC);
2242 2241
2242 /* keep the LPI clock running before EEE is enabled */
2243 if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
2244 u32 eee_su;
2245 eee_su = rd32(E1000_EEE_SU);
2246 eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
2247 wr32(E1000_EEE_SU, eee_su);
2248 }
2249
2243 } else { 2250 } else {
2244 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2251 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2245 E1000_IPCNFG_EEE_100M_AN); 2252 E1000_IPCNFG_EEE_100M_AN);
@@ -2249,6 +2256,8 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2249 } 2256 }
2250 wr32(E1000_IPCNFG, ipcnfg); 2257 wr32(E1000_IPCNFG, ipcnfg);
2251 wr32(E1000_EEER, eeer); 2258 wr32(E1000_EEER, eeer);
2259 rd32(E1000_IPCNFG);
2260 rd32(E1000_EEER);
2252out: 2261out:
2253 2262
2254 return ret_val; 2263 return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index ec7e4fe3e3ee..de4b41ec3c40 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -322,6 +322,9 @@
322#define E1000_FCRTC_RTH_COAL_SHIFT 4 322#define E1000_FCRTC_RTH_COAL_SHIFT 4
323#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ 323#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
324 324
325/* Timestamp in Rx buffer */
326#define E1000_RXPBS_CFG_TS_EN 0x80000000
327
325/* SerDes Control */ 328/* SerDes Control */
326#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 329#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
327 330
@@ -360,6 +363,7 @@
360#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 363#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
361#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 364#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
362#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ 365#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
366#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
363#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ 367#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
364/* If this bit asserted, the driver should claim the interrupt */ 368/* If this bit asserted, the driver should claim the interrupt */
365#define E1000_ICR_INT_ASSERTED 0x80000000 369#define E1000_ICR_INT_ASSERTED 0x80000000
@@ -399,6 +403,7 @@
399#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 403#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
400#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 404#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
401#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ 405#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
406#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
402#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 407#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
403#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 408#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
404#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 409#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
@@ -510,6 +515,9 @@
510 515
511#define E1000_TIMINCA_16NS_SHIFT 24 516#define E1000_TIMINCA_16NS_SHIFT 24
512 517
518#define E1000_TSICR_TXTS 0x00000002
519#define E1000_TSIM_TXTS 0x00000002
520
513#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ 521#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
514#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ 522#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
515#define E1000_MDICNFG_PHY_MASK 0x03E00000 523#define E1000_MDICNFG_PHY_MASK 0x03E00000
@@ -849,8 +857,9 @@
849#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ 857#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
850#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ 858#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
851#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ 859#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
852#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ 860#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
853#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ 861#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
862#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
854 863
855/* SerDes Control */ 864/* SerDes Control */
856#define E1000_GEN_CTL_READY 0x80000000 865#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 7be98b6f1052..3404bc79f4ca 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
464 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; 464 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
465 465
466 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); 466 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
467 if (ret_val)
468 goto out;
469
470 /* Set MDI/MDIX mode */
471 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
472 if (ret_val)
473 goto out;
474 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
475 /*
476 * Options:
477 * 0 - Auto (default)
478 * 1 - MDI mode
479 * 2 - MDI-X mode
480 */
481 switch (hw->phy.mdix) {
482 case 1:
483 break;
484 case 2:
485 phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
486 break;
487 case 0:
488 default:
489 phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
490 break;
491 }
492 ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
467 493
468out: 494out:
469 return ret_val; 495 return ret_val;
@@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2246 if (ret_val) 2272 if (ret_val)
2247 goto out; 2273 goto out;
2248 2274
2249 phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; 2275 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
2250 phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
2251 2276
2252 ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); 2277 ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
2253 if (ret_val) 2278 if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 34e40619f16b..6ac3299bfcb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -111,8 +111,9 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
111#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 111#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
112 112
113/* I82580 PHY Control 2 */ 113/* I82580 PHY Control 2 */
114#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 114#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200
115#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 115#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
116#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600
116 117
117/* I82580 PHY Diagnostics Status */ 118/* I82580 PHY Diagnostics Status */
118#define I82580_DSTATUS_CABLE_LENGTH 0x03FC 119#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 28394bea5253..e5db48594e8a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -91,6 +91,8 @@
91#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ 91#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
92#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ 92#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
93#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ 93#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
94#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
95#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
94 96
95/* Filtering Registers */ 97/* Filtering Registers */
96#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) 98#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -347,6 +349,7 @@
347/* Energy Efficient Ethernet "EEE" register */ 349/* Energy Efficient Ethernet "EEE" register */
348#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ 350#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
349#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ 351#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
352#define E1000_EEE_SU 0X0E34 /* EEE Setup */
350 353
351/* Thermal Sensor Register */ 354/* Thermal Sensor Register */
352#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ 355#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9e572dd29ab2..8aad230c0592 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,9 +34,11 @@
34#include "e1000_mac.h" 34#include "e1000_mac.h"
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37#ifdef CONFIG_IGB_PTP
37#include <linux/clocksource.h> 38#include <linux/clocksource.h>
38#include <linux/net_tstamp.h> 39#include <linux/net_tstamp.h>
39#include <linux/ptp_clock_kernel.h> 40#include <linux/ptp_clock_kernel.h>
41#endif /* CONFIG_IGB_PTP */
40#include <linux/bitops.h> 42#include <linux/bitops.h>
41#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
42 44
@@ -99,7 +101,6 @@ struct vf_data_storage {
99 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 101 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
100 u16 pf_qos; 102 u16 pf_qos;
101 u16 tx_rate; 103 u16 tx_rate;
102 struct pci_dev *vfdev;
103}; 104};
104 105
105#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ 106#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -131,9 +132,9 @@ struct vf_data_storage {
131#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 132#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
132 133
133/* Supported Rx Buffer Sizes */ 134/* Supported Rx Buffer Sizes */
134#define IGB_RXBUFFER_512 512 135#define IGB_RXBUFFER_256 256
135#define IGB_RXBUFFER_16384 16384 136#define IGB_RXBUFFER_16384 16384
136#define IGB_RX_HDR_LEN IGB_RXBUFFER_512 137#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
137 138
138/* How many Tx Descriptors do we need to call netif_wake_queue ? */ 139/* How many Tx Descriptors do we need to call netif_wake_queue ? */
139#define IGB_TX_QUEUE_WAKE 16 140#define IGB_TX_QUEUE_WAKE 16
@@ -167,8 +168,8 @@ struct igb_tx_buffer {
167 unsigned int bytecount; 168 unsigned int bytecount;
168 u16 gso_segs; 169 u16 gso_segs;
169 __be16 protocol; 170 __be16 protocol;
170 dma_addr_t dma; 171 DEFINE_DMA_UNMAP_ADDR(dma);
171 u32 length; 172 DEFINE_DMA_UNMAP_LEN(len);
172 u32 tx_flags; 173 u32 tx_flags;
173}; 174};
174 175
@@ -212,7 +213,6 @@ struct igb_q_vector {
212 struct igb_ring_container rx, tx; 213 struct igb_ring_container rx, tx;
213 214
214 struct napi_struct napi; 215 struct napi_struct napi;
215 int numa_node;
216 216
217 u16 itr_val; 217 u16 itr_val;
218 u8 set_itr; 218 u8 set_itr;
@@ -257,7 +257,6 @@ struct igb_ring {
257 }; 257 };
258 /* Items past this point are only used during ring alloc / free */ 258 /* Items past this point are only used during ring alloc / free */
259 dma_addr_t dma; /* phys address of the ring */ 259 dma_addr_t dma; /* phys address of the ring */
260 int numa_node; /* node to alloc ring memory on */
261}; 260};
262 261
263enum e1000_ring_flags_t { 262enum e1000_ring_flags_t {
@@ -342,7 +341,6 @@ struct igb_adapter {
342 341
343 /* OS defined structs */ 342 /* OS defined structs */
344 struct pci_dev *pdev; 343 struct pci_dev *pdev;
345 struct hwtstamp_config hwtstamp_config;
346 344
347 spinlock_t stats64_lock; 345 spinlock_t stats64_lock;
348 struct rtnl_link_stats64 stats64; 346 struct rtnl_link_stats64 stats64;
@@ -373,15 +371,19 @@ struct igb_adapter {
373 int vf_rate_link_speed; 371 int vf_rate_link_speed;
374 u32 rss_queues; 372 u32 rss_queues;
375 u32 wvbr; 373 u32 wvbr;
376 int node;
377 u32 *shadow_vfta; 374 u32 *shadow_vfta;
378 375
376#ifdef CONFIG_IGB_PTP
379 struct ptp_clock *ptp_clock; 377 struct ptp_clock *ptp_clock;
380 struct ptp_clock_info caps; 378 struct ptp_clock_info ptp_caps;
381 struct delayed_work overflow_work; 379 struct delayed_work ptp_overflow_work;
380 struct work_struct ptp_tx_work;
381 struct sk_buff *ptp_tx_skb;
382 spinlock_t tmreg_lock; 382 spinlock_t tmreg_lock;
383 struct cyclecounter cc; 383 struct cyclecounter cc;
384 struct timecounter tc; 384 struct timecounter tc;
385#endif /* CONFIG_IGB_PTP */
386
385 char fw_version[32]; 387 char fw_version[32];
386}; 388};
387 389
@@ -390,6 +392,7 @@ struct igb_adapter {
390#define IGB_FLAG_QUAD_PORT_A (1 << 2) 392#define IGB_FLAG_QUAD_PORT_A (1 << 2)
391#define IGB_FLAG_QUEUE_PAIRS (1 << 3) 393#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
392#define IGB_FLAG_DMAC (1 << 4) 394#define IGB_FLAG_DMAC (1 << 4)
395#define IGB_FLAG_PTP (1 << 5)
393 396
394/* DMA Coalescing defines */ 397/* DMA Coalescing defines */
395#define IGB_MIN_TXPBSIZE 20408 398#define IGB_MIN_TXPBSIZE 20408
@@ -435,13 +438,17 @@ extern void igb_power_up_link(struct igb_adapter *);
435extern void igb_set_fw_version(struct igb_adapter *); 438extern void igb_set_fw_version(struct igb_adapter *);
436#ifdef CONFIG_IGB_PTP 439#ifdef CONFIG_IGB_PTP
437extern void igb_ptp_init(struct igb_adapter *adapter); 440extern void igb_ptp_init(struct igb_adapter *adapter);
438extern void igb_ptp_remove(struct igb_adapter *adapter); 441extern void igb_ptp_stop(struct igb_adapter *adapter);
439 442extern void igb_ptp_reset(struct igb_adapter *adapter);
440extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter, 443extern void igb_ptp_tx_work(struct work_struct *work);
441 struct skb_shared_hwtstamps *hwtstamps, 444extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
442 u64 systim); 445extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
446 union e1000_adv_rx_desc *rx_desc,
447 struct sk_buff *skb);
448extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
449 struct ifreq *ifr, int cmd);
450#endif /* CONFIG_IGB_PTP */
443 451
444#endif
445static inline s32 igb_reset_phy(struct e1000_hw *hw) 452static inline s32 igb_reset_phy(struct e1000_hw *hw)
446{ 453{
447 if (hw->phy.ops.reset) 454 if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 70591117051b..2ea012849825 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -148,9 +148,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
148 SUPPORTED_100baseT_Full | 148 SUPPORTED_100baseT_Full |
149 SUPPORTED_1000baseT_Full| 149 SUPPORTED_1000baseT_Full|
150 SUPPORTED_Autoneg | 150 SUPPORTED_Autoneg |
151 SUPPORTED_TP); 151 SUPPORTED_TP |
152 ecmd->advertising = (ADVERTISED_TP | 152 SUPPORTED_Pause);
153 ADVERTISED_Pause); 153 ecmd->advertising = ADVERTISED_TP;
154 154
155 if (hw->mac.autoneg == 1) { 155 if (hw->mac.autoneg == 1) {
156 ecmd->advertising |= ADVERTISED_Autoneg; 156 ecmd->advertising |= ADVERTISED_Autoneg;
@@ -158,6 +158,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
158 ecmd->advertising |= hw->phy.autoneg_advertised; 158 ecmd->advertising |= hw->phy.autoneg_advertised;
159 } 159 }
160 160
161 if (hw->mac.autoneg != 1)
162 ecmd->advertising &= ~(ADVERTISED_Pause |
163 ADVERTISED_Asym_Pause);
164
165 if (hw->fc.requested_mode == e1000_fc_full)
166 ecmd->advertising |= ADVERTISED_Pause;
167 else if (hw->fc.requested_mode == e1000_fc_rx_pause)
168 ecmd->advertising |= (ADVERTISED_Pause |
169 ADVERTISED_Asym_Pause);
170 else if (hw->fc.requested_mode == e1000_fc_tx_pause)
171 ecmd->advertising |= ADVERTISED_Asym_Pause;
172 else
173 ecmd->advertising &= ~(ADVERTISED_Pause |
174 ADVERTISED_Asym_Pause);
175
161 ecmd->port = PORT_TP; 176 ecmd->port = PORT_TP;
162 ecmd->phy_address = hw->phy.addr; 177 ecmd->phy_address = hw->phy.addr;
163 } else { 178 } else {
@@ -198,6 +213,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
198 } 213 }
199 214
200 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 215 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
216
217 /* MDI-X => 2; MDI =>1; Invalid =>0 */
218 if (hw->phy.media_type == e1000_media_type_copper)
219 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
220 ETH_TP_MDI;
221 else
222 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
223
224 if (hw->phy.mdix == AUTO_ALL_MODES)
225 ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
226 else
227 ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
228
201 return 0; 229 return 0;
202} 230}
203 231
@@ -214,6 +242,22 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
214 return -EINVAL; 242 return -EINVAL;
215 } 243 }
216 244
245 /*
246 * MDI setting is only allowed when autoneg enabled because
247 * some hardware doesn't allow MDI setting when speed or
248 * duplex is forced.
249 */
250 if (ecmd->eth_tp_mdix_ctrl) {
251 if (hw->phy.media_type != e1000_media_type_copper)
252 return -EOPNOTSUPP;
253
254 if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
255 (ecmd->autoneg != AUTONEG_ENABLE)) {
256 dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
257 return -EINVAL;
258 }
259 }
260
217 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 261 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
218 msleep(1); 262 msleep(1);
219 263
@@ -227,12 +271,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
227 hw->fc.requested_mode = e1000_fc_default; 271 hw->fc.requested_mode = e1000_fc_default;
228 } else { 272 } else {
229 u32 speed = ethtool_cmd_speed(ecmd); 273 u32 speed = ethtool_cmd_speed(ecmd);
274 /* calling this overrides forced MDI setting */
230 if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { 275 if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
231 clear_bit(__IGB_RESETTING, &adapter->state); 276 clear_bit(__IGB_RESETTING, &adapter->state);
232 return -EINVAL; 277 return -EINVAL;
233 } 278 }
234 } 279 }
235 280
281 /* MDI-X => 2; MDI => 1; Auto => 3 */
282 if (ecmd->eth_tp_mdix_ctrl) {
283 /*
284 * fix up the value for auto (3 => 0) as zero is mapped
285 * internally to auto
286 */
287 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
288 hw->phy.mdix = AUTO_ALL_MODES;
289 else
290 hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
291 }
292
236 /* reset the link */ 293 /* reset the link */
237 if (netif_running(adapter->netdev)) { 294 if (netif_running(adapter->netdev)) {
238 igb_down(adapter); 295 igb_down(adapter);
@@ -1469,33 +1526,22 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1469{ 1526{
1470 struct e1000_hw *hw = &adapter->hw; 1527 struct e1000_hw *hw = &adapter->hw;
1471 u32 ctrl_reg = 0; 1528 u32 ctrl_reg = 0;
1472 u16 phy_reg = 0;
1473 1529
1474 hw->mac.autoneg = false; 1530 hw->mac.autoneg = false;
1475 1531
1476 switch (hw->phy.type) { 1532 if (hw->phy.type == e1000_phy_m88) {
1477 case e1000_phy_m88: 1533 if (hw->phy.id != I210_I_PHY_ID) {
1478 /* Auto-MDI/MDIX Off */ 1534 /* Auto-MDI/MDIX Off */
1479 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1535 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1480 /* reset to update Auto-MDI/MDIX */ 1536 /* reset to update Auto-MDI/MDIX */
1481 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); 1537 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1482 /* autoneg off */ 1538 /* autoneg off */
1483 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); 1539 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1484 break; 1540 } else {
1485 case e1000_phy_82580: 1541 /* force 1000, set loopback */
1486 /* enable MII loopback */ 1542 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1487 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); 1543 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1488 break; 1544 }
1489 case e1000_phy_i210:
1490 /* set loopback speed in PHY */
1491 igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
1492 &phy_reg);
1493 phy_reg |= GS40G_MAC_SPEED_1G;
1494 igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
1495 phy_reg);
1496 ctrl_reg = rd32(E1000_CTRL_EXT);
1497 default:
1498 break;
1499 } 1545 }
1500 1546
1501 /* add small delay to avoid loopback test failure */ 1547 /* add small delay to avoid loopback test failure */
@@ -1513,7 +1559,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1513 E1000_CTRL_FD | /* Force Duplex to FULL */ 1559 E1000_CTRL_FD | /* Force Duplex to FULL */
1514 E1000_CTRL_SLU); /* Set link up enable bit */ 1560 E1000_CTRL_SLU); /* Set link up enable bit */
1515 1561
1516 if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) 1562 if (hw->phy.type == e1000_phy_m88)
1517 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1563 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1518 1564
1519 wr32(E1000_CTRL, ctrl_reg); 1565 wr32(E1000_CTRL, ctrl_reg);
@@ -1521,11 +1567,10 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1521 /* Disable the receiver on the PHY so when a cable is plugged in, the 1567 /* Disable the receiver on the PHY so when a cable is plugged in, the
1522 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1568 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1523 */ 1569 */
1524 if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) 1570 if (hw->phy.type == e1000_phy_m88)
1525 igb_phy_disable_receiver(adapter); 1571 igb_phy_disable_receiver(adapter);
1526 1572
1527 udelay(500); 1573 mdelay(500);
1528
1529 return 0; 1574 return 0;
1530} 1575}
1531 1576
@@ -1785,13 +1830,6 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1785 *data = 0; 1830 *data = 0;
1786 goto out; 1831 goto out;
1787 } 1832 }
1788 if ((adapter->hw.mac.type == e1000_i210)
1789 || (adapter->hw.mac.type == e1000_i211)) {
1790 dev_err(&adapter->pdev->dev,
1791 "Loopback test not supported on this part at this time.\n");
1792 *data = 0;
1793 goto out;
1794 }
1795 *data = igb_setup_desc_rings(adapter); 1833 *data = igb_setup_desc_rings(adapter);
1796 if (*data) 1834 if (*data)
1797 goto out; 1835 goto out;
@@ -2257,6 +2295,54 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2257 } 2295 }
2258} 2296}
2259 2297
2298static int igb_get_ts_info(struct net_device *dev,
2299 struct ethtool_ts_info *info)
2300{
2301 struct igb_adapter *adapter = netdev_priv(dev);
2302
2303 switch (adapter->hw.mac.type) {
2304#ifdef CONFIG_IGB_PTP
2305 case e1000_82576:
2306 case e1000_82580:
2307 case e1000_i350:
2308 case e1000_i210:
2309 case e1000_i211:
2310 info->so_timestamping =
2311 SOF_TIMESTAMPING_TX_HARDWARE |
2312 SOF_TIMESTAMPING_RX_HARDWARE |
2313 SOF_TIMESTAMPING_RAW_HARDWARE;
2314
2315 if (adapter->ptp_clock)
2316 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2317 else
2318 info->phc_index = -1;
2319
2320 info->tx_types =
2321 (1 << HWTSTAMP_TX_OFF) |
2322 (1 << HWTSTAMP_TX_ON);
2323
2324 info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
2325
2326 /* 82576 does not support timestamping all packets. */
2327 if (adapter->hw.mac.type >= e1000_82580)
2328 info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
2329 else
2330 info->rx_filters |=
2331 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2332 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2333 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
2334 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
2335 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
2336 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
2337 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2338
2339 return 0;
2340#endif /* CONFIG_IGB_PTP */
2341 default:
2342 return -EOPNOTSUPP;
2343 }
2344}
2345
2260static int igb_ethtool_begin(struct net_device *netdev) 2346static int igb_ethtool_begin(struct net_device *netdev)
2261{ 2347{
2262 struct igb_adapter *adapter = netdev_priv(netdev); 2348 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2270,38 +2356,6 @@ static void igb_ethtool_complete(struct net_device *netdev)
2270 pm_runtime_put(&adapter->pdev->dev); 2356 pm_runtime_put(&adapter->pdev->dev);
2271} 2357}
2272 2358
2273#ifdef CONFIG_IGB_PTP
2274static int igb_ethtool_get_ts_info(struct net_device *dev,
2275 struct ethtool_ts_info *info)
2276{
2277 struct igb_adapter *adapter = netdev_priv(dev);
2278
2279 info->so_timestamping =
2280 SOF_TIMESTAMPING_TX_HARDWARE |
2281 SOF_TIMESTAMPING_RX_HARDWARE |
2282 SOF_TIMESTAMPING_RAW_HARDWARE;
2283
2284 if (adapter->ptp_clock)
2285 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2286 else
2287 info->phc_index = -1;
2288
2289 info->tx_types =
2290 (1 << HWTSTAMP_TX_OFF) |
2291 (1 << HWTSTAMP_TX_ON);
2292
2293 info->rx_filters =
2294 (1 << HWTSTAMP_FILTER_NONE) |
2295 (1 << HWTSTAMP_FILTER_ALL) |
2296 (1 << HWTSTAMP_FILTER_SOME) |
2297 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2298 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2299 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2300
2301 return 0;
2302}
2303
2304#endif
2305static const struct ethtool_ops igb_ethtool_ops = { 2359static const struct ethtool_ops igb_ethtool_ops = {
2306 .get_settings = igb_get_settings, 2360 .get_settings = igb_get_settings,
2307 .set_settings = igb_set_settings, 2361 .set_settings = igb_set_settings,
@@ -2328,11 +2382,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
2328 .get_ethtool_stats = igb_get_ethtool_stats, 2382 .get_ethtool_stats = igb_get_ethtool_stats,
2329 .get_coalesce = igb_get_coalesce, 2383 .get_coalesce = igb_get_coalesce,
2330 .set_coalesce = igb_set_coalesce, 2384 .set_coalesce = igb_set_coalesce,
2385 .get_ts_info = igb_get_ts_info,
2331 .begin = igb_ethtool_begin, 2386 .begin = igb_ethtool_begin,
2332 .complete = igb_ethtool_complete, 2387 .complete = igb_ethtool_complete,
2333#ifdef CONFIG_IGB_PTP
2334 .get_ts_info = igb_ethtool_get_ts_info,
2335#endif
2336}; 2388};
2337 2389
2338void igb_set_ethtool_ops(struct net_device *netdev) 2390void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f88c822e57a6..e1ceb37ef12e 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
172 172
173#ifdef CONFIG_PCI_IOV 173#ifdef CONFIG_PCI_IOV
174static int igb_vf_configure(struct igb_adapter *adapter, int vf); 174static int igb_vf_configure(struct igb_adapter *adapter, int vf);
175static int igb_find_enabled_vfs(struct igb_adapter *adapter); 175static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
176static int igb_check_vf_assignment(struct igb_adapter *adapter);
177#endif 176#endif
178 177
179#ifdef CONFIG_PM 178#ifdef CONFIG_PM
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
404 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; 403 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
405 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", 404 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
406 n, tx_ring->next_to_use, tx_ring->next_to_clean, 405 n, tx_ring->next_to_use, tx_ring->next_to_clean,
407 (u64)buffer_info->dma, 406 (u64)dma_unmap_addr(buffer_info, dma),
408 buffer_info->length, 407 dma_unmap_len(buffer_info, len),
409 buffer_info->next_to_watch, 408 buffer_info->next_to_watch,
410 (u64)buffer_info->time_stamp); 409 (u64)buffer_info->time_stamp);
411 } 410 }
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
456 " %04X %p %016llX %p%s\n", i, 455 " %04X %p %016llX %p%s\n", i,
457 le64_to_cpu(u0->a), 456 le64_to_cpu(u0->a),
458 le64_to_cpu(u0->b), 457 le64_to_cpu(u0->b),
459 (u64)buffer_info->dma, 458 (u64)dma_unmap_addr(buffer_info, dma),
460 buffer_info->length, 459 dma_unmap_len(buffer_info, len),
461 buffer_info->next_to_watch, 460 buffer_info->next_to_watch,
462 (u64)buffer_info->time_stamp, 461 (u64)buffer_info->time_stamp,
463 buffer_info->skb, next_desc); 462 buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
466 print_hex_dump(KERN_INFO, "", 465 print_hex_dump(KERN_INFO, "",
467 DUMP_PREFIX_ADDRESS, 466 DUMP_PREFIX_ADDRESS,
468 16, 1, buffer_info->skb->data, 467 16, 1, buffer_info->skb->data,
469 buffer_info->length, true); 468 dma_unmap_len(buffer_info, len),
469 true);
470 } 470 }
471 } 471 }
472 472
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
683{ 683{
684 struct igb_ring *ring; 684 struct igb_ring *ring;
685 int i; 685 int i;
686 int orig_node = adapter->node;
687 686
688 for (i = 0; i < adapter->num_tx_queues; i++) { 687 for (i = 0; i < adapter->num_tx_queues; i++) {
689 if (orig_node == -1) { 688 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
690 int cur_node = next_online_node(adapter->node);
691 if (cur_node == MAX_NUMNODES)
692 cur_node = first_online_node;
693 adapter->node = cur_node;
694 }
695 ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
696 adapter->node);
697 if (!ring)
698 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
699 if (!ring) 689 if (!ring)
700 goto err; 690 goto err;
701 ring->count = adapter->tx_ring_count; 691 ring->count = adapter->tx_ring_count;
702 ring->queue_index = i; 692 ring->queue_index = i;
703 ring->dev = &adapter->pdev->dev; 693 ring->dev = &adapter->pdev->dev;
704 ring->netdev = adapter->netdev; 694 ring->netdev = adapter->netdev;
705 ring->numa_node = adapter->node;
706 /* For 82575, context index must be unique per ring. */ 695 /* For 82575, context index must be unique per ring. */
707 if (adapter->hw.mac.type == e1000_82575) 696 if (adapter->hw.mac.type == e1000_82575)
708 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); 697 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
709 adapter->tx_ring[i] = ring; 698 adapter->tx_ring[i] = ring;
710 } 699 }
711 /* Restore the adapter's original node */
712 adapter->node = orig_node;
713 700
714 for (i = 0; i < adapter->num_rx_queues; i++) { 701 for (i = 0; i < adapter->num_rx_queues; i++) {
715 if (orig_node == -1) { 702 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
716 int cur_node = next_online_node(adapter->node);
717 if (cur_node == MAX_NUMNODES)
718 cur_node = first_online_node;
719 adapter->node = cur_node;
720 }
721 ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
722 adapter->node);
723 if (!ring)
724 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
725 if (!ring) 703 if (!ring)
726 goto err; 704 goto err;
727 ring->count = adapter->rx_ring_count; 705 ring->count = adapter->rx_ring_count;
728 ring->queue_index = i; 706 ring->queue_index = i;
729 ring->dev = &adapter->pdev->dev; 707 ring->dev = &adapter->pdev->dev;
730 ring->netdev = adapter->netdev; 708 ring->netdev = adapter->netdev;
731 ring->numa_node = adapter->node;
732 /* set flag indicating ring supports SCTP checksum offload */ 709 /* set flag indicating ring supports SCTP checksum offload */
733 if (adapter->hw.mac.type >= e1000_82576) 710 if (adapter->hw.mac.type >= e1000_82576)
734 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 711 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
742 719
743 adapter->rx_ring[i] = ring; 720 adapter->rx_ring[i] = ring;
744 } 721 }
745 /* Restore the adapter's original node */
746 adapter->node = orig_node;
747 722
748 igb_cache_ring_register(adapter); 723 igb_cache_ring_register(adapter);
749 724
750 return 0; 725 return 0;
751 726
752err: 727err:
753 /* Restore the adapter's original node */
754 adapter->node = orig_node;
755 igb_free_queues(adapter); 728 igb_free_queues(adapter);
756 729
757 return -ENOMEM; 730 return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1117 struct igb_q_vector *q_vector; 1090 struct igb_q_vector *q_vector;
1118 struct e1000_hw *hw = &adapter->hw; 1091 struct e1000_hw *hw = &adapter->hw;
1119 int v_idx; 1092 int v_idx;
1120 int orig_node = adapter->node;
1121 1093
1122 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 1094 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1123 if ((adapter->num_q_vectors == (adapter->num_rx_queues + 1095 q_vector = kzalloc(sizeof(struct igb_q_vector),
1124 adapter->num_tx_queues)) && 1096 GFP_KERNEL);
1125 (adapter->num_rx_queues == v_idx))
1126 adapter->node = orig_node;
1127 if (orig_node == -1) {
1128 int cur_node = next_online_node(adapter->node);
1129 if (cur_node == MAX_NUMNODES)
1130 cur_node = first_online_node;
1131 adapter->node = cur_node;
1132 }
1133 q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
1134 adapter->node);
1135 if (!q_vector)
1136 q_vector = kzalloc(sizeof(struct igb_q_vector),
1137 GFP_KERNEL);
1138 if (!q_vector) 1097 if (!q_vector)
1139 goto err_out; 1098 goto err_out;
1140 q_vector->adapter = adapter; 1099 q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1143 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); 1102 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
1144 adapter->q_vector[v_idx] = q_vector; 1103 adapter->q_vector[v_idx] = q_vector;
1145 } 1104 }
1146 /* Restore the adapter's original node */
1147 adapter->node = orig_node;
1148 1105
1149 return 0; 1106 return 0;
1150 1107
1151err_out: 1108err_out:
1152 /* Restore the adapter's original node */
1153 adapter->node = orig_node;
1154 igb_free_q_vectors(adapter); 1109 igb_free_q_vectors(adapter);
1155 return -ENOMEM; 1110 return -ENOMEM;
1156} 1111}
@@ -1751,6 +1706,11 @@ void igb_reset(struct igb_adapter *adapter)
1751 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1706 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1752 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1707 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1753 1708
1709#ifdef CONFIG_IGB_PTP
1710 /* Re-enable PTP, where applicable. */
1711 igb_ptp_reset(adapter);
1712#endif /* CONFIG_IGB_PTP */
1713
1754 igb_get_phy_info(hw); 1714 igb_get_phy_info(hw);
1755} 1715}
1756 1716
@@ -2180,11 +2140,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2180 } 2140 }
2181 2141
2182#endif 2142#endif
2143
2183#ifdef CONFIG_IGB_PTP 2144#ifdef CONFIG_IGB_PTP
2184 /* do hw tstamp init after resetting */ 2145 /* do hw tstamp init after resetting */
2185 igb_ptp_init(adapter); 2146 igb_ptp_init(adapter);
2147#endif /* CONFIG_IGB_PTP */
2186 2148
2187#endif
2188 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2149 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2189 /* print bus type/speed/width info */ 2150 /* print bus type/speed/width info */
2190 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2151 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2259,9 +2220,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2259 2220
2260 pm_runtime_get_noresume(&pdev->dev); 2221 pm_runtime_get_noresume(&pdev->dev);
2261#ifdef CONFIG_IGB_PTP 2222#ifdef CONFIG_IGB_PTP
2262 igb_ptp_remove(adapter); 2223 igb_ptp_stop(adapter);
2224#endif /* CONFIG_IGB_PTP */
2263 2225
2264#endif
2265 /* 2226 /*
2266 * The watchdog timer may be rescheduled, so explicitly 2227 * The watchdog timer may be rescheduled, so explicitly
2267 * disable watchdog from being rescheduled. 2228 * disable watchdog from being rescheduled.
@@ -2294,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2294 /* reclaim resources allocated to VFs */ 2255 /* reclaim resources allocated to VFs */
2295 if (adapter->vf_data) { 2256 if (adapter->vf_data) {
2296 /* disable iov and allow time for transactions to clear */ 2257 /* disable iov and allow time for transactions to clear */
2297 if (!igb_check_vf_assignment(adapter)) { 2258 if (igb_vfs_are_assigned(adapter)) {
2259 dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2260 } else {
2298 pci_disable_sriov(pdev); 2261 pci_disable_sriov(pdev);
2299 msleep(500); 2262 msleep(500);
2300 } else {
2301 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2302 } 2263 }
2303 2264
2304 kfree(adapter->vf_data); 2265 kfree(adapter->vf_data);
@@ -2338,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2338#ifdef CONFIG_PCI_IOV 2299#ifdef CONFIG_PCI_IOV
2339 struct pci_dev *pdev = adapter->pdev; 2300 struct pci_dev *pdev = adapter->pdev;
2340 struct e1000_hw *hw = &adapter->hw; 2301 struct e1000_hw *hw = &adapter->hw;
2341 int old_vfs = igb_find_enabled_vfs(adapter); 2302 int old_vfs = pci_num_vf(adapter->pdev);
2342 int i; 2303 int i;
2343 2304
2344 /* Virtualization features not supported on i210 family. */ 2305 /* Virtualization features not supported on i210 family. */
@@ -2418,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2418 VLAN_HLEN; 2379 VLAN_HLEN;
2419 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 2380 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2420 2381
2421 adapter->node = -1;
2422
2423 spin_lock_init(&adapter->stats64_lock); 2382 spin_lock_init(&adapter->stats64_lock);
2424#ifdef CONFIG_PCI_IOV 2383#ifdef CONFIG_PCI_IOV
2425 switch (hw->mac.type) { 2384 switch (hw->mac.type) {
@@ -2666,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
2666int igb_setup_tx_resources(struct igb_ring *tx_ring) 2625int igb_setup_tx_resources(struct igb_ring *tx_ring)
2667{ 2626{
2668 struct device *dev = tx_ring->dev; 2627 struct device *dev = tx_ring->dev;
2669 int orig_node = dev_to_node(dev);
2670 int size; 2628 int size;
2671 2629
2672 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 2630 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
2673 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); 2631
2674 if (!tx_ring->tx_buffer_info) 2632 tx_ring->tx_buffer_info = vzalloc(size);
2675 tx_ring->tx_buffer_info = vzalloc(size);
2676 if (!tx_ring->tx_buffer_info) 2633 if (!tx_ring->tx_buffer_info)
2677 goto err; 2634 goto err;
2678 2635
@@ -2680,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2680 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2637 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2681 tx_ring->size = ALIGN(tx_ring->size, 4096); 2638 tx_ring->size = ALIGN(tx_ring->size, 4096);
2682 2639
2683 set_dev_node(dev, tx_ring->numa_node);
2684 tx_ring->desc = dma_alloc_coherent(dev, 2640 tx_ring->desc = dma_alloc_coherent(dev,
2685 tx_ring->size, 2641 tx_ring->size,
2686 &tx_ring->dma, 2642 &tx_ring->dma,
2687 GFP_KERNEL); 2643 GFP_KERNEL);
2688 set_dev_node(dev, orig_node);
2689 if (!tx_ring->desc)
2690 tx_ring->desc = dma_alloc_coherent(dev,
2691 tx_ring->size,
2692 &tx_ring->dma,
2693 GFP_KERNEL);
2694
2695 if (!tx_ring->desc) 2644 if (!tx_ring->desc)
2696 goto err; 2645 goto err;
2697 2646
@@ -2702,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2702 2651
2703err: 2652err:
2704 vfree(tx_ring->tx_buffer_info); 2653 vfree(tx_ring->tx_buffer_info);
2705 dev_err(dev, 2654 tx_ring->tx_buffer_info = NULL;
2706 "Unable to allocate memory for the transmit descriptor ring\n"); 2655 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
2707 return -ENOMEM; 2656 return -ENOMEM;
2708} 2657}
2709 2658
@@ -2820,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2820int igb_setup_rx_resources(struct igb_ring *rx_ring) 2769int igb_setup_rx_resources(struct igb_ring *rx_ring)
2821{ 2770{
2822 struct device *dev = rx_ring->dev; 2771 struct device *dev = rx_ring->dev;
2823 int orig_node = dev_to_node(dev); 2772 int size;
2824 int size, desc_len;
2825 2773
2826 size = sizeof(struct igb_rx_buffer) * rx_ring->count; 2774 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
2827 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); 2775
2828 if (!rx_ring->rx_buffer_info) 2776 rx_ring->rx_buffer_info = vzalloc(size);
2829 rx_ring->rx_buffer_info = vzalloc(size);
2830 if (!rx_ring->rx_buffer_info) 2777 if (!rx_ring->rx_buffer_info)
2831 goto err; 2778 goto err;
2832 2779
2833 desc_len = sizeof(union e1000_adv_rx_desc);
2834 2780
2835 /* Round up to nearest 4K */ 2781 /* Round up to nearest 4K */
2836 rx_ring->size = rx_ring->count * desc_len; 2782 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
2837 rx_ring->size = ALIGN(rx_ring->size, 4096); 2783 rx_ring->size = ALIGN(rx_ring->size, 4096);
2838 2784
2839 set_dev_node(dev, rx_ring->numa_node);
2840 rx_ring->desc = dma_alloc_coherent(dev, 2785 rx_ring->desc = dma_alloc_coherent(dev,
2841 rx_ring->size, 2786 rx_ring->size,
2842 &rx_ring->dma, 2787 &rx_ring->dma,
2843 GFP_KERNEL); 2788 GFP_KERNEL);
2844 set_dev_node(dev, orig_node);
2845 if (!rx_ring->desc)
2846 rx_ring->desc = dma_alloc_coherent(dev,
2847 rx_ring->size,
2848 &rx_ring->dma,
2849 GFP_KERNEL);
2850
2851 if (!rx_ring->desc) 2789 if (!rx_ring->desc)
2852 goto err; 2790 goto err;
2853 2791
@@ -2859,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2859err: 2797err:
2860 vfree(rx_ring->rx_buffer_info); 2798 vfree(rx_ring->rx_buffer_info);
2861 rx_ring->rx_buffer_info = NULL; 2799 rx_ring->rx_buffer_info = NULL;
2862 dev_err(dev, "Unable to allocate memory for the receive descriptor" 2800 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
2863 " ring\n");
2864 return -ENOMEM; 2801 return -ENOMEM;
2865} 2802}
2866 2803
@@ -2898,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2898{ 2835{
2899 struct e1000_hw *hw = &adapter->hw; 2836 struct e1000_hw *hw = &adapter->hw;
2900 u32 mrqc, rxcsum; 2837 u32 mrqc, rxcsum;
2901 u32 j, num_rx_queues, shift = 0, shift2 = 0; 2838 u32 j, num_rx_queues, shift = 0;
2902 union e1000_reta { 2839 static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
2903 u32 dword; 2840 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
2904 u8 bytes[4]; 2841 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
2905 } reta; 2842 0xFA01ACBE };
2906 static const u8 rsshash[40] = {
2907 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2908 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2909 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2910 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2911 2843
2912 /* Fill out hash function seeds */ 2844 /* Fill out hash function seeds */
2913 for (j = 0; j < 10; j++) { 2845 for (j = 0; j < 10; j++)
2914 u32 rsskey = rsshash[(j * 4)]; 2846 wr32(E1000_RSSRK(j), rsskey[j]);
2915 rsskey |= rsshash[(j * 4) + 1] << 8;
2916 rsskey |= rsshash[(j * 4) + 2] << 16;
2917 rsskey |= rsshash[(j * 4) + 3] << 24;
2918 array_wr32(E1000_RSSRK(0), j, rsskey);
2919 }
2920 2847
2921 num_rx_queues = adapter->rss_queues; 2848 num_rx_queues = adapter->rss_queues;
2922 2849
2923 if (adapter->vfs_allocated_count) { 2850 switch (hw->mac.type) {
2924 /* 82575 and 82576 supports 2 RSS queues for VMDq */ 2851 case e1000_82575:
2925 switch (hw->mac.type) { 2852 shift = 6;
2926 case e1000_i350: 2853 break;
2927 case e1000_82580: 2854 case e1000_82576:
2928 num_rx_queues = 1; 2855 /* 82576 supports 2 RSS queues for SR-IOV */
2929 shift = 0; 2856 if (adapter->vfs_allocated_count) {
2930 break;
2931 case e1000_82576:
2932 shift = 3; 2857 shift = 3;
2933 num_rx_queues = 2; 2858 num_rx_queues = 2;
2934 break;
2935 case e1000_82575:
2936 shift = 2;
2937 shift2 = 6;
2938 default:
2939 break;
2940 } 2859 }
2941 } else { 2860 break;
2942 if (hw->mac.type == e1000_82575) 2861 default:
2943 shift = 6; 2862 break;
2944 } 2863 }
2945 2864
2946 for (j = 0; j < (32 * 4); j++) { 2865 /*
2947 reta.bytes[j & 3] = (j % num_rx_queues) << shift; 2866 * Populate the indirection table 4 entries at a time. To do this
2948 if (shift2) 2867 * we are generating the results for n and n+2 and then interleaving
2949 reta.bytes[j & 3] |= num_rx_queues << shift2; 2868 * those with the results with n+1 and n+3.
2950 if ((j & 3) == 3) 2869 */
2951 wr32(E1000_RETA(j >> 2), reta.dword); 2870 for (j = 0; j < 32; j++) {
2871 /* first pass generates n and n+2 */
2872 u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
2873 u32 reta = (base & 0x07800780) >> (7 - shift);
2874
2875 /* second pass generates n+1 and n+3 */
2876 base += 0x00010001 * num_rx_queues;
2877 reta |= (base & 0x07800780) << (1 + shift);
2878
2879 wr32(E1000_RETA(j), reta);
2952 } 2880 }
2953 2881
2954 /* 2882 /*
@@ -3184,8 +3112,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3184 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT; 3112 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3185#endif 3113#endif
3186 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 3114 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3115#ifdef CONFIG_IGB_PTP
3187 if (hw->mac.type >= e1000_82580) 3116 if (hw->mac.type >= e1000_82580)
3188 srrctl |= E1000_SRRCTL_TIMESTAMP; 3117 srrctl |= E1000_SRRCTL_TIMESTAMP;
3118#endif /* CONFIG_IGB_PTP */
3189 /* Only set Drop Enable if we are supporting multiple queues */ 3119 /* Only set Drop Enable if we are supporting multiple queues */
3190 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) 3120 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3191 srrctl |= E1000_SRRCTL_DROP_EN; 3121 srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3269,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3269{ 3199{
3270 if (tx_buffer->skb) { 3200 if (tx_buffer->skb) {
3271 dev_kfree_skb_any(tx_buffer->skb); 3201 dev_kfree_skb_any(tx_buffer->skb);
3272 if (tx_buffer->dma) 3202 if (dma_unmap_len(tx_buffer, len))
3273 dma_unmap_single(ring->dev, 3203 dma_unmap_single(ring->dev,
3274 tx_buffer->dma, 3204 dma_unmap_addr(tx_buffer, dma),
3275 tx_buffer->length, 3205 dma_unmap_len(tx_buffer, len),
3276 DMA_TO_DEVICE); 3206 DMA_TO_DEVICE);
3277 } else if (tx_buffer->dma) { 3207 } else if (dma_unmap_len(tx_buffer, len)) {
3278 dma_unmap_page(ring->dev, 3208 dma_unmap_page(ring->dev,
3279 tx_buffer->dma, 3209 dma_unmap_addr(tx_buffer, dma),
3280 tx_buffer->length, 3210 dma_unmap_len(tx_buffer, len),
3281 DMA_TO_DEVICE); 3211 DMA_TO_DEVICE);
3282 } 3212 }
3283 tx_buffer->next_to_watch = NULL; 3213 tx_buffer->next_to_watch = NULL;
3284 tx_buffer->skb = NULL; 3214 tx_buffer->skb = NULL;
3285 tx_buffer->dma = 0; 3215 dma_unmap_len_set(tx_buffer, len, 0);
3286 /* buffer_info must be completely set up in the transmit path */ 3216 /* buffer_info must be completely set up in the transmit path */
3287} 3217}
3288 3218
@@ -4229,9 +4159,11 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
4229 if (tx_flags & IGB_TX_FLAGS_VLAN) 4159 if (tx_flags & IGB_TX_FLAGS_VLAN)
4230 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE); 4160 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4231 4161
4162#ifdef CONFIG_IGB_PTP
4232 /* set timestamp bit if present */ 4163 /* set timestamp bit if present */
4233 if (tx_flags & IGB_TX_FLAGS_TSTAMP) 4164 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
4234 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP); 4165 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4166#endif /* CONFIG_IGB_PTP */
4235 4167
4236 /* set segmentation bits for TSO */ 4168 /* set segmentation bits for TSO */
4237 if (tx_flags & IGB_TX_FLAGS_TSO) 4169 if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4275,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4275 const u8 hdr_len) 4207 const u8 hdr_len)
4276{ 4208{
4277 struct sk_buff *skb = first->skb; 4209 struct sk_buff *skb = first->skb;
4278 struct igb_tx_buffer *tx_buffer_info; 4210 struct igb_tx_buffer *tx_buffer;
4279 union e1000_adv_tx_desc *tx_desc; 4211 union e1000_adv_tx_desc *tx_desc;
4280 dma_addr_t dma; 4212 dma_addr_t dma;
4281 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 4213 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4296,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4296 goto dma_error; 4228 goto dma_error;
4297 4229
4298 /* record length, and DMA address */ 4230 /* record length, and DMA address */
4299 first->length = size; 4231 dma_unmap_len_set(first, len, size);
4300 first->dma = dma; 4232 dma_unmap_addr_set(first, dma, dma);
4301 tx_desc->read.buffer_addr = cpu_to_le64(dma); 4233 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4302 4234
4303 for (;;) { 4235 for (;;) {
@@ -4339,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4339 if (dma_mapping_error(tx_ring->dev, dma)) 4271 if (dma_mapping_error(tx_ring->dev, dma))
4340 goto dma_error; 4272 goto dma_error;
4341 4273
4342 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4274 tx_buffer = &tx_ring->tx_buffer_info[i];
4343 tx_buffer_info->length = size; 4275 dma_unmap_len_set(tx_buffer, len, size);
4344 tx_buffer_info->dma = dma; 4276 dma_unmap_addr_set(tx_buffer, dma, dma);
4345 4277
4346 tx_desc->read.olinfo_status = 0; 4278 tx_desc->read.olinfo_status = 0;
4347 tx_desc->read.buffer_addr = cpu_to_le64(dma); 4279 tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4392,9 +4324,9 @@ dma_error:
4392 4324
4393 /* clear dma mappings for failed tx_buffer_info map */ 4325 /* clear dma mappings for failed tx_buffer_info map */
4394 for (;;) { 4326 for (;;) {
4395 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4327 tx_buffer = &tx_ring->tx_buffer_info[i];
4396 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 4328 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
4397 if (tx_buffer_info == first) 4329 if (tx_buffer == first)
4398 break; 4330 break;
4399 if (i == 0) 4331 if (i == 0)
4400 i = tx_ring->count; 4332 i = tx_ring->count;
@@ -4440,6 +4372,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4440netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 4372netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4441 struct igb_ring *tx_ring) 4373 struct igb_ring *tx_ring)
4442{ 4374{
4375#ifdef CONFIG_IGB_PTP
4376 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4377#endif /* CONFIG_IGB_PTP */
4443 struct igb_tx_buffer *first; 4378 struct igb_tx_buffer *first;
4444 int tso; 4379 int tso;
4445 u32 tx_flags = 0; 4380 u32 tx_flags = 0;
@@ -4462,10 +4397,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4462 first->bytecount = skb->len; 4397 first->bytecount = skb->len;
4463 first->gso_segs = 1; 4398 first->gso_segs = 1;
4464 4399
4465 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 4400#ifdef CONFIG_IGB_PTP
4401 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4402 !(adapter->ptp_tx_skb))) {
4466 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4403 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4467 tx_flags |= IGB_TX_FLAGS_TSTAMP; 4404 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4405
4406 adapter->ptp_tx_skb = skb_get(skb);
4407 if (adapter->hw.mac.type == e1000_82576)
4408 schedule_work(&adapter->ptp_tx_work);
4468 } 4409 }
4410#endif /* CONFIG_IGB_PTP */
4469 4411
4470 if (vlan_tx_tag_present(skb)) { 4412 if (vlan_tx_tag_present(skb)) {
4471 tx_flags |= IGB_TX_FLAGS_VLAN; 4413 tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4661,11 +4603,13 @@ void igb_update_stats(struct igb_adapter *adapter,
4661 bytes = 0; 4603 bytes = 0;
4662 packets = 0; 4604 packets = 0;
4663 for (i = 0; i < adapter->num_rx_queues; i++) { 4605 for (i = 0; i < adapter->num_rx_queues; i++) {
4664 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; 4606 u32 rqdpc = rd32(E1000_RQDPC(i));
4665 struct igb_ring *ring = adapter->rx_ring[i]; 4607 struct igb_ring *ring = adapter->rx_ring[i];
4666 4608
4667 ring->rx_stats.drops += rqdpc_tmp; 4609 if (rqdpc) {
4668 net_stats->rx_fifo_errors += rqdpc_tmp; 4610 ring->rx_stats.drops += rqdpc;
4611 net_stats->rx_fifo_errors += rqdpc;
4612 }
4669 4613
4670 do { 4614 do {
4671 start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 4615 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
@@ -4755,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
4755 reg = rd32(E1000_CTRL_EXT); 4699 reg = rd32(E1000_CTRL_EXT);
4756 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { 4700 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4757 adapter->stats.rxerrc += rd32(E1000_RXERRC); 4701 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4758 adapter->stats.tncrs += rd32(E1000_TNCRS); 4702
4703 /* this stat has invalid values on i210/i211 */
4704 if ((hw->mac.type != e1000_i210) &&
4705 (hw->mac.type != e1000_i211))
4706 adapter->stats.tncrs += rd32(E1000_TNCRS);
4759 } 4707 }
4760 4708
4761 adapter->stats.tsctc += rd32(E1000_TSCTC); 4709 adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -4852,6 +4800,19 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4852 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4800 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4853 } 4801 }
4854 4802
4803#ifdef CONFIG_IGB_PTP
4804 if (icr & E1000_ICR_TS) {
4805 u32 tsicr = rd32(E1000_TSICR);
4806
4807 if (tsicr & E1000_TSICR_TXTS) {
4808 /* acknowledge the interrupt */
4809 wr32(E1000_TSICR, E1000_TSICR_TXTS);
4810 /* retrieve hardware timestamp */
4811 schedule_work(&adapter->ptp_tx_work);
4812 }
4813 }
4814#endif /* CONFIG_IGB_PTP */
4815
4855 wr32(E1000_EIMS, adapter->eims_other); 4816 wr32(E1000_EIMS, adapter->eims_other);
4856 4817
4857 return IRQ_HANDLED; 4818 return IRQ_HANDLED;
@@ -5002,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5002static int igb_vf_configure(struct igb_adapter *adapter, int vf) 4963static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5003{ 4964{
5004 unsigned char mac_addr[ETH_ALEN]; 4965 unsigned char mac_addr[ETH_ALEN];
5005 struct pci_dev *pdev = adapter->pdev;
5006 struct e1000_hw *hw = &adapter->hw;
5007 struct pci_dev *pvfdev;
5008 unsigned int device_id;
5009 u16 thisvf_devfn;
5010 4966
5011 eth_random_addr(mac_addr); 4967 eth_random_addr(mac_addr);
5012 igb_set_vf_mac(adapter, vf, mac_addr); 4968 igb_set_vf_mac(adapter, vf, mac_addr);
5013 4969
5014 switch (adapter->hw.mac.type) { 4970 return 0;
5015 case e1000_82576:
5016 device_id = IGB_82576_VF_DEV_ID;
5017 /* VF Stride for 82576 is 2 */
5018 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
5019 (pdev->devfn & 1);
5020 break;
5021 case e1000_i350:
5022 device_id = IGB_I350_VF_DEV_ID;
5023 /* VF Stride for I350 is 4 */
5024 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
5025 (pdev->devfn & 3);
5026 break;
5027 default:
5028 device_id = 0;
5029 thisvf_devfn = 0;
5030 break;
5031 }
5032
5033 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5034 while (pvfdev) {
5035 if (pvfdev->devfn == thisvf_devfn)
5036 break;
5037 pvfdev = pci_get_device(hw->vendor_id,
5038 device_id, pvfdev);
5039 }
5040
5041 if (pvfdev)
5042 adapter->vf_data[vf].vfdev = pvfdev;
5043 else
5044 dev_err(&pdev->dev,
5045 "Couldn't find pci dev ptr for VF %4.4x\n",
5046 thisvf_devfn);
5047 return pvfdev != NULL;
5048} 4971}
5049 4972
5050static int igb_find_enabled_vfs(struct igb_adapter *adapter) 4973static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
5051{ 4974{
5052 struct e1000_hw *hw = &adapter->hw;
5053 struct pci_dev *pdev = adapter->pdev; 4975 struct pci_dev *pdev = adapter->pdev;
5054 struct pci_dev *pvfdev; 4976 struct pci_dev *vfdev;
5055 u16 vf_devfn = 0; 4977 int dev_id;
5056 u16 vf_stride;
5057 unsigned int device_id;
5058 int vfs_found = 0;
5059 4978
5060 switch (adapter->hw.mac.type) { 4979 switch (adapter->hw.mac.type) {
5061 case e1000_82576: 4980 case e1000_82576:
5062 device_id = IGB_82576_VF_DEV_ID; 4981 dev_id = IGB_82576_VF_DEV_ID;
5063 /* VF Stride for 82576 is 2 */
5064 vf_stride = 2;
5065 break; 4982 break;
5066 case e1000_i350: 4983 case e1000_i350:
5067 device_id = IGB_I350_VF_DEV_ID; 4984 dev_id = IGB_I350_VF_DEV_ID;
5068 /* VF Stride for I350 is 4 */
5069 vf_stride = 4;
5070 break; 4985 break;
5071 default: 4986 default:
5072 device_id = 0; 4987 return false;
5073 vf_stride = 0;
5074 break;
5075 }
5076
5077 vf_devfn = pdev->devfn + 0x80;
5078 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5079 while (pvfdev) {
5080 if (pvfdev->devfn == vf_devfn &&
5081 (pvfdev->bus->number >= pdev->bus->number))
5082 vfs_found++;
5083 vf_devfn += vf_stride;
5084 pvfdev = pci_get_device(hw->vendor_id,
5085 device_id, pvfdev);
5086 } 4988 }
5087 4989
5088 return vfs_found; 4990 /* loop through all the VFs to see if we own any that are assigned */
5089} 4991 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
5090 4992 while (vfdev) {
5091static int igb_check_vf_assignment(struct igb_adapter *adapter) 4993 /* if we don't own it we don't care */
5092{ 4994 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
5093 int i; 4995 /* if it is assigned we cannot release it */
5094 for (i = 0; i < adapter->vfs_allocated_count; i++) { 4996 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
5095 if (adapter->vf_data[i].vfdev) {
5096 if (adapter->vf_data[i].vfdev->dev_flags &
5097 PCI_DEV_FLAGS_ASSIGNED)
5098 return true; 4997 return true;
5099 } 4998 }
4999
5000 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
5100 } 5001 }
5002
5101 return false; 5003 return false;
5102} 5004}
5103 5005
@@ -5643,6 +5545,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5643 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5545 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5644 } 5546 }
5645 5547
5548#ifdef CONFIG_IGB_PTP
5549 if (icr & E1000_ICR_TS) {
5550 u32 tsicr = rd32(E1000_TSICR);
5551
5552 if (tsicr & E1000_TSICR_TXTS) {
5553 /* acknowledge the interrupt */
5554 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5555 /* retrieve hardware timestamp */
5556 schedule_work(&adapter->ptp_tx_work);
5557 }
5558 }
5559#endif /* CONFIG_IGB_PTP */
5560
5646 napi_schedule(&q_vector->napi); 5561 napi_schedule(&q_vector->napi);
5647 5562
5648 return IRQ_HANDLED; 5563 return IRQ_HANDLED;
@@ -5684,6 +5599,19 @@ static irqreturn_t igb_intr(int irq, void *data)
5684 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5599 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5685 } 5600 }
5686 5601
5602#ifdef CONFIG_IGB_PTP
5603 if (icr & E1000_ICR_TS) {
5604 u32 tsicr = rd32(E1000_TSICR);
5605
5606 if (tsicr & E1000_TSICR_TXTS) {
5607 /* acknowledge the interrupt */
5608 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5609 /* retrieve hardware timestamp */
5610 schedule_work(&adapter->ptp_tx_work);
5611 }
5612 }
5613#endif /* CONFIG_IGB_PTP */
5614
5687 napi_schedule(&q_vector->napi); 5615 napi_schedule(&q_vector->napi);
5688 5616
5689 return IRQ_HANDLED; 5617 return IRQ_HANDLED;
@@ -5743,37 +5671,6 @@ static int igb_poll(struct napi_struct *napi, int budget)
5743 return 0; 5671 return 0;
5744} 5672}
5745 5673
5746#ifdef CONFIG_IGB_PTP
5747/**
5748 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5749 * @q_vector: pointer to q_vector containing needed info
5750 * @buffer: pointer to igb_tx_buffer structure
5751 *
5752 * If we were asked to do hardware stamping and such a time stamp is
5753 * available, then it must have been for this skb here because we only
5754 * allow only one such packet into the queue.
5755 */
5756static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5757 struct igb_tx_buffer *buffer_info)
5758{
5759 struct igb_adapter *adapter = q_vector->adapter;
5760 struct e1000_hw *hw = &adapter->hw;
5761 struct skb_shared_hwtstamps shhwtstamps;
5762 u64 regval;
5763
5764 /* if skb does not support hw timestamp or TX stamp not valid exit */
5765 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
5766 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5767 return;
5768
5769 regval = rd32(E1000_TXSTMPL);
5770 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5771
5772 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
5773 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
5774}
5775
5776#endif
5777/** 5674/**
5778 * igb_clean_tx_irq - Reclaim resources after transmit completes 5675 * igb_clean_tx_irq - Reclaim resources after transmit completes
5779 * @q_vector: pointer to q_vector containing needed info 5676 * @q_vector: pointer to q_vector containing needed info
@@ -5785,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5785 struct igb_adapter *adapter = q_vector->adapter; 5682 struct igb_adapter *adapter = q_vector->adapter;
5786 struct igb_ring *tx_ring = q_vector->tx.ring; 5683 struct igb_ring *tx_ring = q_vector->tx.ring;
5787 struct igb_tx_buffer *tx_buffer; 5684 struct igb_tx_buffer *tx_buffer;
5788 union e1000_adv_tx_desc *tx_desc, *eop_desc; 5685 union e1000_adv_tx_desc *tx_desc;
5789 unsigned int total_bytes = 0, total_packets = 0; 5686 unsigned int total_bytes = 0, total_packets = 0;
5790 unsigned int budget = q_vector->tx.work_limit; 5687 unsigned int budget = q_vector->tx.work_limit;
5791 unsigned int i = tx_ring->next_to_clean; 5688 unsigned int i = tx_ring->next_to_clean;
@@ -5797,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5797 tx_desc = IGB_TX_DESC(tx_ring, i); 5694 tx_desc = IGB_TX_DESC(tx_ring, i);
5798 i -= tx_ring->count; 5695 i -= tx_ring->count;
5799 5696
5800 for (; budget; budget--) { 5697 do {
5801 eop_desc = tx_buffer->next_to_watch; 5698 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
5802
5803 /* prevent any other reads prior to eop_desc */
5804 rmb();
5805 5699
5806 /* if next_to_watch is not set then there is no work pending */ 5700 /* if next_to_watch is not set then there is no work pending */
5807 if (!eop_desc) 5701 if (!eop_desc)
5808 break; 5702 break;
5809 5703
5704 /* prevent any other reads prior to eop_desc */
5705 rmb();
5706
5810 /* if DD is not set pending work has not been completed */ 5707 /* if DD is not set pending work has not been completed */
5811 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) 5708 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5812 break; 5709 break;
@@ -5818,25 +5715,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5818 total_bytes += tx_buffer->bytecount; 5715 total_bytes += tx_buffer->bytecount;
5819 total_packets += tx_buffer->gso_segs; 5716 total_packets += tx_buffer->gso_segs;
5820 5717
5821#ifdef CONFIG_IGB_PTP
5822 /* retrieve hardware timestamp */
5823 igb_tx_hwtstamp(q_vector, tx_buffer);
5824
5825#endif
5826 /* free the skb */ 5718 /* free the skb */
5827 dev_kfree_skb_any(tx_buffer->skb); 5719 dev_kfree_skb_any(tx_buffer->skb);
5828 tx_buffer->skb = NULL;
5829 5720
5830 /* unmap skb header data */ 5721 /* unmap skb header data */
5831 dma_unmap_single(tx_ring->dev, 5722 dma_unmap_single(tx_ring->dev,
5832 tx_buffer->dma, 5723 dma_unmap_addr(tx_buffer, dma),
5833 tx_buffer->length, 5724 dma_unmap_len(tx_buffer, len),
5834 DMA_TO_DEVICE); 5725 DMA_TO_DEVICE);
5835 5726
5727 /* clear tx_buffer data */
5728 tx_buffer->skb = NULL;
5729 dma_unmap_len_set(tx_buffer, len, 0);
5730
5836 /* clear last DMA location and unmap remaining buffers */ 5731 /* clear last DMA location and unmap remaining buffers */
5837 while (tx_desc != eop_desc) { 5732 while (tx_desc != eop_desc) {
5838 tx_buffer->dma = 0;
5839
5840 tx_buffer++; 5733 tx_buffer++;
5841 tx_desc++; 5734 tx_desc++;
5842 i++; 5735 i++;
@@ -5847,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5847 } 5740 }
5848 5741
5849 /* unmap any remaining paged data */ 5742 /* unmap any remaining paged data */
5850 if (tx_buffer->dma) { 5743 if (dma_unmap_len(tx_buffer, len)) {
5851 dma_unmap_page(tx_ring->dev, 5744 dma_unmap_page(tx_ring->dev,
5852 tx_buffer->dma, 5745 dma_unmap_addr(tx_buffer, dma),
5853 tx_buffer->length, 5746 dma_unmap_len(tx_buffer, len),
5854 DMA_TO_DEVICE); 5747 DMA_TO_DEVICE);
5748 dma_unmap_len_set(tx_buffer, len, 0);
5855 } 5749 }
5856 } 5750 }
5857 5751
5858 /* clear last DMA location */
5859 tx_buffer->dma = 0;
5860
5861 /* move us one more past the eop_desc for start of next pkt */ 5752 /* move us one more past the eop_desc for start of next pkt */
5862 tx_buffer++; 5753 tx_buffer++;
5863 tx_desc++; 5754 tx_desc++;
@@ -5867,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5867 tx_buffer = tx_ring->tx_buffer_info; 5758 tx_buffer = tx_ring->tx_buffer_info;
5868 tx_desc = IGB_TX_DESC(tx_ring, 0); 5759 tx_desc = IGB_TX_DESC(tx_ring, 0);
5869 } 5760 }
5870 } 5761
5762 /* issue prefetch for next Tx descriptor */
5763 prefetch(tx_desc);
5764
5765 /* update budget accounting */
5766 budget--;
5767 } while (likely(budget));
5871 5768
5872 netdev_tx_completed_queue(txring_txq(tx_ring), 5769 netdev_tx_completed_queue(txring_txq(tx_ring),
5873 total_packets, total_bytes); 5770 total_packets, total_bytes);
@@ -5883,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5883 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 5780 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
5884 struct e1000_hw *hw = &adapter->hw; 5781 struct e1000_hw *hw = &adapter->hw;
5885 5782
5886 eop_desc = tx_buffer->next_to_watch;
5887
5888 /* Detect a transmit hang in hardware, this serializes the 5783 /* Detect a transmit hang in hardware, this serializes the
5889 * check with the clearing of time_stamp and movement of i */ 5784 * check with the clearing of time_stamp and movement of i */
5890 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5785 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5891 if (eop_desc && 5786 if (tx_buffer->next_to_watch &&
5892 time_after(jiffies, tx_buffer->time_stamp + 5787 time_after(jiffies, tx_buffer->time_stamp +
5893 (adapter->tx_timeout_factor * HZ)) && 5788 (adapter->tx_timeout_factor * HZ)) &&
5894 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { 5789 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5912,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5912 tx_ring->next_to_use, 5807 tx_ring->next_to_use,
5913 tx_ring->next_to_clean, 5808 tx_ring->next_to_clean,
5914 tx_buffer->time_stamp, 5809 tx_buffer->time_stamp,
5915 eop_desc, 5810 tx_buffer->next_to_watch,
5916 jiffies, 5811 jiffies,
5917 eop_desc->wb.status); 5812 tx_buffer->next_to_watch->wb.status);
5918 netif_stop_subqueue(tx_ring->netdev, 5813 netif_stop_subqueue(tx_ring->netdev,
5919 tx_ring->queue_index); 5814 tx_ring->queue_index);
5920 5815
@@ -5994,47 +5889,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
5994 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 5889 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5995} 5890}
5996 5891
5997#ifdef CONFIG_IGB_PTP
5998static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5999 union e1000_adv_rx_desc *rx_desc,
6000 struct sk_buff *skb)
6001{
6002 struct igb_adapter *adapter = q_vector->adapter;
6003 struct e1000_hw *hw = &adapter->hw;
6004 u64 regval;
6005
6006 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
6007 E1000_RXDADV_STAT_TS))
6008 return;
6009
6010 /*
6011 * If this bit is set, then the RX registers contain the time stamp. No
6012 * other packet will be time stamped until we read these registers, so
6013 * read the registers to make them available again. Because only one
6014 * packet can be time stamped at a time, we know that the register
6015 * values must belong to this one here and therefore we don't need to
6016 * compare any of the additional attributes stored for it.
6017 *
6018 * If nothing went wrong, then it should have a shared tx_flags that we
6019 * can turn into a skb_shared_hwtstamps.
6020 */
6021 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6022 u32 *stamp = (u32 *)skb->data;
6023 regval = le32_to_cpu(*(stamp + 2));
6024 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
6025 skb_pull(skb, IGB_TS_HDR_LEN);
6026 } else {
6027 if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
6028 return;
6029
6030 regval = rd32(E1000_RXSTMPL);
6031 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
6032 }
6033
6034 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
6035}
6036
6037#endif
6038static void igb_rx_vlan(struct igb_ring *ring, 5892static void igb_rx_vlan(struct igb_ring *ring,
6039 union e1000_adv_rx_desc *rx_desc, 5893 union e1000_adv_rx_desc *rx_desc,
6040 struct sk_buff *skb) 5894 struct sk_buff *skb)
@@ -6146,8 +6000,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
6146 } 6000 }
6147 6001
6148#ifdef CONFIG_IGB_PTP 6002#ifdef CONFIG_IGB_PTP
6149 igb_rx_hwtstamp(q_vector, rx_desc, skb); 6003 igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
6150#endif 6004#endif /* CONFIG_IGB_PTP */
6151 igb_rx_hash(rx_ring, rx_desc, skb); 6005 igb_rx_hash(rx_ring, rx_desc, skb);
6152 igb_rx_checksum(rx_ring, rx_desc, skb); 6006 igb_rx_checksum(rx_ring, rx_desc, skb);
6153 igb_rx_vlan(rx_ring, rx_desc, skb); 6007 igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6341,181 +6195,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6341} 6195}
6342 6196
6343/** 6197/**
6344 * igb_hwtstamp_ioctl - control hardware time stamping
6345 * @netdev:
6346 * @ifreq:
6347 * @cmd:
6348 *
6349 * Outgoing time stamping can be enabled and disabled. Play nice and
6350 * disable it when requested, although it shouldn't case any overhead
6351 * when no packet needs it. At most one packet in the queue may be
6352 * marked for time stamping, otherwise it would be impossible to tell
6353 * for sure to which packet the hardware time stamp belongs.
6354 *
6355 * Incoming time stamping has to be configured via the hardware
6356 * filters. Not all combinations are supported, in particular event
6357 * type has to be specified. Matching the kind of event packet is
6358 * not supported, with the exception of "all V2 events regardless of
6359 * level 2 or 4".
6360 *
6361 **/
6362static int igb_hwtstamp_ioctl(struct net_device *netdev,
6363 struct ifreq *ifr, int cmd)
6364{
6365 struct igb_adapter *adapter = netdev_priv(netdev);
6366 struct e1000_hw *hw = &adapter->hw;
6367 struct hwtstamp_config config;
6368 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6369 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6370 u32 tsync_rx_cfg = 0;
6371 bool is_l4 = false;
6372 bool is_l2 = false;
6373 u32 regval;
6374
6375 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6376 return -EFAULT;
6377
6378 /* reserved for future extensions */
6379 if (config.flags)
6380 return -EINVAL;
6381
6382 switch (config.tx_type) {
6383 case HWTSTAMP_TX_OFF:
6384 tsync_tx_ctl = 0;
6385 case HWTSTAMP_TX_ON:
6386 break;
6387 default:
6388 return -ERANGE;
6389 }
6390
6391 switch (config.rx_filter) {
6392 case HWTSTAMP_FILTER_NONE:
6393 tsync_rx_ctl = 0;
6394 break;
6395 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6396 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6397 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6398 case HWTSTAMP_FILTER_ALL:
6399 /*
6400 * register TSYNCRXCFG must be set, therefore it is not
6401 * possible to time stamp both Sync and Delay_Req messages
6402 * => fall back to time stamping all packets
6403 */
6404 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6405 config.rx_filter = HWTSTAMP_FILTER_ALL;
6406 break;
6407 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6408 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
6409 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
6410 is_l4 = true;
6411 break;
6412 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6413 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
6414 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
6415 is_l4 = true;
6416 break;
6417 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6418 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6419 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
6420 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
6421 is_l2 = true;
6422 is_l4 = true;
6423 config.rx_filter = HWTSTAMP_FILTER_SOME;
6424 break;
6425 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6426 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6427 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
6428 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
6429 is_l2 = true;
6430 is_l4 = true;
6431 config.rx_filter = HWTSTAMP_FILTER_SOME;
6432 break;
6433 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6434 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6435 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6436 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
6437 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
6438 is_l2 = true;
6439 is_l4 = true;
6440 break;
6441 default:
6442 return -ERANGE;
6443 }
6444
6445 if (hw->mac.type == e1000_82575) {
6446 if (tsync_rx_ctl | tsync_tx_ctl)
6447 return -EINVAL;
6448 return 0;
6449 }
6450
6451 /*
6452 * Per-packet timestamping only works if all packets are
6453 * timestamped, so enable timestamping in all packets as
6454 * long as one rx filter was configured.
6455 */
6456 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
6457 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6458 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6459 }
6460
6461 /* enable/disable TX */
6462 regval = rd32(E1000_TSYNCTXCTL);
6463 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6464 regval |= tsync_tx_ctl;
6465 wr32(E1000_TSYNCTXCTL, regval);
6466
6467 /* enable/disable RX */
6468 regval = rd32(E1000_TSYNCRXCTL);
6469 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6470 regval |= tsync_rx_ctl;
6471 wr32(E1000_TSYNCRXCTL, regval);
6472
6473 /* define which PTP packets are time stamped */
6474 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6475
6476 /* define ethertype filter for timestamped packets */
6477 if (is_l2)
6478 wr32(E1000_ETQF(3),
6479 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6480 E1000_ETQF_1588 | /* enable timestamping */
6481 ETH_P_1588)); /* 1588 eth protocol type */
6482 else
6483 wr32(E1000_ETQF(3), 0);
6484
6485#define PTP_PORT 319
6486 /* L4 Queue Filter[3]: filter by destination port and protocol */
6487 if (is_l4) {
6488 u32 ftqf = (IPPROTO_UDP /* UDP */
6489 | E1000_FTQF_VF_BP /* VF not compared */
6490 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6491 | E1000_FTQF_MASK); /* mask all inputs */
6492 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
6493
6494 wr32(E1000_IMIR(3), htons(PTP_PORT));
6495 wr32(E1000_IMIREXT(3),
6496 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6497 if (hw->mac.type == e1000_82576) {
6498 /* enable source port check */
6499 wr32(E1000_SPQF(3), htons(PTP_PORT));
6500 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6501 }
6502 wr32(E1000_FTQF(3), ftqf);
6503 } else {
6504 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6505 }
6506 wrfl();
6507
6508 adapter->hwtstamp_config = config;
6509
6510 /* clear TX/RX time stamp registers, just to be sure */
6511 regval = rd32(E1000_TXSTMPH);
6512 regval = rd32(E1000_RXSTMPH);
6513
6514 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6515 -EFAULT : 0;
6516}
6517
6518/**
6519 * igb_ioctl - 6198 * igb_ioctl -
6520 * @netdev: 6199 * @netdev:
6521 * @ifreq: 6200 * @ifreq:
@@ -6528,8 +6207,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6528 case SIOCGMIIREG: 6207 case SIOCGMIIREG:
6529 case SIOCSMIIREG: 6208 case SIOCSMIIREG:
6530 return igb_mii_ioctl(netdev, ifr, cmd); 6209 return igb_mii_ioctl(netdev, ifr, cmd);
6210#ifdef CONFIG_IGB_PTP
6531 case SIOCSHWTSTAMP: 6211 case SIOCSHWTSTAMP:
6532 return igb_hwtstamp_ioctl(netdev, ifr, cmd); 6212 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
6213#endif /* CONFIG_IGB_PTP */
6533 default: 6214 default:
6534 return -EOPNOTSUPP; 6215 return -EOPNOTSUPP;
6535 } 6216 }
@@ -6667,6 +6348,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
6667 default: 6348 default:
6668 goto err_inval; 6349 goto err_inval;
6669 } 6350 }
6351
6352 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6353 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6354
6670 return 0; 6355 return 0;
6671 6356
6672err_inval: 6357err_inval:
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c846ea9131a3..ee21445157a3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,22 +69,22 @@
69 * 2^40 * 10^-9 / 60 = 18.3 minutes. 69 * 2^40 * 10^-9 / 60 = 18.3 minutes.
70 */ 70 */
71 71
72#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9) 72#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
73#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) 73#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
74#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) 74#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
76#define IGB_NBITS_82580 40 76#define IGB_NBITS_82580 40
77 77
78/* 78/*
79 * SYSTIM read access for the 82576 79 * SYSTIM read access for the 82576
80 */ 80 */
81 81
82static cycle_t igb_82576_systim_read(const struct cyclecounter *cc) 82static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
83{ 83{
84 u64 val;
85 u32 lo, hi;
86 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 84 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
87 struct e1000_hw *hw = &igb->hw; 85 struct e1000_hw *hw = &igb->hw;
86 u64 val;
87 u32 lo, hi;
88 88
89 lo = rd32(E1000_SYSTIML); 89 lo = rd32(E1000_SYSTIML);
90 hi = rd32(E1000_SYSTIMH); 90 hi = rd32(E1000_SYSTIMH);
@@ -99,12 +99,12 @@ static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
99 * SYSTIM read access for the 82580 99 * SYSTIM read access for the 82580
100 */ 100 */
101 101
102static cycle_t igb_82580_systim_read(const struct cyclecounter *cc) 102static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
103{ 103{
104 u64 val;
105 u32 lo, hi, jk;
106 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 104 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
107 struct e1000_hw *hw = &igb->hw; 105 struct e1000_hw *hw = &igb->hw;
106 u64 val;
107 u32 lo, hi, jk;
108 108
109 /* 109 /*
110 * The timestamp latches on lowest register read. For the 82580 110 * The timestamp latches on lowest register read. For the 82580
@@ -122,16 +122,101 @@ static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
122} 122}
123 123
124/* 124/*
125 * SYSTIM read access for I210/I211
126 */
127
128static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
129{
130 struct e1000_hw *hw = &adapter->hw;
131 u32 sec, nsec, jk;
132
133 /*
134 * The timestamp latches on lowest register read. For I210/I211, the
135 * lowest register is SYSTIMR. Since we only need to provide nanosecond
136 * resolution, we can ignore it.
137 */
138 jk = rd32(E1000_SYSTIMR);
139 nsec = rd32(E1000_SYSTIML);
140 sec = rd32(E1000_SYSTIMH);
141
142 ts->tv_sec = sec;
143 ts->tv_nsec = nsec;
144}
145
146static void igb_ptp_write_i210(struct igb_adapter *adapter,
147 const struct timespec *ts)
148{
149 struct e1000_hw *hw = &adapter->hw;
150
151 /*
152 * Writing the SYSTIMR register is not necessary as it only provides
153 * sub-nanosecond resolution.
154 */
155 wr32(E1000_SYSTIML, ts->tv_nsec);
156 wr32(E1000_SYSTIMH, ts->tv_sec);
157}
158
159/**
160 * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
161 * @adapter: board private structure
162 * @hwtstamps: timestamp structure to update
163 * @systim: unsigned 64bit system time value.
164 *
165 * We need to convert the system time value stored in the RX/TXSTMP registers
166 * into a hwtstamp which can be used by the upper level timestamping functions.
167 *
168 * The 'tmreg_lock' spinlock is used to protect the consistency of the
169 * system time value. This is needed because reading the 64 bit time
170 * value involves reading two (or three) 32 bit registers. The first
171 * read latches the value. Ditto for writing.
172 *
173 * In addition, here have extended the system time with an overflow
174 * counter in software.
175 **/
176static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
177 struct skb_shared_hwtstamps *hwtstamps,
178 u64 systim)
179{
180 unsigned long flags;
181 u64 ns;
182
183 switch (adapter->hw.mac.type) {
184 case e1000_82576:
185 case e1000_82580:
186 case e1000_i350:
187 spin_lock_irqsave(&adapter->tmreg_lock, flags);
188
189 ns = timecounter_cyc2time(&adapter->tc, systim);
190
191 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
192
193 memset(hwtstamps, 0, sizeof(*hwtstamps));
194 hwtstamps->hwtstamp = ns_to_ktime(ns);
195 break;
196 case e1000_i210:
197 case e1000_i211:
198 memset(hwtstamps, 0, sizeof(*hwtstamps));
199 /* Upper 32 bits contain s, lower 32 bits contain ns. */
200 hwtstamps->hwtstamp = ktime_set(systim >> 32,
201 systim & 0xFFFFFFFF);
202 break;
203 default:
204 break;
205 }
206}
207
208/*
125 * PTP clock operations 209 * PTP clock operations
126 */ 210 */
127 211
128static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 212static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
129{ 213{
214 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
215 ptp_caps);
216 struct e1000_hw *hw = &igb->hw;
217 int neg_adj = 0;
130 u64 rate; 218 u64 rate;
131 u32 incvalue; 219 u32 incvalue;
132 int neg_adj = 0;
133 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
134 struct e1000_hw *hw = &igb->hw;
135 220
136 if (ppb < 0) { 221 if (ppb < 0) {
137 neg_adj = 1; 222 neg_adj = 1;
@@ -153,13 +238,14 @@ static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
153 return 0; 238 return 0;
154} 239}
155 240
156static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 241static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
157{ 242{
243 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
244 ptp_caps);
245 struct e1000_hw *hw = &igb->hw;
246 int neg_adj = 0;
158 u64 rate; 247 u64 rate;
159 u32 inca; 248 u32 inca;
160 int neg_adj = 0;
161 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
162 struct e1000_hw *hw = &igb->hw;
163 249
164 if (ppb < 0) { 250 if (ppb < 0) {
165 neg_adj = 1; 251 neg_adj = 1;
@@ -178,11 +264,12 @@ static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
178 return 0; 264 return 0;
179} 265}
180 266
181static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta) 267static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
182{ 268{
183 s64 now; 269 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
270 ptp_caps);
184 unsigned long flags; 271 unsigned long flags;
185 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); 272 s64 now;
186 273
187 spin_lock_irqsave(&igb->tmreg_lock, flags); 274 spin_lock_irqsave(&igb->tmreg_lock, flags);
188 275
@@ -195,12 +282,32 @@ static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
195 return 0; 282 return 0;
196} 283}
197 284
198static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts) 285static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
199{ 286{
287 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
288 ptp_caps);
289 unsigned long flags;
290 struct timespec now, then = ns_to_timespec(delta);
291
292 spin_lock_irqsave(&igb->tmreg_lock, flags);
293
294 igb_ptp_read_i210(igb, &now);
295 now = timespec_add(now, then);
296 igb_ptp_write_i210(igb, (const struct timespec *)&now);
297
298 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
299
300 return 0;
301}
302
303static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
304 struct timespec *ts)
305{
306 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
307 ptp_caps);
308 unsigned long flags;
200 u64 ns; 309 u64 ns;
201 u32 remainder; 310 u32 remainder;
202 unsigned long flags;
203 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
204 311
205 spin_lock_irqsave(&igb->tmreg_lock, flags); 312 spin_lock_irqsave(&igb->tmreg_lock, flags);
206 313
@@ -214,11 +321,29 @@ static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
214 return 0; 321 return 0;
215} 322}
216 323
217static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts) 324static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
325 struct timespec *ts)
218{ 326{
219 u64 ns; 327 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
328 ptp_caps);
220 unsigned long flags; 329 unsigned long flags;
221 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); 330
331 spin_lock_irqsave(&igb->tmreg_lock, flags);
332
333 igb_ptp_read_i210(igb, ts);
334
335 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
336
337 return 0;
338}
339
340static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
341 const struct timespec *ts)
342{
343 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
344 ptp_caps);
345 unsigned long flags;
346 u64 ns;
222 347
223 ns = ts->tv_sec * 1000000000ULL; 348 ns = ts->tv_sec * 1000000000ULL;
224 ns += ts->tv_nsec; 349 ns += ts->tv_nsec;
@@ -232,77 +357,369 @@ static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
232 return 0; 357 return 0;
233} 358}
234 359
235static int ptp_82576_enable(struct ptp_clock_info *ptp, 360static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
236 struct ptp_clock_request *rq, int on) 361 const struct timespec *ts)
237{ 362{
238 return -EOPNOTSUPP; 363 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
364 ptp_caps);
365 unsigned long flags;
366
367 spin_lock_irqsave(&igb->tmreg_lock, flags);
368
369 igb_ptp_write_i210(igb, ts);
370
371 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
372
373 return 0;
239} 374}
240 375
241static int ptp_82580_enable(struct ptp_clock_info *ptp, 376static int igb_ptp_enable(struct ptp_clock_info *ptp,
242 struct ptp_clock_request *rq, int on) 377 struct ptp_clock_request *rq, int on)
243{ 378{
244 return -EOPNOTSUPP; 379 return -EOPNOTSUPP;
245} 380}
246 381
247static void igb_overflow_check(struct work_struct *work) 382/**
383 * igb_ptp_tx_work
384 * @work: pointer to work struct
385 *
386 * This work function polls the TSYNCTXCTL valid bit to determine when a
387 * timestamp has been taken for the current stored skb.
388 */
389void igb_ptp_tx_work(struct work_struct *work)
390{
391 struct igb_adapter *adapter = container_of(work, struct igb_adapter,
392 ptp_tx_work);
393 struct e1000_hw *hw = &adapter->hw;
394 u32 tsynctxctl;
395
396 if (!adapter->ptp_tx_skb)
397 return;
398
399 tsynctxctl = rd32(E1000_TSYNCTXCTL);
400 if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
401 igb_ptp_tx_hwtstamp(adapter);
402 else
403 /* reschedule to check later */
404 schedule_work(&adapter->ptp_tx_work);
405}
406
407static void igb_ptp_overflow_check(struct work_struct *work)
248{ 408{
249 struct timespec ts;
250 struct igb_adapter *igb = 409 struct igb_adapter *igb =
251 container_of(work, struct igb_adapter, overflow_work.work); 410 container_of(work, struct igb_adapter, ptp_overflow_work.work);
411 struct timespec ts;
252 412
253 igb_gettime(&igb->caps, &ts); 413 igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
254 414
255 pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); 415 pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
256 416
257 schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD); 417 schedule_delayed_work(&igb->ptp_overflow_work,
418 IGB_SYSTIM_OVERFLOW_PERIOD);
419}
420
421/**
422 * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
423 * @adapter: Board private structure.
424 *
425 * If we were asked to do hardware stamping and such a time stamp is
426 * available, then it must have been for this skb here because we only
427 * allow only one such packet into the queue.
428 */
429void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
430{
431 struct e1000_hw *hw = &adapter->hw;
432 struct skb_shared_hwtstamps shhwtstamps;
433 u64 regval;
434
435 regval = rd32(E1000_TXSTMPL);
436 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
437
438 igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
439 skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
440 dev_kfree_skb_any(adapter->ptp_tx_skb);
441 adapter->ptp_tx_skb = NULL;
442}
443
444void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
445 union e1000_adv_rx_desc *rx_desc,
446 struct sk_buff *skb)
447{
448 struct igb_adapter *adapter = q_vector->adapter;
449 struct e1000_hw *hw = &adapter->hw;
450 u64 regval;
451
452 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
453 E1000_RXDADV_STAT_TS))
454 return;
455
456 /*
457 * If this bit is set, then the RX registers contain the time stamp. No
458 * other packet will be time stamped until we read these registers, so
459 * read the registers to make them available again. Because only one
460 * packet can be time stamped at a time, we know that the register
461 * values must belong to this one here and therefore we don't need to
462 * compare any of the additional attributes stored for it.
463 *
464 * If nothing went wrong, then it should have a shared tx_flags that we
465 * can turn into a skb_shared_hwtstamps.
466 */
467 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
468 u32 *stamp = (u32 *)skb->data;
469 regval = le32_to_cpu(*(stamp + 2));
470 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
471 skb_pull(skb, IGB_TS_HDR_LEN);
472 } else {
473 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
474 return;
475
476 regval = rd32(E1000_RXSTMPL);
477 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
478 }
479
480 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
481}
482
483/**
484 * igb_ptp_hwtstamp_ioctl - control hardware time stamping
485 * @netdev:
486 * @ifreq:
487 * @cmd:
488 *
489 * Outgoing time stamping can be enabled and disabled. Play nice and
490 * disable it when requested, although it shouldn't case any overhead
491 * when no packet needs it. At most one packet in the queue may be
492 * marked for time stamping, otherwise it would be impossible to tell
493 * for sure to which packet the hardware time stamp belongs.
494 *
495 * Incoming time stamping has to be configured via the hardware
496 * filters. Not all combinations are supported, in particular event
497 * type has to be specified. Matching the kind of event packet is
498 * not supported, with the exception of "all V2 events regardless of
499 * level 2 or 4".
500 *
501 **/
502int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
503 struct ifreq *ifr, int cmd)
504{
505 struct igb_adapter *adapter = netdev_priv(netdev);
506 struct e1000_hw *hw = &adapter->hw;
507 struct hwtstamp_config config;
508 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
509 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
510 u32 tsync_rx_cfg = 0;
511 bool is_l4 = false;
512 bool is_l2 = false;
513 u32 regval;
514
515 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
516 return -EFAULT;
517
518 /* reserved for future extensions */
519 if (config.flags)
520 return -EINVAL;
521
522 switch (config.tx_type) {
523 case HWTSTAMP_TX_OFF:
524 tsync_tx_ctl = 0;
525 case HWTSTAMP_TX_ON:
526 break;
527 default:
528 return -ERANGE;
529 }
530
531 switch (config.rx_filter) {
532 case HWTSTAMP_FILTER_NONE:
533 tsync_rx_ctl = 0;
534 break;
535 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
536 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
537 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
538 case HWTSTAMP_FILTER_ALL:
539 /*
540 * register TSYNCRXCFG must be set, therefore it is not
541 * possible to time stamp both Sync and Delay_Req messages
542 * => fall back to time stamping all packets
543 */
544 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
545 config.rx_filter = HWTSTAMP_FILTER_ALL;
546 break;
547 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
548 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
549 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
550 is_l4 = true;
551 break;
552 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
553 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
554 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
555 is_l4 = true;
556 break;
557 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
558 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
559 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
560 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
561 is_l2 = true;
562 is_l4 = true;
563 config.rx_filter = HWTSTAMP_FILTER_SOME;
564 break;
565 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
566 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
567 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
568 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
569 is_l2 = true;
570 is_l4 = true;
571 config.rx_filter = HWTSTAMP_FILTER_SOME;
572 break;
573 case HWTSTAMP_FILTER_PTP_V2_EVENT:
574 case HWTSTAMP_FILTER_PTP_V2_SYNC:
575 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
576 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
577 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
578 is_l2 = true;
579 is_l4 = true;
580 break;
581 default:
582 return -ERANGE;
583 }
584
585 if (hw->mac.type == e1000_82575) {
586 if (tsync_rx_ctl | tsync_tx_ctl)
587 return -EINVAL;
588 return 0;
589 }
590
591 /*
592 * Per-packet timestamping only works if all packets are
593 * timestamped, so enable timestamping in all packets as
594 * long as one rx filter was configured.
595 */
596 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
597 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
598 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
599
600 if ((hw->mac.type == e1000_i210) ||
601 (hw->mac.type == e1000_i211)) {
602 regval = rd32(E1000_RXPBS);
603 regval |= E1000_RXPBS_CFG_TS_EN;
604 wr32(E1000_RXPBS, regval);
605 }
606 }
607
608 /* enable/disable TX */
609 regval = rd32(E1000_TSYNCTXCTL);
610 regval &= ~E1000_TSYNCTXCTL_ENABLED;
611 regval |= tsync_tx_ctl;
612 wr32(E1000_TSYNCTXCTL, regval);
613
614 /* enable/disable RX */
615 regval = rd32(E1000_TSYNCRXCTL);
616 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
617 regval |= tsync_rx_ctl;
618 wr32(E1000_TSYNCRXCTL, regval);
619
620 /* define which PTP packets are time stamped */
621 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
622
623 /* define ethertype filter for timestamped packets */
624 if (is_l2)
625 wr32(E1000_ETQF(3),
626 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
627 E1000_ETQF_1588 | /* enable timestamping */
628 ETH_P_1588)); /* 1588 eth protocol type */
629 else
630 wr32(E1000_ETQF(3), 0);
631
632#define PTP_PORT 319
633 /* L4 Queue Filter[3]: filter by destination port and protocol */
634 if (is_l4) {
635 u32 ftqf = (IPPROTO_UDP /* UDP */
636 | E1000_FTQF_VF_BP /* VF not compared */
637 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
638 | E1000_FTQF_MASK); /* mask all inputs */
639 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
640
641 wr32(E1000_IMIR(3), htons(PTP_PORT));
642 wr32(E1000_IMIREXT(3),
643 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
644 if (hw->mac.type == e1000_82576) {
645 /* enable source port check */
646 wr32(E1000_SPQF(3), htons(PTP_PORT));
647 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
648 }
649 wr32(E1000_FTQF(3), ftqf);
650 } else {
651 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
652 }
653 wrfl();
654
655 /* clear TX/RX time stamp registers, just to be sure */
656 regval = rd32(E1000_TXSTMPL);
657 regval = rd32(E1000_TXSTMPH);
658 regval = rd32(E1000_RXSTMPL);
659 regval = rd32(E1000_RXSTMPH);
660
661 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
662 -EFAULT : 0;
258} 663}
259 664
260void igb_ptp_init(struct igb_adapter *adapter) 665void igb_ptp_init(struct igb_adapter *adapter)
261{ 666{
262 struct e1000_hw *hw = &adapter->hw; 667 struct e1000_hw *hw = &adapter->hw;
668 struct net_device *netdev = adapter->netdev;
263 669
264 switch (hw->mac.type) { 670 switch (hw->mac.type) {
265 case e1000_i210: 671 case e1000_82576:
266 case e1000_i211: 672 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
267 case e1000_i350: 673 adapter->ptp_caps.owner = THIS_MODULE;
674 adapter->ptp_caps.max_adj = 1000000000;
675 adapter->ptp_caps.n_ext_ts = 0;
676 adapter->ptp_caps.pps = 0;
677 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
678 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
679 adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
680 adapter->ptp_caps.settime = igb_ptp_settime_82576;
681 adapter->ptp_caps.enable = igb_ptp_enable;
682 adapter->cc.read = igb_ptp_read_82576;
683 adapter->cc.mask = CLOCKSOURCE_MASK(64);
684 adapter->cc.mult = 1;
685 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
686 /* Dial the nominal frequency. */
687 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
688 break;
268 case e1000_82580: 689 case e1000_82580:
269 adapter->caps.owner = THIS_MODULE; 690 case e1000_i350:
270 strcpy(adapter->caps.name, "igb-82580"); 691 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
271 adapter->caps.max_adj = 62499999; 692 adapter->ptp_caps.owner = THIS_MODULE;
272 adapter->caps.n_ext_ts = 0; 693 adapter->ptp_caps.max_adj = 62499999;
273 adapter->caps.pps = 0; 694 adapter->ptp_caps.n_ext_ts = 0;
274 adapter->caps.adjfreq = ptp_82580_adjfreq; 695 adapter->ptp_caps.pps = 0;
275 adapter->caps.adjtime = igb_adjtime; 696 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
276 adapter->caps.gettime = igb_gettime; 697 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
277 adapter->caps.settime = igb_settime; 698 adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
278 adapter->caps.enable = ptp_82580_enable; 699 adapter->ptp_caps.settime = igb_ptp_settime_82576;
279 adapter->cc.read = igb_82580_systim_read; 700 adapter->ptp_caps.enable = igb_ptp_enable;
280 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); 701 adapter->cc.read = igb_ptp_read_82580;
281 adapter->cc.mult = 1; 702 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
282 adapter->cc.shift = 0; 703 adapter->cc.mult = 1;
704 adapter->cc.shift = 0;
283 /* Enable the timer functions by clearing bit 31. */ 705 /* Enable the timer functions by clearing bit 31. */
284 wr32(E1000_TSAUXC, 0x0); 706 wr32(E1000_TSAUXC, 0x0);
285 break; 707 break;
286 708 case e1000_i210:
287 case e1000_82576: 709 case e1000_i211:
288 adapter->caps.owner = THIS_MODULE; 710 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
289 strcpy(adapter->caps.name, "igb-82576"); 711 adapter->ptp_caps.owner = THIS_MODULE;
290 adapter->caps.max_adj = 1000000000; 712 adapter->ptp_caps.max_adj = 62499999;
291 adapter->caps.n_ext_ts = 0; 713 adapter->ptp_caps.n_ext_ts = 0;
292 adapter->caps.pps = 0; 714 adapter->ptp_caps.pps = 0;
293 adapter->caps.adjfreq = ptp_82576_adjfreq; 715 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
294 adapter->caps.adjtime = igb_adjtime; 716 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
295 adapter->caps.gettime = igb_gettime; 717 adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
296 adapter->caps.settime = igb_settime; 718 adapter->ptp_caps.settime = igb_ptp_settime_i210;
297 adapter->caps.enable = ptp_82576_enable; 719 adapter->ptp_caps.enable = igb_ptp_enable;
298 adapter->cc.read = igb_82576_systim_read; 720 /* Enable the timer functions by clearing bit 31. */
299 adapter->cc.mask = CLOCKSOURCE_MASK(64); 721 wr32(E1000_TSAUXC, 0x0);
300 adapter->cc.mult = 1;
301 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
302 /* Dial the nominal frequency. */
303 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
304 break; 722 break;
305
306 default: 723 default:
307 adapter->ptp_clock = NULL; 724 adapter->ptp_clock = NULL;
308 return; 725 return;
@@ -310,86 +727,114 @@ void igb_ptp_init(struct igb_adapter *adapter)
310 727
311 wrfl(); 728 wrfl();
312 729
313 timecounter_init(&adapter->tc, &adapter->cc, 730 spin_lock_init(&adapter->tmreg_lock);
314 ktime_to_ns(ktime_get_real())); 731 INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
732
733 /* Initialize the clock and overflow work for devices that need it. */
734 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
735 struct timespec ts = ktime_to_timespec(ktime_get_real());
315 736
316 INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check); 737 igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
738 } else {
739 timecounter_init(&adapter->tc, &adapter->cc,
740 ktime_to_ns(ktime_get_real()));
317 741
318 spin_lock_init(&adapter->tmreg_lock); 742 INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
743 igb_ptp_overflow_check);
319 744
320 schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD); 745 schedule_delayed_work(&adapter->ptp_overflow_work,
746 IGB_SYSTIM_OVERFLOW_PERIOD);
747 }
748
749 /* Initialize the time sync interrupts for devices that support it. */
750 if (hw->mac.type >= e1000_82580) {
751 wr32(E1000_TSIM, E1000_TSIM_TXTS);
752 wr32(E1000_IMS, E1000_IMS_TS);
753 }
321 754
322 adapter->ptp_clock = ptp_clock_register(&adapter->caps); 755 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
756 &adapter->pdev->dev);
323 if (IS_ERR(adapter->ptp_clock)) { 757 if (IS_ERR(adapter->ptp_clock)) {
324 adapter->ptp_clock = NULL; 758 adapter->ptp_clock = NULL;
325 dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); 759 dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
326 } else 760 } else {
327 dev_info(&adapter->pdev->dev, "added PHC on %s\n", 761 dev_info(&adapter->pdev->dev, "added PHC on %s\n",
328 adapter->netdev->name); 762 adapter->netdev->name);
763 adapter->flags |= IGB_FLAG_PTP;
764 }
329} 765}
330 766
331void igb_ptp_remove(struct igb_adapter *adapter) 767/**
768 * igb_ptp_stop - Disable PTP device and stop the overflow check.
769 * @adapter: Board private structure.
770 *
771 * This function stops the PTP support and cancels the delayed work.
772 **/
773void igb_ptp_stop(struct igb_adapter *adapter)
332{ 774{
333 switch (adapter->hw.mac.type) { 775 switch (adapter->hw.mac.type) {
334 case e1000_i211:
335 case e1000_i210:
336 case e1000_i350:
337 case e1000_82580:
338 case e1000_82576: 776 case e1000_82576:
339 cancel_delayed_work_sync(&adapter->overflow_work); 777 case e1000_82580:
778 case e1000_i350:
779 cancel_delayed_work_sync(&adapter->ptp_overflow_work);
780 break;
781 case e1000_i210:
782 case e1000_i211:
783 /* No delayed work to cancel. */
340 break; 784 break;
341 default: 785 default:
342 return; 786 return;
343 } 787 }
344 788
789 cancel_work_sync(&adapter->ptp_tx_work);
790
345 if (adapter->ptp_clock) { 791 if (adapter->ptp_clock) {
346 ptp_clock_unregister(adapter->ptp_clock); 792 ptp_clock_unregister(adapter->ptp_clock);
347 dev_info(&adapter->pdev->dev, "removed PHC on %s\n", 793 dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
348 adapter->netdev->name); 794 adapter->netdev->name);
795 adapter->flags &= ~IGB_FLAG_PTP;
349 } 796 }
350} 797}
351 798
352/** 799/**
353 * igb_systim_to_hwtstamp - convert system time value to hw timestamp 800 * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
354 * @adapter: board private structure 801 * @adapter: Board private structure.
355 * @hwtstamps: timestamp structure to update
356 * @systim: unsigned 64bit system time value.
357 *
358 * We need to convert the system time value stored in the RX/TXSTMP registers
359 * into a hwtstamp which can be used by the upper level timestamping functions.
360 * 802 *
361 * The 'tmreg_lock' spinlock is used to protect the consistency of the 803 * This function handles the reset work required to re-enable the PTP device.
362 * system time value. This is needed because reading the 64 bit time
363 * value involves reading two (or three) 32 bit registers. The first
364 * read latches the value. Ditto for writing.
365 *
366 * In addition, here have extended the system time with an overflow
367 * counter in software.
368 **/ 804 **/
369void igb_systim_to_hwtstamp(struct igb_adapter *adapter, 805void igb_ptp_reset(struct igb_adapter *adapter)
370 struct skb_shared_hwtstamps *hwtstamps,
371 u64 systim)
372{ 806{
373 u64 ns; 807 struct e1000_hw *hw = &adapter->hw;
374 unsigned long flags; 808
809 if (!(adapter->flags & IGB_FLAG_PTP))
810 return;
375 811
376 switch (adapter->hw.mac.type) { 812 switch (adapter->hw.mac.type) {
813 case e1000_82576:
814 /* Dial the nominal frequency. */
815 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
816 break;
817 case e1000_82580:
818 case e1000_i350:
377 case e1000_i210: 819 case e1000_i210:
378 case e1000_i211: 820 case e1000_i211:
379 case e1000_i350: 821 /* Enable the timer functions and interrupts. */
380 case e1000_82580: 822 wr32(E1000_TSAUXC, 0x0);
381 case e1000_82576: 823 wr32(E1000_TSIM, E1000_TSIM_TXTS);
824 wr32(E1000_IMS, E1000_IMS_TS);
382 break; 825 break;
383 default: 826 default:
827 /* No work to do. */
384 return; 828 return;
385 } 829 }
386 830
387 spin_lock_irqsave(&adapter->tmreg_lock, flags); 831 /* Re-initialize the timer. */
832 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
833 struct timespec ts = ktime_to_timespec(ktime_get_real());
388 834
389 ns = timecounter_cyc2time(&adapter->tc, systim); 835 igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
390 836 } else {
391 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 837 timecounter_init(&adapter->tc, &adapter->cc,
392 838 ktime_to_ns(ktime_get_real()));
393 memset(hwtstamps, 0, sizeof(*hwtstamps)); 839 }
394 hwtstamps->hwtstamp = ns_to_ktime(ns);
395} 840}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 5fd5d04c26c9..89f40e51fc13 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -32,7 +32,7 @@
32 32
33obj-$(CONFIG_IXGBE) += ixgbe.o 33obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
38 38
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b9623e9ea895..5bd26763554c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -78,6 +78,9 @@
78 78
79/* Supported Rx Buffer Sizes */ 79/* Supported Rx Buffer Sizes */
80#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ 80#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
81#define IXGBE_RXBUFFER_2K 2048
82#define IXGBE_RXBUFFER_3K 3072
83#define IXGBE_RXBUFFER_4K 4096
81#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ 84#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
82 85
83/* 86/*
@@ -104,6 +107,7 @@
104#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) 107#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
105#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) 108#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
106#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) 109#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
110#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
107#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 111#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
108#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 112#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
109#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 113#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
293 * this is twice the size of a half page we need to double the page order 297 * this is twice the size of a half page we need to double the page order
294 * for FCoE enabled Rx queues. 298 * for FCoE enabled Rx queues.
295 */ 299 */
296#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) 300static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
297static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
298{ 301{
299 return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0; 302#ifdef IXGBE_FCOE
303 if (test_bit(__IXGBE_RX_FCOE, &ring->state))
304 return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
305 IXGBE_RXBUFFER_3K;
306#endif
307 return IXGBE_RXBUFFER_2K;
300} 308}
301#else 309
302#define ixgbe_rx_pg_order(_ring) 0 310static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
311{
312#ifdef IXGBE_FCOE
313 if (test_bit(__IXGBE_RX_FCOE, &ring->state))
314 return (PAGE_SIZE < 8192) ? 1 : 0;
303#endif 315#endif
316 return 0;
317}
304#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) 318#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
305#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
306 319
307struct ixgbe_ring_container { 320struct ixgbe_ring_container {
308 struct ixgbe_ring *ring; /* pointer to linked list of rings */ 321 struct ixgbe_ring *ring; /* pointer to linked list of rings */
@@ -584,6 +597,9 @@ struct ixgbe_adapter {
584#ifdef CONFIG_IXGBE_HWMON 597#ifdef CONFIG_IXGBE_HWMON
585 struct hwmon_buff ixgbe_hwmon_buff; 598 struct hwmon_buff ixgbe_hwmon_buff;
586#endif /* CONFIG_IXGBE_HWMON */ 599#endif /* CONFIG_IXGBE_HWMON */
600#ifdef CONFIG_DEBUG_FS
601 struct dentry *ixgbe_dbg_adapter;
602#endif /*CONFIG_DEBUG_FS*/
587}; 603};
588 604
589struct ixgbe_fdir_filter { 605struct ixgbe_fdir_filter {
@@ -712,7 +728,12 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
712 struct netdev_fcoe_hbainfo *info); 728 struct netdev_fcoe_hbainfo *info);
713extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); 729extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
714#endif /* IXGBE_FCOE */ 730#endif /* IXGBE_FCOE */
715 731#ifdef CONFIG_DEBUG_FS
732extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
733extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
734extern void ixgbe_dbg_init(void);
735extern void ixgbe_dbg_exit(void);
736#endif /* CONFIG_DEBUG_FS */
716static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) 737static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
717{ 738{
718 return netdev_get_tx_queue(ring->netdev, ring->queue_index); 739 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
new file mode 100644
index 000000000000..8d3a21889099
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -0,0 +1,300 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifdef CONFIG_DEBUG_FS
29
30#include <linux/debugfs.h>
31#include <linux/module.h>
32
33#include "ixgbe.h"
34
35static struct dentry *ixgbe_dbg_root;
36
37static char ixgbe_dbg_reg_ops_buf[256] = "";
38
39/**
40 * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
41 * @inode: inode that was opened
42 * @filp: file info
43 *
44 * Stash the adapter pointer hiding in the inode into the file pointer where
45 * we can find it later in the read and write calls
46 **/
47static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
48{
49 filp->private_data = inode->i_private;
50 return 0;
51}
52
53/**
54 * ixgbe_dbg_reg_ops_read - read for reg_ops datum
55 * @filp: the opened file
56 * @buffer: where to write the data for the user to read
57 * @count: the size of the user's buffer
58 * @ppos: file position offset
59 **/
60static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
61 size_t count, loff_t *ppos)
62{
63 struct ixgbe_adapter *adapter = filp->private_data;
64 char buf[256];
65 int bytes_not_copied;
66 int len;
67
68 /* don't allow partial reads */
69 if (*ppos != 0)
70 return 0;
71
72 len = snprintf(buf, sizeof(buf), "%s: %s\n",
73 adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
74 if (count < len)
75 return -ENOSPC;
76 bytes_not_copied = copy_to_user(buffer, buf, len);
77 if (bytes_not_copied < 0)
78 return bytes_not_copied;
79
80 *ppos = len;
81 return len;
82}
83
84/**
85 * ixgbe_dbg_reg_ops_write - write into reg_ops datum
86 * @filp: the opened file
87 * @buffer: where to find the user's data
88 * @count: the length of the user's data
89 * @ppos: file position offset
90 **/
91static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
92 const char __user *buffer,
93 size_t count, loff_t *ppos)
94{
95 struct ixgbe_adapter *adapter = filp->private_data;
96 int bytes_not_copied;
97
98 /* don't allow partial writes */
99 if (*ppos != 0)
100 return 0;
101 if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
102 return -ENOSPC;
103
104 bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
105 if (bytes_not_copied < 0)
106 return bytes_not_copied;
107 else if (bytes_not_copied < count)
108 count -= bytes_not_copied;
109 else
110 return -ENOSPC;
111 ixgbe_dbg_reg_ops_buf[count] = '\0';
112
113 if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
114 u32 reg, value;
115 int cnt;
116 cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
117 if (cnt == 2) {
118 IXGBE_WRITE_REG(&adapter->hw, reg, value);
119 value = IXGBE_READ_REG(&adapter->hw, reg);
120 e_dev_info("write: 0x%08x = 0x%08x\n", reg, value);
121 } else {
122 e_dev_info("write <reg> <value>\n");
123 }
124 } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) {
125 u32 reg, value;
126 int cnt;
127 cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg);
128 if (cnt == 1) {
129 value = IXGBE_READ_REG(&adapter->hw, reg);
130 e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
131 } else {
132 e_dev_info("read <reg>\n");
133 }
134 } else {
135 e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf);
136 e_dev_info("Available commands:\n");
137 e_dev_info(" read <reg>\n");
138 e_dev_info(" write <reg> <value>\n");
139 }
140 return count;
141}
142
143static const struct file_operations ixgbe_dbg_reg_ops_fops = {
144 .owner = THIS_MODULE,
145 .open = ixgbe_dbg_reg_ops_open,
146 .read = ixgbe_dbg_reg_ops_read,
147 .write = ixgbe_dbg_reg_ops_write,
148};
149
150static char ixgbe_dbg_netdev_ops_buf[256] = "";
151
152/**
153 * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
154 * @inode: inode that was opened
155 * @filp: file info
156 *
157 * Stash the adapter pointer hiding in the inode into the file pointer
158 * where we can find it later in the read and write calls
159 **/
160static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
161{
162 filp->private_data = inode->i_private;
163 return 0;
164}
165
166/**
167 * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
168 * @filp: the opened file
169 * @buffer: where to write the data for the user to read
170 * @count: the size of the user's buffer
171 * @ppos: file position offset
172 **/
173static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
174 char __user *buffer,
175 size_t count, loff_t *ppos)
176{
177 struct ixgbe_adapter *adapter = filp->private_data;
178 char buf[256];
179 int bytes_not_copied;
180 int len;
181
182 /* don't allow partial reads */
183 if (*ppos != 0)
184 return 0;
185
186 len = snprintf(buf, sizeof(buf), "%s: %s\n",
187 adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
188 if (count < len)
189 return -ENOSPC;
190 bytes_not_copied = copy_to_user(buffer, buf, len);
191 if (bytes_not_copied < 0)
192 return bytes_not_copied;
193
194 *ppos = len;
195 return len;
196}
197
198/**
199 * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum
200 * @filp: the opened file
201 * @buffer: where to find the user's data
202 * @count: the length of the user's data
203 * @ppos: file position offset
204 **/
205static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
206 const char __user *buffer,
207 size_t count, loff_t *ppos)
208{
209 struct ixgbe_adapter *adapter = filp->private_data;
210 int bytes_not_copied;
211
212 /* don't allow partial writes */
213 if (*ppos != 0)
214 return 0;
215 if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
216 return -ENOSPC;
217
218 bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
219 buffer, count);
220 if (bytes_not_copied < 0)
221 return bytes_not_copied;
222 else if (bytes_not_copied < count)
223 count -= bytes_not_copied;
224 else
225 return -ENOSPC;
226 ixgbe_dbg_netdev_ops_buf[count] = '\0';
227
228 if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
229 adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
230 e_dev_info("tx_timeout called\n");
231 } else {
232 e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
233 e_dev_info("Available commands:\n");
234 e_dev_info(" tx_timeout\n");
235 }
236 return count;
237}
238
239static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
240 .owner = THIS_MODULE,
241 .open = ixgbe_dbg_netdev_ops_open,
242 .read = ixgbe_dbg_netdev_ops_read,
243 .write = ixgbe_dbg_netdev_ops_write,
244};
245
246/**
247 * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter
248 * @adapter: the adapter that is starting up
249 **/
250void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
251{
252 const char *name = pci_name(adapter->pdev);
253 struct dentry *pfile;
254 adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
255 if (adapter->ixgbe_dbg_adapter) {
256 pfile = debugfs_create_file("reg_ops", 0600,
257 adapter->ixgbe_dbg_adapter, adapter,
258 &ixgbe_dbg_reg_ops_fops);
259 if (!pfile)
260 e_dev_err("debugfs reg_ops for %s failed\n", name);
261 pfile = debugfs_create_file("netdev_ops", 0600,
262 adapter->ixgbe_dbg_adapter, adapter,
263 &ixgbe_dbg_netdev_ops_fops);
264 if (!pfile)
265 e_dev_err("debugfs netdev_ops for %s failed\n", name);
266 } else {
267 e_dev_err("debugfs entry for %s failed\n", name);
268 }
269}
270
271/**
272 * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
273 * @pf: the pf that is stopping
274 **/
275void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
276{
277 if (adapter->ixgbe_dbg_adapter)
278 debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
279 adapter->ixgbe_dbg_adapter = NULL;
280}
281
282/**
283 * ixgbe_dbg_init - start up debugfs for the driver
284 **/
285void ixgbe_dbg_init(void)
286{
287 ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
288 if (ixgbe_dbg_root == NULL)
289 pr_err("init of debugfs failed\n");
290}
291
292/**
293 * ixgbe_dbg_exit - clean out the driver's debugfs entries
294 **/
295void ixgbe_dbg_exit(void)
296{
297 debugfs_remove_recursive(ixgbe_dbg_root);
298}
299
300#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ee61819d6088..868af6938219 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1167 } 1167 }
1168 1168
1169 bi->dma = dma; 1169 bi->dma = dma;
1170 bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); 1170 bi->page_offset = 0;
1171 1171
1172 return true; 1172 return true;
1173} 1173}
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1320 return max_len; 1320 return max_len;
1321} 1321}
1322 1322
1323static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
1324 union ixgbe_adv_rx_desc *rx_desc,
1325 struct sk_buff *skb)
1326{
1327 __le32 rsc_enabled;
1328 u32 rsc_cnt;
1329
1330 if (!ring_is_rsc_enabled(rx_ring))
1331 return;
1332
1333 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1334 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1335
1336 /* If this is an RSC frame rsc_cnt should be non-zero */
1337 if (!rsc_enabled)
1338 return;
1339
1340 rsc_cnt = le32_to_cpu(rsc_enabled);
1341 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1342
1343 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1344}
1345
1346static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, 1323static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1347 struct sk_buff *skb) 1324 struct sk_buff *skb)
1348{ 1325{
@@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1440 1417
1441 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); 1418 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1442 1419
1443 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) 1420 /* update RSC append count if present */
1444 return false; 1421 if (ring_is_rsc_enabled(rx_ring)) {
1422 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1423 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1424
1425 if (unlikely(rsc_enabled)) {
1426 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1427
1428 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1429 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1445 1430
1446 /* append_cnt indicates packet is RSC, if so fetch nextp */ 1431 /* update ntc based on RSC value */
1447 if (IXGBE_CB(skb)->append_cnt) { 1432 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1448 ntc = le32_to_cpu(rx_desc->wb.upper.status_error); 1433 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1449 ntc &= IXGBE_RXDADV_NEXTP_MASK; 1434 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1450 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; 1435 }
1451 } 1436 }
1452 1437
1438 /* if we are the last buffer then there is nothing else to do */
1439 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1440 return false;
1441
1453 /* place skb in next buffer to be received */ 1442 /* place skb in next buffer to be received */
1454 rx_ring->rx_buffer_info[ntc].skb = skb; 1443 rx_ring->rx_buffer_info[ntc].skb = skb;
1455 rx_ring->rx_stats.non_eop_descs++; 1444 rx_ring->rx_stats.non_eop_descs++;
@@ -1458,6 +1447,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1458} 1447}
1459 1448
1460/** 1449/**
1450 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1451 * @rx_ring: rx descriptor ring packet is being transacted on
1452 * @skb: pointer to current skb being adjusted
1453 *
1454 * This function is an ixgbe specific version of __pskb_pull_tail. The
1455 * main difference between this version and the original function is that
1456 * this function can make several assumptions about the state of things
1457 * that allow for significant optimizations versus the standard function.
1458 * As a result we can do things like drop a frag and maintain an accurate
1459 * truesize for the skb.
1460 */
1461static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1462 struct sk_buff *skb)
1463{
1464 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1465 unsigned char *va;
1466 unsigned int pull_len;
1467
1468 /*
1469 * it is valid to use page_address instead of kmap since we are
1470 * working with pages allocated out of the lomem pool per
1471 * alloc_page(GFP_ATOMIC)
1472 */
1473 va = skb_frag_address(frag);
1474
1475 /*
1476 * we need the header to contain the greater of either ETH_HLEN or
1477 * 60 bytes if the skb->len is less than 60 for skb_pad.
1478 */
1479 pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
1480
1481 /* align pull length to size of long to optimize memcpy performance */
1482 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1483
1484 /* update all of the pointers */
1485 skb_frag_size_sub(frag, pull_len);
1486 frag->page_offset += pull_len;
1487 skb->data_len -= pull_len;
1488 skb->tail += pull_len;
1489}
1490
1491/**
1492 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1493 * @rx_ring: rx descriptor ring packet is being transacted on
1494 * @skb: pointer to current skb being updated
1495 *
1496 * This function provides a basic DMA sync up for the first fragment of an
1497 * skb. The reason for doing this is that the first fragment cannot be
1498 * unmapped until we have reached the end of packet descriptor for a buffer
1499 * chain.
1500 */
1501static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1502 struct sk_buff *skb)
1503{
1504 /* if the page was released unmap it, else just sync our portion */
1505 if (unlikely(IXGBE_CB(skb)->page_released)) {
1506 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1507 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1508 IXGBE_CB(skb)->page_released = false;
1509 } else {
1510 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1511
1512 dma_sync_single_range_for_cpu(rx_ring->dev,
1513 IXGBE_CB(skb)->dma,
1514 frag->page_offset,
1515 ixgbe_rx_bufsz(rx_ring),
1516 DMA_FROM_DEVICE);
1517 }
1518 IXGBE_CB(skb)->dma = 0;
1519}
1520
1521/**
1461 * ixgbe_cleanup_headers - Correct corrupted or empty headers 1522 * ixgbe_cleanup_headers - Correct corrupted or empty headers
1462 * @rx_ring: rx descriptor ring packet is being transacted on 1523 * @rx_ring: rx descriptor ring packet is being transacted on
1463 * @rx_desc: pointer to the EOP Rx descriptor 1524 * @rx_desc: pointer to the EOP Rx descriptor
@@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1479 union ixgbe_adv_rx_desc *rx_desc, 1540 union ixgbe_adv_rx_desc *rx_desc,
1480 struct sk_buff *skb) 1541 struct sk_buff *skb)
1481{ 1542{
1482 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1483 struct net_device *netdev = rx_ring->netdev; 1543 struct net_device *netdev = rx_ring->netdev;
1484 unsigned char *va;
1485 unsigned int pull_len;
1486
1487 /* if the page was released unmap it, else just sync our portion */
1488 if (unlikely(IXGBE_CB(skb)->page_released)) {
1489 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1490 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1491 IXGBE_CB(skb)->page_released = false;
1492 } else {
1493 dma_sync_single_range_for_cpu(rx_ring->dev,
1494 IXGBE_CB(skb)->dma,
1495 frag->page_offset,
1496 ixgbe_rx_bufsz(rx_ring),
1497 DMA_FROM_DEVICE);
1498 }
1499 IXGBE_CB(skb)->dma = 0;
1500 1544
1501 /* verify that the packet does not have any known errors */ 1545 /* verify that the packet does not have any known errors */
1502 if (unlikely(ixgbe_test_staterr(rx_desc, 1546 if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1506 return true; 1550 return true;
1507 } 1551 }
1508 1552
1509 /* 1553 /* place header in linear portion of buffer */
1510 * it is valid to use page_address instead of kmap since we are 1554 if (skb_is_nonlinear(skb))
1511 * working with pages allocated out of the lomem pool per 1555 ixgbe_pull_tail(rx_ring, skb);
1512 * alloc_page(GFP_ATOMIC)
1513 */
1514 va = skb_frag_address(frag);
1515
1516 /*
1517 * we need the header to contain the greater of either ETH_HLEN or
1518 * 60 bytes if the skb->len is less than 60 for skb_pad.
1519 */
1520 pull_len = skb_frag_size(frag);
1521 if (pull_len > IXGBE_RX_HDR_SIZE)
1522 pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
1523
1524 /* align pull length to size of long to optimize memcpy performance */
1525 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1526
1527 /* update all of the pointers */
1528 skb_frag_size_sub(frag, pull_len);
1529 frag->page_offset += pull_len;
1530 skb->data_len -= pull_len;
1531 skb->tail += pull_len;
1532
1533 /*
1534 * if we sucked the frag empty then we should free it,
1535 * if there are other frags here something is screwed up in hardware
1536 */
1537 if (skb_frag_size(frag) == 0) {
1538 BUG_ON(skb_shinfo(skb)->nr_frags != 1);
1539 skb_shinfo(skb)->nr_frags = 0;
1540 __skb_frag_unref(frag);
1541 skb->truesize -= ixgbe_rx_bufsz(rx_ring);
1542 }
1543 1556
1544#ifdef IXGBE_FCOE 1557#ifdef IXGBE_FCOE
1545 /* do not attempt to pad FCoE Frames as this will disrupt DDP */ 1558 /* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1560,33 +1573,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1560} 1573}
1561 1574
1562/** 1575/**
1563 * ixgbe_can_reuse_page - determine if we can reuse a page
1564 * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
1565 *
1566 * Returns true if page can be reused in another Rx buffer
1567 **/
1568static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
1569{
1570 struct page *page = rx_buffer->page;
1571
1572 /* if we are only owner of page and it is local we can reuse it */
1573 return likely(page_count(page) == 1) &&
1574 likely(page_to_nid(page) == numa_node_id());
1575}
1576
1577/**
1578 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring 1576 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1579 * @rx_ring: rx descriptor ring to store buffers on 1577 * @rx_ring: rx descriptor ring to store buffers on
1580 * @old_buff: donor buffer to have page reused 1578 * @old_buff: donor buffer to have page reused
1581 * 1579 *
1582 * Syncronizes page for reuse by the adapter 1580 * Synchronizes page for reuse by the adapter
1583 **/ 1581 **/
1584static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, 1582static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1585 struct ixgbe_rx_buffer *old_buff) 1583 struct ixgbe_rx_buffer *old_buff)
1586{ 1584{
1587 struct ixgbe_rx_buffer *new_buff; 1585 struct ixgbe_rx_buffer *new_buff;
1588 u16 nta = rx_ring->next_to_alloc; 1586 u16 nta = rx_ring->next_to_alloc;
1589 u16 bufsz = ixgbe_rx_bufsz(rx_ring);
1590 1587
1591 new_buff = &rx_ring->rx_buffer_info[nta]; 1588 new_buff = &rx_ring->rx_buffer_info[nta];
1592 1589
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1597 /* transfer page from old buffer to new buffer */ 1594 /* transfer page from old buffer to new buffer */
1598 new_buff->page = old_buff->page; 1595 new_buff->page = old_buff->page;
1599 new_buff->dma = old_buff->dma; 1596 new_buff->dma = old_buff->dma;
1600 1597 new_buff->page_offset = old_buff->page_offset;
1601 /* flip page offset to other buffer and store to new_buff */
1602 new_buff->page_offset = old_buff->page_offset ^ bufsz;
1603 1598
1604 /* sync the buffer for use by the device */ 1599 /* sync the buffer for use by the device */
1605 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, 1600 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1606 new_buff->page_offset, bufsz, 1601 new_buff->page_offset,
1602 ixgbe_rx_bufsz(rx_ring),
1607 DMA_FROM_DEVICE); 1603 DMA_FROM_DEVICE);
1608
1609 /* bump ref count on page before it is given to the stack */
1610 get_page(new_buff->page);
1611} 1604}
1612 1605
1613/** 1606/**
@@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1617 * @rx_desc: descriptor containing length of buffer written by hardware 1610 * @rx_desc: descriptor containing length of buffer written by hardware
1618 * @skb: sk_buff to place the data into 1611 * @skb: sk_buff to place the data into
1619 * 1612 *
1620 * This function is based on skb_add_rx_frag. I would have used that 1613 * This function will add the data contained in rx_buffer->page to the skb.
1621 * function however it doesn't handle the truesize case correctly since we 1614 * This is done either through a direct copy if the data in the buffer is
1622 * are allocating more memory than might be used for a single receive. 1615 * less than the skb header size, otherwise it will just attach the page as
1616 * a frag to the skb.
1617 *
1618 * The function will then update the page offset if necessary and return
1619 * true if the buffer can be reused by the adapter.
1623 **/ 1620 **/
1624static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, 1621static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1625 struct ixgbe_rx_buffer *rx_buffer, 1622 struct ixgbe_rx_buffer *rx_buffer,
1626 struct sk_buff *skb, int size) 1623 union ixgbe_adv_rx_desc *rx_desc,
1624 struct sk_buff *skb)
1627{ 1625{
1628 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1626 struct page *page = rx_buffer->page;
1629 rx_buffer->page, rx_buffer->page_offset, 1627 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
1630 size); 1628#if (PAGE_SIZE < 8192)
1631 skb->len += size; 1629 unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1632 skb->data_len += size; 1630#else
1633 skb->truesize += ixgbe_rx_bufsz(rx_ring); 1631 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1632 unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1633 ixgbe_rx_bufsz(rx_ring);
1634#endif
1635
1636 if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1637 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1638
1639 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1640
1641 /* we can reuse buffer as-is, just make sure it is local */
1642 if (likely(page_to_nid(page) == numa_node_id()))
1643 return true;
1644
1645 /* this page cannot be reused so discard it */
1646 put_page(page);
1647 return false;
1648 }
1649
1650 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1651 rx_buffer->page_offset, size, truesize);
1652
1653 /* avoid re-using remote pages */
1654 if (unlikely(page_to_nid(page) != numa_node_id()))
1655 return false;
1656
1657#if (PAGE_SIZE < 8192)
1658 /* if we are only owner of page we can reuse it */
1659 if (unlikely(page_count(page) != 1))
1660 return false;
1661
1662 /* flip page offset to other buffer */
1663 rx_buffer->page_offset ^= truesize;
1664
1665 /*
1666 * since we are the only owner of the page and we need to
1667 * increment it, just set the value to 2 in order to avoid
1668 * an unecessary locked operation
1669 */
1670 atomic_set(&page->_count, 2);
1671#else
1672 /* move offset up to the next cache line */
1673 rx_buffer->page_offset += truesize;
1674
1675 if (rx_buffer->page_offset > last_offset)
1676 return false;
1677
1678 /* bump ref count on page before it is given to the stack */
1679 get_page(page);
1680#endif
1681
1682 return true;
1683}
1684
1685static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1686 union ixgbe_adv_rx_desc *rx_desc)
1687{
1688 struct ixgbe_rx_buffer *rx_buffer;
1689 struct sk_buff *skb;
1690 struct page *page;
1691
1692 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1693 page = rx_buffer->page;
1694 prefetchw(page);
1695
1696 skb = rx_buffer->skb;
1697
1698 if (likely(!skb)) {
1699 void *page_addr = page_address(page) +
1700 rx_buffer->page_offset;
1701
1702 /* prefetch first cache line of first page */
1703 prefetch(page_addr);
1704#if L1_CACHE_BYTES < 128
1705 prefetch(page_addr + L1_CACHE_BYTES);
1706#endif
1707
1708 /* allocate a skb to store the frags */
1709 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1710 IXGBE_RX_HDR_SIZE);
1711 if (unlikely(!skb)) {
1712 rx_ring->rx_stats.alloc_rx_buff_failed++;
1713 return NULL;
1714 }
1715
1716 /*
1717 * we will be copying header into skb->data in
1718 * pskb_may_pull so it is in our interest to prefetch
1719 * it now to avoid a possible cache miss
1720 */
1721 prefetchw(skb->data);
1722
1723 /*
1724 * Delay unmapping of the first packet. It carries the
1725 * header information, HW may still access the header
1726 * after the writeback. Only unmap it when EOP is
1727 * reached
1728 */
1729 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1730 goto dma_sync;
1731
1732 IXGBE_CB(skb)->dma = rx_buffer->dma;
1733 } else {
1734 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
1735 ixgbe_dma_sync_frag(rx_ring, skb);
1736
1737dma_sync:
1738 /* we are reusing so sync this buffer for CPU use */
1739 dma_sync_single_range_for_cpu(rx_ring->dev,
1740 rx_buffer->dma,
1741 rx_buffer->page_offset,
1742 ixgbe_rx_bufsz(rx_ring),
1743 DMA_FROM_DEVICE);
1744 }
1745
1746 /* pull page into skb */
1747 if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
1748 /* hand second half of page back to the ring */
1749 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
1750 } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
1751 /* the page has been released from the ring */
1752 IXGBE_CB(skb)->page_released = true;
1753 } else {
1754 /* we are not reusing the buffer so unmap it */
1755 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
1756 ixgbe_rx_pg_size(rx_ring),
1757 DMA_FROM_DEVICE);
1758 }
1759
1760 /* clear contents of buffer_info */
1761 rx_buffer->skb = NULL;
1762 rx_buffer->dma = 0;
1763 rx_buffer->page = NULL;
1764
1765 return skb;
1634} 1766}
1635 1767
1636/** 1768/**
@@ -1653,16 +1785,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1653 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1785 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1654#ifdef IXGBE_FCOE 1786#ifdef IXGBE_FCOE
1655 struct ixgbe_adapter *adapter = q_vector->adapter; 1787 struct ixgbe_adapter *adapter = q_vector->adapter;
1656 int ddp_bytes = 0; 1788 int ddp_bytes;
1789 unsigned int mss = 0;
1657#endif /* IXGBE_FCOE */ 1790#endif /* IXGBE_FCOE */
1658 u16 cleaned_count = ixgbe_desc_unused(rx_ring); 1791 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
1659 1792
1660 do { 1793 do {
1661 struct ixgbe_rx_buffer *rx_buffer;
1662 union ixgbe_adv_rx_desc *rx_desc; 1794 union ixgbe_adv_rx_desc *rx_desc;
1663 struct sk_buff *skb; 1795 struct sk_buff *skb;
1664 struct page *page;
1665 u16 ntc;
1666 1796
1667 /* return some buffers to hardware, one at a time is too slow */ 1797 /* return some buffers to hardware, one at a time is too slow */
1668 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 1798 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1800,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1670 cleaned_count = 0; 1800 cleaned_count = 0;
1671 } 1801 }
1672 1802
1673 ntc = rx_ring->next_to_clean; 1803 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1674 rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
1675 rx_buffer = &rx_ring->rx_buffer_info[ntc];
1676 1804
1677 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) 1805 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
1678 break; 1806 break;
@@ -1684,75 +1812,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1684 */ 1812 */
1685 rmb(); 1813 rmb();
1686 1814
1687 page = rx_buffer->page; 1815 /* retrieve a buffer from the ring */
1688 prefetchw(page); 1816 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
1689
1690 skb = rx_buffer->skb;
1691
1692 if (likely(!skb)) {
1693 void *page_addr = page_address(page) +
1694 rx_buffer->page_offset;
1695
1696 /* prefetch first cache line of first page */
1697 prefetch(page_addr);
1698#if L1_CACHE_BYTES < 128
1699 prefetch(page_addr + L1_CACHE_BYTES);
1700#endif
1701 1817
1702 /* allocate a skb to store the frags */ 1818 /* exit if we failed to retrieve a buffer */
1703 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 1819 if (!skb)
1704 IXGBE_RX_HDR_SIZE); 1820 break;
1705 if (unlikely(!skb)) {
1706 rx_ring->rx_stats.alloc_rx_buff_failed++;
1707 break;
1708 }
1709
1710 /*
1711 * we will be copying header into skb->data in
1712 * pskb_may_pull so it is in our interest to prefetch
1713 * it now to avoid a possible cache miss
1714 */
1715 prefetchw(skb->data);
1716
1717 /*
1718 * Delay unmapping of the first packet. It carries the
1719 * header information, HW may still access the header
1720 * after the writeback. Only unmap it when EOP is
1721 * reached
1722 */
1723 IXGBE_CB(skb)->dma = rx_buffer->dma;
1724 } else {
1725 /* we are reusing so sync this buffer for CPU use */
1726 dma_sync_single_range_for_cpu(rx_ring->dev,
1727 rx_buffer->dma,
1728 rx_buffer->page_offset,
1729 ixgbe_rx_bufsz(rx_ring),
1730 DMA_FROM_DEVICE);
1731 }
1732
1733 /* pull page into skb */
1734 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
1735 le16_to_cpu(rx_desc->wb.upper.length));
1736
1737 if (ixgbe_can_reuse_page(rx_buffer)) {
1738 /* hand second half of page back to the ring */
1739 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
1740 } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
1741 /* the page has been released from the ring */
1742 IXGBE_CB(skb)->page_released = true;
1743 } else {
1744 /* we are not reusing the buffer so unmap it */
1745 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
1746 ixgbe_rx_pg_size(rx_ring),
1747 DMA_FROM_DEVICE);
1748 }
1749
1750 /* clear contents of buffer_info */
1751 rx_buffer->skb = NULL;
1752 rx_buffer->dma = 0;
1753 rx_buffer->page = NULL;
1754
1755 ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
1756 1821
1757 cleaned_count++; 1822 cleaned_count++;
1758 1823
@@ -1775,6 +1840,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1775 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1840 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1776 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { 1841 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
1777 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); 1842 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1843 /* include DDPed FCoE data */
1844 if (ddp_bytes > 0) {
1845 if (!mss) {
1846 mss = rx_ring->netdev->mtu -
1847 sizeof(struct fcoe_hdr) -
1848 sizeof(struct fc_frame_header) -
1849 sizeof(struct fcoe_crc_eof);
1850 if (mss > 512)
1851 mss &= ~511;
1852 }
1853 total_rx_bytes += ddp_bytes;
1854 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
1855 mss);
1856 }
1778 if (!ddp_bytes) { 1857 if (!ddp_bytes) {
1779 dev_kfree_skb_any(skb); 1858 dev_kfree_skb_any(skb);
1780 continue; 1859 continue;
@@ -1788,21 +1867,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1788 budget--; 1867 budget--;
1789 } while (likely(budget)); 1868 } while (likely(budget));
1790 1869
1791#ifdef IXGBE_FCOE
1792 /* include DDPed FCoE data */
1793 if (ddp_bytes > 0) {
1794 unsigned int mss;
1795
1796 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1797 sizeof(struct fc_frame_header) -
1798 sizeof(struct fcoe_crc_eof);
1799 if (mss > 512)
1800 mss &= ~511;
1801 total_rx_bytes += ddp_bytes;
1802 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1803 }
1804
1805#endif /* IXGBE_FCOE */
1806 u64_stats_update_begin(&rx_ring->syncp); 1870 u64_stats_update_begin(&rx_ring->syncp);
1807 rx_ring->stats.packets += total_rx_packets; 1871 rx_ring->stats.packets += total_rx_packets;
1808 rx_ring->stats.bytes += total_rx_bytes; 1872 rx_ring->stats.bytes += total_rx_bytes;
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2868 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; 2932 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
2869 2933
2870 /* configure the packet buffer length */ 2934 /* configure the packet buffer length */
2871#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
2872 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2873#else
2874 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2935 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2875#endif
2876 2936
2877 /* configure descriptor type */ 2937 /* configure descriptor type */
2878 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2938 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2980 * total size of max desc * buf_len is not greater 3040 * total size of max desc * buf_len is not greater
2981 * than 65536 3041 * than 65536
2982 */ 3042 */
2983#if (PAGE_SIZE <= 8192)
2984 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 3043 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2985#elif (PAGE_SIZE <= 16384)
2986 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2987#else
2988 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2989#endif
2990 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); 3044 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2991} 3045}
2992 3046
@@ -3606,8 +3660,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3606 if (hw->mac.type == ixgbe_mac_82598EB) 3660 if (hw->mac.type == ixgbe_mac_82598EB)
3607 netif_set_gso_max_size(adapter->netdev, 32768); 3661 netif_set_gso_max_size(adapter->netdev, 32768);
3608 3662
3609 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3610
3611#ifdef IXGBE_FCOE 3663#ifdef IXGBE_FCOE
3612 if (adapter->netdev->features & NETIF_F_FCOE_MTU) 3664 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3613 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 3665 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3807,6 +3859,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3807#ifdef CONFIG_IXGBE_DCB 3859#ifdef CONFIG_IXGBE_DCB
3808 ixgbe_configure_dcb(adapter); 3860 ixgbe_configure_dcb(adapter);
3809#endif 3861#endif
3862 /*
3863 * We must restore virtualization before VLANs or else
3864 * the VLVF registers will not be populated
3865 */
3866 ixgbe_configure_virtualization(adapter);
3810 3867
3811 ixgbe_set_rx_mode(adapter->netdev); 3868 ixgbe_set_rx_mode(adapter->netdev);
3812 ixgbe_restore_vlan(adapter); 3869 ixgbe_restore_vlan(adapter);
@@ -3838,8 +3895,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3838 break; 3895 break;
3839 } 3896 }
3840 3897
3841 ixgbe_configure_virtualization(adapter);
3842
3843#ifdef IXGBE_FCOE 3898#ifdef IXGBE_FCOE
3844 /* configure FCoE L2 filters, redirection table, and Rx control */ 3899 /* configure FCoE L2 filters, redirection table, and Rx control */
3845 ixgbe_configure_fcoe(adapter); 3900 ixgbe_configure_fcoe(adapter);
@@ -4130,27 +4185,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4130} 4185}
4131 4186
4132/** 4187/**
4133 * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
4134 * @rx_ring: ring to setup
4135 *
4136 * On many IA platforms the L1 cache has a critical stride of 4K, this
4137 * results in each receive buffer starting in the same cache set. To help
4138 * reduce the pressure on this cache set we can interleave the offsets so
4139 * that only every other buffer will be in the same cache set.
4140 **/
4141static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
4142{
4143 struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
4144 u16 i;
4145
4146 for (i = 0; i < rx_ring->count; i += 2) {
4147 rx_buffer[0].page_offset = 0;
4148 rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
4149 rx_buffer = &rx_buffer[2];
4150 }
4151}
4152
4153/**
4154 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 4188 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
4155 * @rx_ring: ring to free buffers from 4189 * @rx_ring: ring to free buffers from
4156 **/ 4190 **/
@@ -4195,8 +4229,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4195 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 4229 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4196 memset(rx_ring->rx_buffer_info, 0, size); 4230 memset(rx_ring->rx_buffer_info, 0, size);
4197 4231
4198 ixgbe_init_rx_page_offset(rx_ring);
4199
4200 /* Zero out the descriptor ring */ 4232 /* Zero out the descriptor ring */
4201 memset(rx_ring->desc, 0, rx_ring->size); 4233 memset(rx_ring->desc, 0, rx_ring->size);
4202 4234
@@ -4646,8 +4678,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
4646 rx_ring->next_to_clean = 0; 4678 rx_ring->next_to_clean = 0;
4647 rx_ring->next_to_use = 0; 4679 rx_ring->next_to_use = 0;
4648 4680
4649 ixgbe_init_rx_page_offset(rx_ring);
4650
4651 return 0; 4681 return 0;
4652err: 4682err:
4653 vfree(rx_ring->rx_buffer_info); 4683 vfree(rx_ring->rx_buffer_info);
@@ -5530,8 +5560,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5530{ 5560{
5531 u32 ssvpc; 5561 u32 ssvpc;
5532 5562
5533 /* Do not perform spoof check for 82598 */ 5563 /* Do not perform spoof check for 82598 or if not in IOV mode */
5534 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 5564 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
5565 adapter->num_vfs == 0)
5535 return; 5566 return;
5536 5567
5537 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); 5568 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
@@ -5543,7 +5574,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5543 if (!ssvpc) 5574 if (!ssvpc)
5544 return; 5575 return;
5545 5576
5546 e_warn(drv, "%d Spoofed packets detected\n", ssvpc); 5577 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
5547} 5578}
5548 5579
5549/** 5580/**
@@ -5874,9 +5905,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
5874 u32 type_tucmd = 0; 5905 u32 type_tucmd = 0;
5875 5906
5876 if (skb->ip_summed != CHECKSUM_PARTIAL) { 5907 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5877 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && 5908 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
5878 !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) 5909 if (unlikely(skb->no_fcs))
5879 return; 5910 first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
5911 if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
5912 return;
5913 }
5880 } else { 5914 } else {
5881 u8 l4_hdr = 0; 5915 u8 l4_hdr = 0;
5882 switch (first->protocol) { 5916 switch (first->protocol) {
@@ -5938,7 +5972,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
5938{ 5972{
5939 /* set type for advanced descriptor with frame checksum insertion */ 5973 /* set type for advanced descriptor with frame checksum insertion */
5940 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | 5974 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
5941 IXGBE_ADVTXD_DCMD_IFCS |
5942 IXGBE_ADVTXD_DCMD_DEXT); 5975 IXGBE_ADVTXD_DCMD_DEXT);
5943 5976
5944 /* set HW vlan bit if vlan is present */ 5977 /* set HW vlan bit if vlan is present */
@@ -5958,6 +5991,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
5958#endif 5991#endif
5959 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); 5992 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
5960 5993
5994 /* insert frame checksum */
5995 if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
5996 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
5997
5961 return cmd_type; 5998 return cmd_type;
5962} 5999}
5963 6000
@@ -6063,8 +6100,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
6063 if (likely(!data_len)) 6100 if (likely(!data_len))
6064 break; 6101 break;
6065 6102
6066 if (unlikely(skb->no_fcs))
6067 cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
6068 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); 6103 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
6069 6104
6070 i++; 6105 i++;
@@ -6854,9 +6889,9 @@ static int ixgbe_set_features(struct net_device *netdev,
6854 return 0; 6889 return 0;
6855} 6890}
6856 6891
6857static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, 6892static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
6858 struct net_device *dev, 6893 struct net_device *dev,
6859 unsigned char *addr, 6894 const unsigned char *addr,
6860 u16 flags) 6895 u16 flags)
6861{ 6896{
6862 struct ixgbe_adapter *adapter = netdev_priv(dev); 6897 struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -6893,7 +6928,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
6893 6928
6894static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, 6929static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
6895 struct net_device *dev, 6930 struct net_device *dev,
6896 unsigned char *addr) 6931 const unsigned char *addr)
6897{ 6932{
6898 struct ixgbe_adapter *adapter = netdev_priv(dev); 6933 struct ixgbe_adapter *adapter = netdev_priv(dev);
6899 int err = -EOPNOTSUPP; 6934 int err = -EOPNOTSUPP;
@@ -7136,11 +7171,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7136 goto err_ioremap; 7171 goto err_ioremap;
7137 } 7172 }
7138 7173
7139 for (i = 1; i <= 5; i++) {
7140 if (pci_resource_len(pdev, i) == 0)
7141 continue;
7142 }
7143
7144 netdev->netdev_ops = &ixgbe_netdev_ops; 7174 netdev->netdev_ops = &ixgbe_netdev_ops;
7145 ixgbe_set_ethtool_ops(netdev); 7175 ixgbe_set_ethtool_ops(netdev);
7146 netdev->watchdog_timeo = 5 * HZ; 7176 netdev->watchdog_timeo = 5 * HZ;
@@ -7419,6 +7449,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7419 e_err(probe, "failed to allocate sysfs resources\n"); 7449 e_err(probe, "failed to allocate sysfs resources\n");
7420#endif /* CONFIG_IXGBE_HWMON */ 7450#endif /* CONFIG_IXGBE_HWMON */
7421 7451
7452#ifdef CONFIG_DEBUG_FS
7453 ixgbe_dbg_adapter_init(adapter);
7454#endif /* CONFIG_DEBUG_FS */
7455
7422 return 0; 7456 return 0;
7423 7457
7424err_register: 7458err_register:
@@ -7453,6 +7487,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7453 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 7487 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7454 struct net_device *netdev = adapter->netdev; 7488 struct net_device *netdev = adapter->netdev;
7455 7489
7490#ifdef CONFIG_DEBUG_FS
7491 ixgbe_dbg_adapter_exit(adapter);
7492#endif /*CONFIG_DEBUG_FS */
7493
7456 set_bit(__IXGBE_DOWN, &adapter->state); 7494 set_bit(__IXGBE_DOWN, &adapter->state);
7457 cancel_work_sync(&adapter->service_task); 7495 cancel_work_sync(&adapter->service_task);
7458 7496
@@ -7708,6 +7746,10 @@ static int __init ixgbe_init_module(void)
7708 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); 7746 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
7709 pr_info("%s\n", ixgbe_copyright); 7747 pr_info("%s\n", ixgbe_copyright);
7710 7748
7749#ifdef CONFIG_DEBUG_FS
7750 ixgbe_dbg_init();
7751#endif /* CONFIG_DEBUG_FS */
7752
7711#ifdef CONFIG_IXGBE_DCA 7753#ifdef CONFIG_IXGBE_DCA
7712 dca_register_notify(&dca_notifier); 7754 dca_register_notify(&dca_notifier);
7713#endif 7755#endif
@@ -7730,6 +7772,11 @@ static void __exit ixgbe_exit_module(void)
7730 dca_unregister_notify(&dca_notifier); 7772 dca_unregister_notify(&dca_notifier);
7731#endif 7773#endif
7732 pci_unregister_driver(&ixgbe_driver); 7774 pci_unregister_driver(&ixgbe_driver);
7775
7776#ifdef CONFIG_DEBUG_FS
7777 ixgbe_dbg_exit();
7778#endif /* CONFIG_DEBUG_FS */
7779
7733 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 7780 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7734} 7781}
7735 7782
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 3456d5617143..39881cb17a4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -960,7 +960,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
960 /* (Re)start the overflow check */ 960 /* (Re)start the overflow check */
961 adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; 961 adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
962 962
963 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps); 963 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
964 &adapter->pdev->dev);
964 if (IS_ERR(adapter->ptp_clock)) { 965 if (IS_ERR(adapter->ptp_clock)) {
965 adapter->ptp_clock = NULL; 966 adapter->ptp_clock = NULL;
966 e_dev_err("ptp_clock_register failed\n"); 967 e_dev_err("ptp_clock_register failed\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 4fea8716ab64..dce48bf64d96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -346,6 +346,10 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
346static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, 346static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
347 u32 vf) 347 u32 vf)
348{ 348{
349 /* VLAN 0 is a special case, don't allow it to be removed */
350 if (!vid && !add)
351 return 0;
352
349 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 353 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
350} 354}
351 355
@@ -414,6 +418,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
414 VLAN_PRIO_SHIFT)), vf); 418 VLAN_PRIO_SHIFT)), vf);
415 ixgbe_set_vmolr(hw, vf, false); 419 ixgbe_set_vmolr(hw, vf, false);
416 } else { 420 } else {
421 ixgbe_set_vf_vlan(adapter, true, 0, vf);
417 ixgbe_set_vmvir(adapter, 0, vf); 422 ixgbe_set_vmvir(adapter, 0, vf);
418 ixgbe_set_vmolr(hw, vf, true); 423 ixgbe_set_vmolr(hw, vf, true);
419 } 424 }
@@ -810,9 +815,9 @@ out:
810 return err; 815 return err;
811} 816}
812 817
813static int ixgbe_link_mbps(int internal_link_speed) 818static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
814{ 819{
815 switch (internal_link_speed) { 820 switch (adapter->link_speed) {
816 case IXGBE_LINK_SPEED_100_FULL: 821 case IXGBE_LINK_SPEED_100_FULL:
817 return 100; 822 return 100;
818 case IXGBE_LINK_SPEED_1GB_FULL: 823 case IXGBE_LINK_SPEED_1GB_FULL:
@@ -824,27 +829,30 @@ static int ixgbe_link_mbps(int internal_link_speed)
824 } 829 }
825} 830}
826 831
827static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, 832static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
828 int link_speed)
829{ 833{
830 int rf_dec, rf_int; 834 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
831 u32 bcnrc_val; 835 struct ixgbe_hw *hw = &adapter->hw;
836 u32 bcnrc_val = 0;
837 u16 queue, queues_per_pool;
838 u16 tx_rate = adapter->vfinfo[vf].tx_rate;
839
840 if (tx_rate) {
841 /* start with base link speed value */
842 bcnrc_val = adapter->vf_rate_link_speed;
832 843
833 if (tx_rate != 0) {
834 /* Calculate the rate factor values to set */ 844 /* Calculate the rate factor values to set */
835 rf_int = link_speed / tx_rate; 845 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
836 rf_dec = (link_speed - (rf_int * tx_rate)); 846 bcnrc_val /= tx_rate;
837 rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; 847
838 848 /* clear everything but the rate factor */
839 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 849 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
840 bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) & 850 IXGBE_RTTBCNRC_RF_DEC_MASK;
841 IXGBE_RTTBCNRC_RF_INT_MASK); 851
842 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 852 /* enable the rate scheduler */
843 } else { 853 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
844 bcnrc_val = 0;
845 } 854 }
846 855
847 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
848 /* 856 /*
849 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 857 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
850 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported 858 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
@@ -861,53 +869,68 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
861 break; 869 break;
862 } 870 }
863 871
864 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 872 /* determine how many queues per pool based on VMDq mask */
873 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
874
875 /* write value for all Tx queues belonging to VF */
876 for (queue = 0; queue < queues_per_pool; queue++) {
877 unsigned int reg_idx = (vf * queues_per_pool) + queue;
878
879 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
880 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
881 }
865} 882}
866 883
867void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) 884void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
868{ 885{
869 int actual_link_speed, i; 886 int i;
870 bool reset_rate = false;
871 887
872 /* VF Tx rate limit was not set */ 888 /* VF Tx rate limit was not set */
873 if (adapter->vf_rate_link_speed == 0) 889 if (!adapter->vf_rate_link_speed)
874 return; 890 return;
875 891
876 actual_link_speed = ixgbe_link_mbps(adapter->link_speed); 892 if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
877 if (actual_link_speed != adapter->vf_rate_link_speed) {
878 reset_rate = true;
879 adapter->vf_rate_link_speed = 0; 893 adapter->vf_rate_link_speed = 0;
880 dev_info(&adapter->pdev->dev, 894 dev_info(&adapter->pdev->dev,
881 "Link speed has been changed. VF Transmit rate " 895 "Link speed has been changed. VF Transmit rate is disabled\n");
882 "is disabled\n");
883 } 896 }
884 897
885 for (i = 0; i < adapter->num_vfs; i++) { 898 for (i = 0; i < adapter->num_vfs; i++) {
886 if (reset_rate) 899 if (!adapter->vf_rate_link_speed)
887 adapter->vfinfo[i].tx_rate = 0; 900 adapter->vfinfo[i].tx_rate = 0;
888 901
889 ixgbe_set_vf_rate_limit(&adapter->hw, i, 902 ixgbe_set_vf_rate_limit(adapter, i);
890 adapter->vfinfo[i].tx_rate,
891 actual_link_speed);
892 } 903 }
893} 904}
894 905
895int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 906int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
896{ 907{
897 struct ixgbe_adapter *adapter = netdev_priv(netdev); 908 struct ixgbe_adapter *adapter = netdev_priv(netdev);
898 struct ixgbe_hw *hw = &adapter->hw; 909 int link_speed;
899 int actual_link_speed; 910
911 /* verify VF is active */
912 if (vf >= adapter->num_vfs)
913 return -EINVAL;
900 914
901 actual_link_speed = ixgbe_link_mbps(adapter->link_speed); 915 /* verify link is up */
902 if ((vf >= adapter->num_vfs) || (!adapter->link_up) || 916 if (!adapter->link_up)
903 (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
904 ((tx_rate != 0) && (tx_rate <= 10)))
905 /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
906 return -EINVAL; 917 return -EINVAL;
907 918
908 adapter->vf_rate_link_speed = actual_link_speed; 919 /* verify we are linked at 10Gbps */
909 adapter->vfinfo[vf].tx_rate = (u16)tx_rate; 920 link_speed = ixgbe_link_mbps(adapter);
910 ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); 921 if (link_speed != 10000)
922 return -EINVAL;
923
924 /* rate limit cannot be less than 10Mbs or greater than link speed */
925 if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
926 return -EINVAL;
927
928 /* store values */
929 adapter->vf_rate_link_speed = link_speed;
930 adapter->vfinfo[vf].tx_rate = tx_rate;
931
932 /* update hardware configuration */
933 ixgbe_set_vf_rate_limit(adapter, vf);
911 934
912 return 0; 935 return 0;
913} 936}
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 418af827b230..da17ccf5c09d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -272,5 +272,6 @@ struct ixgbe_adv_tx_context_desc {
272/* Error Codes */ 272/* Error Codes */
273#define IXGBE_ERR_INVALID_MAC_ADDR -1 273#define IXGBE_ERR_INVALID_MAC_ADDR -1
274#define IXGBE_ERR_RESET_FAILED -2 274#define IXGBE_ERR_RESET_FAILED -2
275#define IXGBE_ERR_INVALID_ARGUMENT -3
275 276
276#endif /* _IXGBEVF_DEFINES_H_ */ 277#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 98cadb0c4dab..383b4e1cd175 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -101,7 +101,9 @@ struct ixgbevf_ring {
101 101
102/* Supported Rx Buffer Sizes */ 102/* Supported Rx Buffer Sizes */
103#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ 103#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
104#define IXGBEVF_RXBUFFER_2048 2048 104#define IXGBEVF_RXBUFFER_3K 3072
105#define IXGBEVF_RXBUFFER_7K 7168
106#define IXGBEVF_RXBUFFER_15K 15360
105#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ 107#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
106 108
107#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 109#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -259,6 +261,11 @@ enum ixbgevf_state_t {
259 __IXGBEVF_DOWN 261 __IXGBEVF_DOWN
260}; 262};
261 263
264struct ixgbevf_cb {
265 struct sk_buff *prev;
266};
267#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
268
262enum ixgbevf_boards { 269enum ixgbevf_boards {
263 board_82599_vf, 270 board_82599_vf,
264 board_X540_vf, 271 board_X540_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 6647383c4ddc..0ee9bd4819f4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -263,6 +263,8 @@ cont_loop:
263 tx_ring->total_bytes += total_bytes; 263 tx_ring->total_bytes += total_bytes;
264 tx_ring->total_packets += total_packets; 264 tx_ring->total_packets += total_packets;
265 u64_stats_update_end(&tx_ring->syncp); 265 u64_stats_update_end(&tx_ring->syncp);
266 q_vector->tx.total_bytes += total_bytes;
267 q_vector->tx.total_packets += total_packets;
266 268
267 return count < tx_ring->count; 269 return count < tx_ring->count;
268} 270}
@@ -272,12 +274,10 @@ cont_loop:
272 * @q_vector: structure containing interrupt and ring information 274 * @q_vector: structure containing interrupt and ring information
273 * @skb: packet to send up 275 * @skb: packet to send up
274 * @status: hardware indication of status of receive 276 * @status: hardware indication of status of receive
275 * @rx_ring: rx descriptor ring (for a specific queue) to setup
276 * @rx_desc: rx descriptor 277 * @rx_desc: rx descriptor
277 **/ 278 **/
278static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 279static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
279 struct sk_buff *skb, u8 status, 280 struct sk_buff *skb, u8 status,
280 struct ixgbevf_ring *ring,
281 union ixgbe_adv_rx_desc *rx_desc) 281 union ixgbe_adv_rx_desc *rx_desc)
282{ 282{
283 struct ixgbevf_adapter *adapter = q_vector->adapter; 283 struct ixgbevf_adapter *adapter = q_vector->adapter;
@@ -433,11 +433,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
433 433
434 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 434 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
435 skb->next = next_buffer->skb; 435 skb->next = next_buffer->skb;
436 skb->next->prev = skb; 436 IXGBE_CB(skb->next)->prev = skb;
437 adapter->non_eop_descs++; 437 adapter->non_eop_descs++;
438 goto next_desc; 438 goto next_desc;
439 } 439 }
440 440
441 /* we should not be chaining buffers, if we did drop the skb */
442 if (IXGBE_CB(skb)->prev) {
443 do {
444 struct sk_buff *this = skb;
445 skb = IXGBE_CB(skb)->prev;
446 dev_kfree_skb(this);
447 } while (skb);
448 goto next_desc;
449 }
450
441 /* ERR_MASK will only have valid bits if EOP set */ 451 /* ERR_MASK will only have valid bits if EOP set */
442 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 452 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
443 dev_kfree_skb_irq(skb); 453 dev_kfree_skb_irq(skb);
@@ -461,7 +471,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
461 } 471 }
462 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 472 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
463 473
464 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 474 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
465 475
466next_desc: 476next_desc:
467 rx_desc->wb.upper.status_error = 0; 477 rx_desc->wb.upper.status_error = 0;
@@ -490,6 +500,8 @@ next_desc:
490 rx_ring->total_packets += total_rx_packets; 500 rx_ring->total_packets += total_rx_packets;
491 rx_ring->total_bytes += total_rx_bytes; 501 rx_ring->total_bytes += total_rx_bytes;
492 u64_stats_update_end(&rx_ring->syncp); 502 u64_stats_update_end(&rx_ring->syncp);
503 q_vector->rx.total_packets += total_rx_packets;
504 q_vector->rx.total_bytes += total_rx_bytes;
493 505
494 return !!budget; 506 return !!budget;
495} 507}
@@ -716,40 +728,15 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
716 } 728 }
717} 729}
718 730
719static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) 731static irqreturn_t ixgbevf_msix_other(int irq, void *data)
720{ 732{
721 struct ixgbevf_adapter *adapter = data; 733 struct ixgbevf_adapter *adapter = data;
722 struct ixgbe_hw *hw = &adapter->hw; 734 struct ixgbe_hw *hw = &adapter->hw;
723 u32 msg;
724 bool got_ack = false;
725
726 if (!hw->mbx.ops.check_for_ack(hw))
727 got_ack = true;
728 735
729 if (!hw->mbx.ops.check_for_msg(hw)) { 736 hw->mac.get_link_status = 1;
730 hw->mbx.ops.read(hw, &msg, 1);
731 737
732 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 738 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
733 mod_timer(&adapter->watchdog_timer, 739 mod_timer(&adapter->watchdog_timer, jiffies);
734 round_jiffies(jiffies + 1));
735
736 if (msg & IXGBE_VT_MSGTYPE_NACK)
737 pr_warn("Last Request of type %2.2x to PF Nacked\n",
738 msg & 0xFF);
739 /*
740 * Restore the PFSTS bit in case someone is polling for a
741 * return message from the PF
742 */
743 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
744 }
745
746 /*
747 * checking for the ack clears the PFACK bit. Place
748 * it back in the v2p_mailbox cache so that anyone
749 * polling for an ack will not miss it
750 */
751 if (got_ack)
752 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
753 740
754 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 741 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
755 742
@@ -899,10 +886,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
899 } 886 }
900 887
901 err = request_irq(adapter->msix_entries[vector].vector, 888 err = request_irq(adapter->msix_entries[vector].vector,
902 &ixgbevf_msix_mbx, 0, netdev->name, adapter); 889 &ixgbevf_msix_other, 0, netdev->name, adapter);
903 if (err) { 890 if (err) {
904 hw_dbg(&adapter->hw, 891 hw_dbg(&adapter->hw,
905 "request_irq for msix_mbx failed: %d\n", err); 892 "request_irq for msix_other failed: %d\n", err);
906 goto free_queue_irqs; 893 goto free_queue_irqs;
907 } 894 }
908 895
@@ -1057,15 +1044,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1057 1044
1058 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1045 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1059 1046
1060 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) 1047 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1061 srrctl |= IXGBEVF_RXBUFFER_2048 >> 1048 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1062 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1049
1063 else
1064 srrctl |= rx_ring->rx_buf_len >>
1065 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1066 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1050 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1067} 1051}
1068 1052
1053static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1054{
1055 struct ixgbe_hw *hw = &adapter->hw;
1056 struct net_device *netdev = adapter->netdev;
1057 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1058 int i;
1059 u16 rx_buf_len;
1060
1061 /* notify the PF of our intent to use this size of frame */
1062 ixgbevf_rlpml_set_vf(hw, max_frame);
1063
1064 /* PF will allow an extra 4 bytes past for vlan tagged frames */
1065 max_frame += VLAN_HLEN;
1066
1067 /*
1068 * Make best use of allocation by using all but 1K of a
1069 * power of 2 allocation that will be used for skb->head.
1070 */
1071 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1072 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1073 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1074 else if (max_frame <= IXGBEVF_RXBUFFER_3K)
1075 rx_buf_len = IXGBEVF_RXBUFFER_3K;
1076 else if (max_frame <= IXGBEVF_RXBUFFER_7K)
1077 rx_buf_len = IXGBEVF_RXBUFFER_7K;
1078 else if (max_frame <= IXGBEVF_RXBUFFER_15K)
1079 rx_buf_len = IXGBEVF_RXBUFFER_15K;
1080 else
1081 rx_buf_len = IXGBEVF_MAX_RXBUFFER;
1082
1083 for (i = 0; i < adapter->num_rx_queues; i++)
1084 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1085}
1086
1069/** 1087/**
1070 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1088 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1071 * @adapter: board private structure 1089 * @adapter: board private structure
@@ -1076,18 +1094,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1076{ 1094{
1077 u64 rdba; 1095 u64 rdba;
1078 struct ixgbe_hw *hw = &adapter->hw; 1096 struct ixgbe_hw *hw = &adapter->hw;
1079 struct net_device *netdev = adapter->netdev;
1080 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1081 int i, j; 1097 int i, j;
1082 u32 rdlen; 1098 u32 rdlen;
1083 int rx_buf_len;
1084 1099
1085 /* PSRTYPE must be initialized in 82599 */ 1100 /* PSRTYPE must be initialized in 82599 */
1086 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1101 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1087 if (netdev->mtu <= ETH_DATA_LEN) 1102
1088 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1103 /* set_rx_buffer_len must be called before ring initialization */
1089 else 1104 ixgbevf_set_rx_buffer_len(adapter);
1090 rx_buf_len = ALIGN(max_frame, 1024);
1091 1105
1092 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1106 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1093 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1107 /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1117,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1103 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1117 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1104 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1118 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1105 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1119 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1106 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1107 1120
1108 ixgbevf_configure_srrctl(adapter, j); 1121 ixgbevf_configure_srrctl(adapter, j);
1109 } 1122 }
@@ -1113,36 +1126,47 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1113{ 1126{
1114 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1127 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1115 struct ixgbe_hw *hw = &adapter->hw; 1128 struct ixgbe_hw *hw = &adapter->hw;
1129 int err;
1130
1131 if (!hw->mac.ops.set_vfta)
1132 return -EOPNOTSUPP;
1116 1133
1117 spin_lock(&adapter->mbx_lock); 1134 spin_lock(&adapter->mbx_lock);
1118 1135
1119 /* add VID to filter table */ 1136 /* add VID to filter table */
1120 if (hw->mac.ops.set_vfta) 1137 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1121 hw->mac.ops.set_vfta(hw, vid, 0, true);
1122 1138
1123 spin_unlock(&adapter->mbx_lock); 1139 spin_unlock(&adapter->mbx_lock);
1124 1140
1141 /* translate error return types so error makes sense */
1142 if (err == IXGBE_ERR_MBX)
1143 return -EIO;
1144
1145 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1146 return -EACCES;
1147
1125 set_bit(vid, adapter->active_vlans); 1148 set_bit(vid, adapter->active_vlans);
1126 1149
1127 return 0; 1150 return err;
1128} 1151}
1129 1152
1130static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1153static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1131{ 1154{
1132 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1155 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1133 struct ixgbe_hw *hw = &adapter->hw; 1156 struct ixgbe_hw *hw = &adapter->hw;
1157 int err = -EOPNOTSUPP;
1134 1158
1135 spin_lock(&adapter->mbx_lock); 1159 spin_lock(&adapter->mbx_lock);
1136 1160
1137 /* remove VID from filter table */ 1161 /* remove VID from filter table */
1138 if (hw->mac.ops.set_vfta) 1162 if (hw->mac.ops.set_vfta)
1139 hw->mac.ops.set_vfta(hw, vid, 0, false); 1163 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1140 1164
1141 spin_unlock(&adapter->mbx_lock); 1165 spin_unlock(&adapter->mbx_lock);
1142 1166
1143 clear_bit(vid, adapter->active_vlans); 1167 clear_bit(vid, adapter->active_vlans);
1144 1168
1145 return 0; 1169 return err;
1146} 1170}
1147 1171
1148static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1172static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1308,6 +1332,25 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1308 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1332 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1309} 1333}
1310 1334
1335static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1336{
1337 struct ixgbe_hw *hw = &adapter->hw;
1338 int api[] = { ixgbe_mbox_api_10,
1339 ixgbe_mbox_api_unknown };
1340 int err = 0, idx = 0;
1341
1342 spin_lock(&adapter->mbx_lock);
1343
1344 while (api[idx] != ixgbe_mbox_api_unknown) {
1345 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1346 if (!err)
1347 break;
1348 idx++;
1349 }
1350
1351 spin_unlock(&adapter->mbx_lock);
1352}
1353
1311static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1354static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1312{ 1355{
1313 struct net_device *netdev = adapter->netdev; 1356 struct net_device *netdev = adapter->netdev;
@@ -1315,7 +1358,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1315 int i, j = 0; 1358 int i, j = 0;
1316 int num_rx_rings = adapter->num_rx_queues; 1359 int num_rx_rings = adapter->num_rx_queues;
1317 u32 txdctl, rxdctl; 1360 u32 txdctl, rxdctl;
1318 u32 msg[2];
1319 1361
1320 for (i = 0; i < adapter->num_tx_queues; i++) { 1362 for (i = 0; i < adapter->num_tx_queues; i++) {
1321 j = adapter->tx_ring[i].reg_idx; 1363 j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1398,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1356 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1398 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1357 } 1399 }
1358 1400
1359 msg[0] = IXGBE_VF_SET_LPE;
1360 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1361 hw->mbx.ops.write_posted(hw, msg, 2);
1362
1363 spin_unlock(&adapter->mbx_lock); 1401 spin_unlock(&adapter->mbx_lock);
1364 1402
1365 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1403 clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -1371,6 +1409,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1371 ixgbevf_save_reset_stats(adapter); 1409 ixgbevf_save_reset_stats(adapter);
1372 ixgbevf_init_last_counter_stats(adapter); 1410 ixgbevf_init_last_counter_stats(adapter);
1373 1411
1412 hw->mac.get_link_status = 1;
1374 mod_timer(&adapter->watchdog_timer, jiffies); 1413 mod_timer(&adapter->watchdog_timer, jiffies);
1375} 1414}
1376 1415
@@ -1378,6 +1417,8 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
1378{ 1417{
1379 struct ixgbe_hw *hw = &adapter->hw; 1418 struct ixgbe_hw *hw = &adapter->hw;
1380 1419
1420 ixgbevf_negotiate_api(adapter);
1421
1381 ixgbevf_configure(adapter); 1422 ixgbevf_configure(adapter);
1382 1423
1383 ixgbevf_up_complete(adapter); 1424 ixgbevf_up_complete(adapter);
@@ -1419,7 +1460,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1419 rx_buffer_info->skb = NULL; 1460 rx_buffer_info->skb = NULL;
1420 do { 1461 do {
1421 struct sk_buff *this = skb; 1462 struct sk_buff *this = skb;
1422 skb = skb->prev; 1463 skb = IXGBE_CB(skb)->prev;
1423 dev_kfree_skb(this); 1464 dev_kfree_skb(this);
1424 } while (skb); 1465 } while (skb);
1425 } 1466 }
@@ -1547,8 +1588,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
1547 1588
1548void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1589void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1549{ 1590{
1550 struct ixgbe_hw *hw = &adapter->hw;
1551
1552 WARN_ON(in_interrupt()); 1591 WARN_ON(in_interrupt());
1553 1592
1554 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1593 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -1561,10 +1600,8 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1561 * watchdog task will continue to schedule reset tasks until 1600 * watchdog task will continue to schedule reset tasks until
1562 * the PF is up and running. 1601 * the PF is up and running.
1563 */ 1602 */
1564 if (!hw->mac.ops.reset_hw(hw)) { 1603 ixgbevf_down(adapter);
1565 ixgbevf_down(adapter); 1604 ixgbevf_up(adapter);
1566 ixgbevf_up(adapter);
1567 }
1568 1605
1569 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1606 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1570} 1607}
@@ -1867,6 +1904,22 @@ err_set_interrupt:
1867} 1904}
1868 1905
1869/** 1906/**
1907 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
1908 * @adapter: board private structure to clear interrupt scheme on
1909 *
1910 * We go through and clear interrupt specific resources and reset the structure
1911 * to pre-load conditions
1912 **/
1913static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1914{
1915 adapter->num_tx_queues = 0;
1916 adapter->num_rx_queues = 0;
1917
1918 ixgbevf_free_q_vectors(adapter);
1919 ixgbevf_reset_interrupt_capability(adapter);
1920}
1921
1922/**
1870 * ixgbevf_sw_init - Initialize general software structures 1923 * ixgbevf_sw_init - Initialize general software structures
1871 * (struct ixgbevf_adapter) 1924 * (struct ixgbevf_adapter)
1872 * @adapter: board private structure to initialize 1925 * @adapter: board private structure to initialize
@@ -2351,6 +2404,8 @@ static int ixgbevf_open(struct net_device *netdev)
2351 } 2404 }
2352 } 2405 }
2353 2406
2407 ixgbevf_negotiate_api(adapter);
2408
2354 /* allocate transmit descriptors */ 2409 /* allocate transmit descriptors */
2355 err = ixgbevf_setup_all_tx_resources(adapter); 2410 err = ixgbevf_setup_all_tx_resources(adapter);
2356 if (err) 2411 if (err)
@@ -2860,10 +2915,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2860static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 2915static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2861{ 2916{
2862 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2917 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2863 struct ixgbe_hw *hw = &adapter->hw;
2864 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2918 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2865 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 2919 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2866 u32 msg[2];
2867 2920
2868 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 2921 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
2869 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 2922 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2877,35 +2930,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2877 /* must set new MTU before calling down or up */ 2930 /* must set new MTU before calling down or up */
2878 netdev->mtu = new_mtu; 2931 netdev->mtu = new_mtu;
2879 2932
2880 if (!netif_running(netdev)) {
2881 msg[0] = IXGBE_VF_SET_LPE;
2882 msg[1] = max_frame;
2883 hw->mbx.ops.write_posted(hw, msg, 2);
2884 }
2885
2886 if (netif_running(netdev)) 2933 if (netif_running(netdev))
2887 ixgbevf_reinit_locked(adapter); 2934 ixgbevf_reinit_locked(adapter);
2888 2935
2889 return 0; 2936 return 0;
2890} 2937}
2891 2938
2892static void ixgbevf_shutdown(struct pci_dev *pdev) 2939static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
2893{ 2940{
2894 struct net_device *netdev = pci_get_drvdata(pdev); 2941 struct net_device *netdev = pci_get_drvdata(pdev);
2895 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2942 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2943#ifdef CONFIG_PM
2944 int retval = 0;
2945#endif
2896 2946
2897 netif_device_detach(netdev); 2947 netif_device_detach(netdev);
2898 2948
2899 if (netif_running(netdev)) { 2949 if (netif_running(netdev)) {
2950 rtnl_lock();
2900 ixgbevf_down(adapter); 2951 ixgbevf_down(adapter);
2901 ixgbevf_free_irq(adapter); 2952 ixgbevf_free_irq(adapter);
2902 ixgbevf_free_all_tx_resources(adapter); 2953 ixgbevf_free_all_tx_resources(adapter);
2903 ixgbevf_free_all_rx_resources(adapter); 2954 ixgbevf_free_all_rx_resources(adapter);
2955 rtnl_unlock();
2904 } 2956 }
2905 2957
2906 pci_save_state(pdev); 2958 ixgbevf_clear_interrupt_scheme(adapter);
2907 2959
2960#ifdef CONFIG_PM
2961 retval = pci_save_state(pdev);
2962 if (retval)
2963 return retval;
2964
2965#endif
2908 pci_disable_device(pdev); 2966 pci_disable_device(pdev);
2967
2968 return 0;
2969}
2970
2971#ifdef CONFIG_PM
2972static int ixgbevf_resume(struct pci_dev *pdev)
2973{
2974 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
2975 struct net_device *netdev = adapter->netdev;
2976 u32 err;
2977
2978 pci_set_power_state(pdev, PCI_D0);
2979 pci_restore_state(pdev);
2980 /*
2981 * pci_restore_state clears dev->state_saved so call
2982 * pci_save_state to restore it.
2983 */
2984 pci_save_state(pdev);
2985
2986 err = pci_enable_device_mem(pdev);
2987 if (err) {
2988 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2989 return err;
2990 }
2991 pci_set_master(pdev);
2992
2993 rtnl_lock();
2994 err = ixgbevf_init_interrupt_scheme(adapter);
2995 rtnl_unlock();
2996 if (err) {
2997 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
2998 return err;
2999 }
3000
3001 ixgbevf_reset(adapter);
3002
3003 if (netif_running(netdev)) {
3004 err = ixgbevf_open(netdev);
3005 if (err)
3006 return err;
3007 }
3008
3009 netif_device_attach(netdev);
3010
3011 return err;
3012}
3013
3014#endif /* CONFIG_PM */
3015static void ixgbevf_shutdown(struct pci_dev *pdev)
3016{
3017 ixgbevf_suspend(pdev, PMSG_SUSPEND);
2909} 3018}
2910 3019
2911static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3020static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@@ -2946,7 +3055,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
2946 return stats; 3055 return stats;
2947} 3056}
2948 3057
2949static const struct net_device_ops ixgbe_netdev_ops = { 3058static const struct net_device_ops ixgbevf_netdev_ops = {
2950 .ndo_open = ixgbevf_open, 3059 .ndo_open = ixgbevf_open,
2951 .ndo_stop = ixgbevf_close, 3060 .ndo_stop = ixgbevf_close,
2952 .ndo_start_xmit = ixgbevf_xmit_frame, 3061 .ndo_start_xmit = ixgbevf_xmit_frame,
@@ -2962,7 +3071,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
2962 3071
2963static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3072static void ixgbevf_assign_netdev_ops(struct net_device *dev)
2964{ 3073{
2965 dev->netdev_ops = &ixgbe_netdev_ops; 3074 dev->netdev_ops = &ixgbevf_netdev_ops;
2966 ixgbevf_set_ethtool_ops(dev); 3075 ixgbevf_set_ethtool_ops(dev);
2967 dev->watchdog_timeo = 5 * HZ; 3076 dev->watchdog_timeo = 5 * HZ;
2968} 3077}
@@ -3131,6 +3240,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3131 return 0; 3240 return 0;
3132 3241
3133err_register: 3242err_register:
3243 ixgbevf_clear_interrupt_scheme(adapter);
3134err_sw_init: 3244err_sw_init:
3135 ixgbevf_reset_interrupt_capability(adapter); 3245 ixgbevf_reset_interrupt_capability(adapter);
3136 iounmap(hw->hw_addr); 3246 iounmap(hw->hw_addr);
@@ -3168,6 +3278,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3168 if (netdev->reg_state == NETREG_REGISTERED) 3278 if (netdev->reg_state == NETREG_REGISTERED)
3169 unregister_netdev(netdev); 3279 unregister_netdev(netdev);
3170 3280
3281 ixgbevf_clear_interrupt_scheme(adapter);
3171 ixgbevf_reset_interrupt_capability(adapter); 3282 ixgbevf_reset_interrupt_capability(adapter);
3172 3283
3173 iounmap(adapter->hw.hw_addr); 3284 iounmap(adapter->hw.hw_addr);
@@ -3267,6 +3378,11 @@ static struct pci_driver ixgbevf_driver = {
3267 .id_table = ixgbevf_pci_tbl, 3378 .id_table = ixgbevf_pci_tbl,
3268 .probe = ixgbevf_probe, 3379 .probe = ixgbevf_probe,
3269 .remove = __devexit_p(ixgbevf_remove), 3380 .remove = __devexit_p(ixgbevf_remove),
3381#ifdef CONFIG_PM
3382 /* Power Management Hooks */
3383 .suspend = ixgbevf_suspend,
3384 .resume = ixgbevf_resume,
3385#endif
3270 .shutdown = ixgbevf_shutdown, 3386 .shutdown = ixgbevf_shutdown,
3271 .err_handler = &ixgbevf_err_handler 3387 .err_handler = &ixgbevf_err_handler
3272}; 3388};
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 9c955900fe64..d5028ddf4b31 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -86,14 +86,17 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
86static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 86static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
87{ 87{
88 struct ixgbe_mbx_info *mbx = &hw->mbx; 88 struct ixgbe_mbx_info *mbx = &hw->mbx;
89 s32 ret_val = IXGBE_ERR_MBX; 89 s32 ret_val = -IXGBE_ERR_MBX;
90
91 if (!mbx->ops.read)
92 goto out;
90 93
91 ret_val = ixgbevf_poll_for_msg(hw); 94 ret_val = ixgbevf_poll_for_msg(hw);
92 95
93 /* if ack received read message, otherwise we timed out */ 96 /* if ack received read message, otherwise we timed out */
94 if (!ret_val) 97 if (!ret_val)
95 ret_val = mbx->ops.read(hw, msg, size); 98 ret_val = mbx->ops.read(hw, msg, size);
96 99out:
97 return ret_val; 100 return ret_val;
98} 101}
99 102
@@ -109,7 +112,11 @@ static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
109static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 112static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
110{ 113{
111 struct ixgbe_mbx_info *mbx = &hw->mbx; 114 struct ixgbe_mbx_info *mbx = &hw->mbx;
112 s32 ret_val; 115 s32 ret_val = -IXGBE_ERR_MBX;
116
117 /* exit if either we can't write or there isn't a defined timeout */
118 if (!mbx->ops.write || !mbx->timeout)
119 goto out;
113 120
114 /* send msg */ 121 /* send msg */
115 ret_val = mbx->ops.write(hw, msg, size); 122 ret_val = mbx->ops.write(hw, msg, size);
@@ -117,7 +124,7 @@ static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
117 /* if msg sent wait until we receive an ack */ 124 /* if msg sent wait until we receive an ack */
118 if (!ret_val) 125 if (!ret_val)
119 ret_val = ixgbevf_poll_for_ack(hw); 126 ret_val = ixgbevf_poll_for_ack(hw);
120 127out:
121 return ret_val; 128 return ret_val;
122} 129}
123 130
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index cf9131c5c115..946ce86f337f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -76,12 +76,29 @@
76/* bits 23:16 are used for exra info for certain messages */ 76/* bits 23:16 are used for exra info for certain messages */
77#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) 77#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
78 78
79/* definitions to support mailbox API version negotiation */
80
81/*
82 * each element denotes a version of the API; existing numbers may not
83 * change; any additions must go at the end
84 */
85enum ixgbe_pfvf_api_rev {
86 ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
87 ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
88 /* This value should always be last */
89 ixgbe_mbox_api_unknown, /* indicates that API version is not known */
90};
91
92/* mailbox API, legacy requests */
79#define IXGBE_VF_RESET 0x01 /* VF requests reset */ 93#define IXGBE_VF_RESET 0x01 /* VF requests reset */
80#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 94#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
81#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 95#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
82#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 96#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
83#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 97
84#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ 98/* mailbox API, version 1.0 VF requests */
99#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
100#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
101#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
85 102
86/* length of permanent address message returned from PF */ 103/* length of permanent address message returned from PF */
87#define IXGBE_VF_PERMADDR_MSG_LEN 4 104#define IXGBE_VF_PERMADDR_MSG_LEN 4
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index ec89b86f7ca4..0c7447e6fcc8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -79,6 +79,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
79 /* Call adapter stop to disable tx/rx and clear interrupts */ 79 /* Call adapter stop to disable tx/rx and clear interrupts */
80 hw->mac.ops.stop_adapter(hw); 80 hw->mac.ops.stop_adapter(hw);
81 81
82 /* reset the api version */
83 hw->api_version = ixgbe_mbox_api_10;
84
82 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); 85 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
83 IXGBE_WRITE_FLUSH(hw); 86 IXGBE_WRITE_FLUSH(hw);
84 87
@@ -97,7 +100,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
97 msgbuf[0] = IXGBE_VF_RESET; 100 msgbuf[0] = IXGBE_VF_RESET;
98 mbx->ops.write_posted(hw, msgbuf, 1); 101 mbx->ops.write_posted(hw, msgbuf, 1);
99 102
100 msleep(10); 103 mdelay(10);
101 104
102 /* set our "perm_addr" based on info provided by PF */ 105 /* set our "perm_addr" based on info provided by PF */
103 /* also set up the mc_filter_type which is piggy backed 106 /* also set up the mc_filter_type which is piggy backed
@@ -346,16 +349,32 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
346static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, 349static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
347 bool vlan_on) 350 bool vlan_on)
348{ 351{
352 struct ixgbe_mbx_info *mbx = &hw->mbx;
349 u32 msgbuf[2]; 353 u32 msgbuf[2];
354 s32 err;
350 355
351 msgbuf[0] = IXGBE_VF_SET_VLAN; 356 msgbuf[0] = IXGBE_VF_SET_VLAN;
352 msgbuf[1] = vlan; 357 msgbuf[1] = vlan;
353 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 358 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
354 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; 359 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
355 360
356 ixgbevf_write_msg_read_ack(hw, msgbuf, 2); 361 err = mbx->ops.write_posted(hw, msgbuf, 2);
362 if (err)
363 goto mbx_err;
357 364
358 return 0; 365 err = mbx->ops.read_posted(hw, msgbuf, 2);
366 if (err)
367 goto mbx_err;
368
369 /* remove extra bits from the message */
370 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
371 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
372
373 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
374 err = IXGBE_ERR_INVALID_ARGUMENT;
375
376mbx_err:
377 return err;
359} 378}
360 379
361/** 380/**
@@ -389,20 +408,23 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
389 bool *link_up, 408 bool *link_up,
390 bool autoneg_wait_to_complete) 409 bool autoneg_wait_to_complete)
391{ 410{
411 struct ixgbe_mbx_info *mbx = &hw->mbx;
412 struct ixgbe_mac_info *mac = &hw->mac;
413 s32 ret_val = 0;
392 u32 links_reg; 414 u32 links_reg;
415 u32 in_msg = 0;
393 416
394 if (!(hw->mbx.ops.check_for_rst(hw))) { 417 /* If we were hit with a reset drop the link */
395 *link_up = false; 418 if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
396 *speed = 0; 419 mac->get_link_status = true;
397 return -1;
398 }
399 420
400 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 421 if (!mac->get_link_status)
422 goto out;
401 423
402 if (links_reg & IXGBE_LINKS_UP) 424 /* if link status is down no point in checking to see if pf is up */
403 *link_up = true; 425 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
404 else 426 if (!(links_reg & IXGBE_LINKS_UP))
405 *link_up = false; 427 goto out;
406 428
407 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 429 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
408 case IXGBE_LINKS_SPEED_10G_82599: 430 case IXGBE_LINKS_SPEED_10G_82599:
@@ -416,7 +438,79 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
416 break; 438 break;
417 } 439 }
418 440
419 return 0; 441 /* if the read failed it could just be a mailbox collision, best wait
442 * until we are called again and don't report an error */
443 if (mbx->ops.read(hw, &in_msg, 1))
444 goto out;
445
446 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
447 /* msg is not CTS and is NACK we must have lost CTS status */
448 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
449 ret_val = -1;
450 goto out;
451 }
452
453 /* the pf is talking, if we timed out in the past we reinit */
454 if (!mbx->timeout) {
455 ret_val = -1;
456 goto out;
457 }
458
459 /* if we passed all the tests above then the link is up and we no
460 * longer need to check for link */
461 mac->get_link_status = false;
462
463out:
464 *link_up = !mac->get_link_status;
465 return ret_val;
466}
467
468/**
469 * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
470 * @hw: pointer to the HW structure
471 * @max_size: value to assign to max frame size
472 **/
473void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
474{
475 u32 msgbuf[2];
476
477 msgbuf[0] = IXGBE_VF_SET_LPE;
478 msgbuf[1] = max_size;
479 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
480}
481
482/**
483 * ixgbevf_negotiate_api_version - Negotiate supported API version
484 * @hw: pointer to the HW structure
485 * @api: integer containing requested API version
486 **/
487int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
488{
489 int err;
490 u32 msg[3];
491
492 /* Negotiate the mailbox API version */
493 msg[0] = IXGBE_VF_API_NEGOTIATE;
494 msg[1] = api;
495 msg[2] = 0;
496 err = hw->mbx.ops.write_posted(hw, msg, 3);
497
498 if (!err)
499 err = hw->mbx.ops.read_posted(hw, msg, 3);
500
501 if (!err) {
502 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
503
504 /* Store value and return 0 on success */
505 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
506 hw->api_version = api;
507 return 0;
508 }
509
510 err = IXGBE_ERR_INVALID_ARGUMENT;
511 }
512
513 return err;
420} 514}
421 515
422static const struct ixgbe_mac_operations ixgbevf_mac_ops = { 516static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 25c951daee5d..47f11a584d8c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -137,6 +137,8 @@ struct ixgbe_hw {
137 137
138 u8 revision_id; 138 u8 revision_id;
139 bool adapter_stopped; 139 bool adapter_stopped;
140
141 int api_version;
140}; 142};
141 143
142struct ixgbevf_hw_stats { 144struct ixgbevf_hw_stats {
@@ -170,5 +172,7 @@ struct ixgbevf_info {
170 const struct ixgbe_mac_operations *mac_ops; 172 const struct ixgbe_mac_operations *mac_ops;
171}; 173};
172 174
175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
173#endif /* __IXGBE_VF_H__ */ 177#endif /* __IXGBE_VF_H__ */
174 178
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 10bba09c44ea..c10e3a6de09f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -712,10 +712,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
712 if (bounce) 712 if (bounce)
713 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 713 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
714 714
715 /* Run destructor before passing skb to HW */
716 if (likely(!skb_shared(skb)))
717 skb_orphan(skb);
718
719 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { 715 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
720 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); 716 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
721 op_own |= htonl((bf_index & 0xffff) << 8); 717 op_own |= htonl((bf_index & 0xffff) << 8);
diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c
deleted file mode 100644
index db5285befe2a..000000000000
--- a/drivers/net/ethernet/mipsnet.c
+++ /dev/null
@@ -1,345 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 */
6
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/io.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <linux/etherdevice.h>
14#include <linux/platform_device.h>
15#include <asm/mips-boards/simint.h>
16
17#define MIPSNET_VERSION "2007-11-17"
18
19/*
20 * Net status/control block as seen by sw in the core.
21 */
22struct mipsnet_regs {
23 /*
24 * Device info for probing, reads as MIPSNET%d where %d is some
25 * form of version.
26 */
27 u64 devId; /*0x00 */
28
29 /*
30 * read only busy flag.
31 * Set and cleared by the Net Device to indicate that an rx or a tx
32 * is in progress.
33 */
34 u32 busy; /*0x08 */
35
36 /*
37 * Set by the Net Device.
38 * The device will set it once data has been received.
39 * The value is the number of bytes that should be read from
40 * rxDataBuffer. The value will decrease till 0 until all the data
41 * from rxDataBuffer has been read.
42 */
43 u32 rxDataCount; /*0x0c */
44#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
45
46 /*
47 * Settable from the MIPS core, cleared by the Net Device.
48 * The core should set the number of bytes it wants to send,
49 * then it should write those bytes of data to txDataBuffer.
50 * The device will clear txDataCount has been processed (not
51 * necessarily sent).
52 */
53 u32 txDataCount; /*0x10 */
54
55 /*
56 * Interrupt control
57 *
58 * Used to clear the interrupted generated by this dev.
59 * Write a 1 to clear the interrupt. (except bit31).
60 *
61 * Bit0 is set if it was a tx-done interrupt.
62 * Bit1 is set when new rx-data is available.
63 * Until this bit is cleared there will be no other RXs.
64 *
65 * Bit31 is used for testing, it clears after a read.
66 * Writing 1 to this bit will cause an interrupt to be generated.
67 * To clear the test interrupt, write 0 to this register.
68 */
69 u32 interruptControl; /*0x14 */
70#define MIPSNET_INTCTL_TXDONE (1u << 0)
71#define MIPSNET_INTCTL_RXDONE (1u << 1)
72#define MIPSNET_INTCTL_TESTBIT (1u << 31)
73
74 /*
75 * Readonly core-specific interrupt info for the device to signal
76 * the core. The meaning of the contents of this field might change.
77 */
78 /* XXX: the whole memIntf interrupt scheme is messy: the device
79 * should have no control what so ever of what VPE/register set is
80 * being used.
81 * The MemIntf should only expose interrupt lines, and something in
82 * the config should be responsible for the line<->core/vpe bindings.
83 */
84 u32 interruptInfo; /*0x18 */
85
86 /*
87 * This is where the received data is read out.
88 * There is more data to read until rxDataReady is 0.
89 * Only 1 byte at this regs offset is used.
90 */
91 u32 rxDataBuffer; /*0x1c */
92
93 /*
94 * This is where the data to transmit is written.
95 * Data should be written for the amount specified in the
96 * txDataCount register.
97 * Only 1 byte at this regs offset is used.
98 */
99 u32 txDataBuffer; /*0x20 */
100};
101
102#define regaddr(dev, field) \
103 (dev->base_addr + offsetof(struct mipsnet_regs, field))
104
105static char mipsnet_string[] = "mipsnet";
106
107/*
108 * Copy data from the MIPSNET rx data port
109 */
110static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
111 int len)
112{
113 for (; len > 0; len--, kdata++)
114 *kdata = inb(regaddr(dev, rxDataBuffer));
115
116 return inl(regaddr(dev, rxDataCount));
117}
118
119static inline void mipsnet_put_todevice(struct net_device *dev,
120 struct sk_buff *skb)
121{
122 int count_to_go = skb->len;
123 char *buf_ptr = skb->data;
124
125 outl(skb->len, regaddr(dev, txDataCount));
126
127 for (; count_to_go; buf_ptr++, count_to_go--)
128 outb(*buf_ptr, regaddr(dev, txDataBuffer));
129
130 dev->stats.tx_packets++;
131 dev->stats.tx_bytes += skb->len;
132
133 dev_kfree_skb(skb);
134}
135
136static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
137{
138 /*
139 * Only one packet at a time. Once TXDONE interrupt is serviced, the
140 * queue will be restarted.
141 */
142 netif_stop_queue(dev);
143 mipsnet_put_todevice(dev, skb);
144
145 return NETDEV_TX_OK;
146}
147
148static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
149{
150 struct sk_buff *skb;
151
152 if (!len)
153 return len;
154
155 skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
156 if (!skb) {
157 dev->stats.rx_dropped++;
158 return -ENOMEM;
159 }
160
161 skb_reserve(skb, NET_IP_ALIGN);
162 if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
163 return -EFAULT;
164
165 skb->protocol = eth_type_trans(skb, dev);
166 skb->ip_summed = CHECKSUM_UNNECESSARY;
167
168 netif_rx(skb);
169
170 dev->stats.rx_packets++;
171 dev->stats.rx_bytes += len;
172
173 return len;
174}
175
176static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
177{
178 struct net_device *dev = dev_id;
179 u32 int_flags;
180 irqreturn_t ret = IRQ_NONE;
181
182 if (irq != dev->irq)
183 goto out_badirq;
184
185 /* TESTBIT is cleared on read. */
186 int_flags = inl(regaddr(dev, interruptControl));
187 if (int_flags & MIPSNET_INTCTL_TESTBIT) {
188 /* TESTBIT takes effect after a write with 0. */
189 outl(0, regaddr(dev, interruptControl));
190 ret = IRQ_HANDLED;
191 } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
192 /* Only one packet at a time, we are done. */
193 dev->stats.tx_packets++;
194 netif_wake_queue(dev);
195 outl(MIPSNET_INTCTL_TXDONE,
196 regaddr(dev, interruptControl));
197 ret = IRQ_HANDLED;
198 } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
199 mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
200 outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
201 ret = IRQ_HANDLED;
202 }
203 return ret;
204
205out_badirq:
206 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
207 dev->name, __func__, irq);
208 return ret;
209}
210
211static int mipsnet_open(struct net_device *dev)
212{
213 int err;
214
215 err = request_irq(dev->irq, mipsnet_interrupt,
216 IRQF_SHARED, dev->name, (void *) dev);
217 if (err) {
218 release_region(dev->base_addr, sizeof(struct mipsnet_regs));
219 return err;
220 }
221
222 netif_start_queue(dev);
223
224 /* test interrupt handler */
225 outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
226
227 return 0;
228}
229
230static int mipsnet_close(struct net_device *dev)
231{
232 netif_stop_queue(dev);
233 free_irq(dev->irq, dev);
234 return 0;
235}
236
237static void mipsnet_set_mclist(struct net_device *dev)
238{
239}
240
241static const struct net_device_ops mipsnet_netdev_ops = {
242 .ndo_open = mipsnet_open,
243 .ndo_stop = mipsnet_close,
244 .ndo_start_xmit = mipsnet_xmit,
245 .ndo_set_rx_mode = mipsnet_set_mclist,
246 .ndo_change_mtu = eth_change_mtu,
247 .ndo_validate_addr = eth_validate_addr,
248 .ndo_set_mac_address = eth_mac_addr,
249};
250
251static int __devinit mipsnet_probe(struct platform_device *dev)
252{
253 struct net_device *netdev;
254 int err;
255
256 netdev = alloc_etherdev(0);
257 if (!netdev) {
258 err = -ENOMEM;
259 goto out;
260 }
261
262 platform_set_drvdata(dev, netdev);
263
264 netdev->netdev_ops = &mipsnet_netdev_ops;
265
266 /*
267 * TODO: probe for these or load them from PARAM
268 */
269 netdev->base_addr = 0x4200;
270 netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
271 inl(regaddr(netdev, interruptInfo));
272
273 /* Get the io region now, get irq on open() */
274 if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
275 "mipsnet")) {
276 err = -EBUSY;
277 goto out_free_netdev;
278 }
279
280 /*
281 * Lacking any better mechanism to allocate a MAC address we use a
282 * random one ...
283 */
284 eth_hw_addr_random(netdev);
285
286 err = register_netdev(netdev);
287 if (err) {
288 printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
289 goto out_free_region;
290 }
291
292 return 0;
293
294out_free_region:
295 release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
296
297out_free_netdev:
298 free_netdev(netdev);
299
300out:
301 return err;
302}
303
304static int __devexit mipsnet_device_remove(struct platform_device *device)
305{
306 struct net_device *dev = platform_get_drvdata(device);
307
308 unregister_netdev(dev);
309 release_region(dev->base_addr, sizeof(struct mipsnet_regs));
310 free_netdev(dev);
311 platform_set_drvdata(device, NULL);
312
313 return 0;
314}
315
316static struct platform_driver mipsnet_driver = {
317 .driver = {
318 .name = mipsnet_string,
319 .owner = THIS_MODULE,
320 },
321 .probe = mipsnet_probe,
322 .remove = __devexit_p(mipsnet_device_remove),
323};
324
325static int __init mipsnet_init_module(void)
326{
327 int err;
328
329 printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
330 "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
331
332 err = platform_driver_register(&mipsnet_driver);
333 if (err)
334 printk(KERN_ERR "Driver registration failed\n");
335
336 return err;
337}
338
339static void __exit mipsnet_exit_module(void)
340{
341 platform_driver_unregister(&mipsnet_driver);
342}
343
344module_init(mipsnet_init_module);
345module_exit(mipsnet_exit_module);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f45def01a98e..876beceaf2d7 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3409,7 +3409,7 @@ set_speed:
3409 3409
3410 pause_flags = 0; 3410 pause_flags = 0;
3411 /* setup pause frame */ 3411 /* setup pause frame */
3412 if (np->duplex != 0) { 3412 if (netif_running(dev) && (np->duplex != 0)) {
3413 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3413 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3414 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3414 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3415 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); 3415 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
4435 4435
4436 regs->version = FORCEDETH_REGS_VER; 4436 regs->version = FORCEDETH_REGS_VER;
4437 spin_lock_irq(&np->lock); 4437 spin_lock_irq(&np->lock);
4438 for (i = 0; i <= np->register_size/sizeof(u32); i++) 4438 for (i = 0; i < np->register_size/sizeof(u32); i++)
4439 rbuf[i] = readl(base + i*sizeof(u32)); 4439 rbuf[i] = readl(base + i*sizeof(u32));
4440 spin_unlock_irq(&np->lock); 4440 spin_unlock_irq(&np->lock);
4441} 4441}
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
5455 5455
5456 netif_stop_queue(dev); 5456 netif_stop_queue(dev);
5457 spin_lock_irq(&np->lock); 5457 spin_lock_irq(&np->lock);
5458 nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5458 nv_stop_rxtx(dev); 5459 nv_stop_rxtx(dev);
5459 nv_txrx_reset(dev); 5460 nv_txrx_reset(dev);
5460 5461
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5904 goto out_error; 5905 goto out_error;
5905 } 5906 }
5906 5907
5908 netif_carrier_off(dev);
5909
5910 /* Some NICs freeze when TX pause is enabled while NIC is
5911 * down, and this stays across warm reboots. The sequence
5912 * below should be enough to recover from that state.
5913 */
5914 nv_update_pause(dev, 0);
5915 nv_start_tx(dev);
5916 nv_stop_tx(dev);
5917
5907 if (id->driver_data & DEV_HAS_VLAN) 5918 if (id->driver_data & DEV_HAS_VLAN)
5908 nv_vlan_mode(dev, dev->features); 5919 nv_vlan_mode(dev, dev->features);
5909 5920
5910 netif_carrier_off(dev);
5911
5912 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 5921 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5913 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); 5922 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5914 5923
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a7cc56007b33..e7ff886e8047 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -77,7 +77,7 @@
77static const int multicast_filter_limit = 32; 77static const int multicast_filter_limit = 32;
78 78
79#define MAX_READ_REQUEST_SHIFT 12 79#define MAX_READ_REQUEST_SHIFT 12
80#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 80#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ 81#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 82#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 83
@@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_DLINK, 0x4300,
291 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
290 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, 292 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
291 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, 293 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
292 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, 294 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index fb3cbc27063c..25906c1d1b15 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -34,3 +34,10 @@ config SFC_SRIOV
34 This enables support for the SFC9000 I/O Virtualization 34 This enables support for the SFC9000 I/O Virtualization
35 features, allowing accelerated network performance in 35 features, allowing accelerated network performance in
36 virtualized environments. 36 virtualized environments.
37config SFC_PTP
38 bool "Solarflare SFC9000-family PTP support"
39 depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
40 default y
41 ---help---
42 This enables support for the Precision Time Protocol (PTP)
43 on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ea1f8db57318..e11f2ecf69d9 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -5,5 +5,6 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
5 mcdi.o mcdi_phy.o mcdi_mon.o 5 mcdi.o mcdi_phy.o mcdi_mon.o
6sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o 7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
8sfc-$(CONFIG_SFC_PTP) += ptp.o
8 9
9obj-$(CONFIG_SFC) += sfc.o 10obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index b26a954c27fc..5400a33f254f 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -120,10 +120,10 @@ typedef union efx_oword {
120 * [0,high-low), with garbage in bits [high-low+1,...). 120 * [0,high-low), with garbage in bits [high-low+1,...).
121 */ 121 */
122#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ 122#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
123 (((low > max) || (high < min)) ? 0 : \ 123 ((low) > (max) || (high) < (min) ? 0 : \
124 ((low > min) ? \ 124 (low) > (min) ? \
125 ((native_element) >> (low - min)) : \ 125 (native_element) >> ((low) - (min)) : \
126 ((native_element) << (min - low)))) 126 (native_element) << ((min) - (low)))
127 127
128/* 128/*
129 * Extract bit field portion [low,high) from the 64-bit little-endian 129 * Extract bit field portion [low,high) from the 64-bit little-endian
@@ -142,27 +142,27 @@ typedef union efx_oword {
142#define EFX_EXTRACT_OWORD64(oword, low, high) \ 142#define EFX_EXTRACT_OWORD64(oword, low, high) \
143 ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ 143 ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
144 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ 144 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
145 EFX_MASK64(high + 1 - low)) 145 EFX_MASK64((high) + 1 - (low)))
146 146
147#define EFX_EXTRACT_QWORD64(qword, low, high) \ 147#define EFX_EXTRACT_QWORD64(qword, low, high) \
148 (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ 148 (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
149 EFX_MASK64(high + 1 - low)) 149 EFX_MASK64((high) + 1 - (low)))
150 150
151#define EFX_EXTRACT_OWORD32(oword, low, high) \ 151#define EFX_EXTRACT_OWORD32(oword, low, high) \
152 ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ 152 ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
153 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ 153 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
154 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ 154 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
155 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ 155 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
156 EFX_MASK32(high + 1 - low)) 156 EFX_MASK32((high) + 1 - (low)))
157 157
158#define EFX_EXTRACT_QWORD32(qword, low, high) \ 158#define EFX_EXTRACT_QWORD32(qword, low, high) \
159 ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ 159 ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
160 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ 160 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
161 EFX_MASK32(high + 1 - low)) 161 EFX_MASK32((high) + 1 - (low)))
162 162
163#define EFX_EXTRACT_DWORD(dword, low, high) \ 163#define EFX_EXTRACT_DWORD(dword, low, high) \
164 (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ 164 (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
165 EFX_MASK32(high + 1 - low)) 165 EFX_MASK32((high) + 1 - (low)))
166 166
167#define EFX_OWORD_FIELD64(oword, field) \ 167#define EFX_OWORD_FIELD64(oword, field) \
168 EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ 168 EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
@@ -442,10 +442,10 @@ typedef union efx_oword {
442 cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) 442 cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
443 443
444#define EFX_INPLACE_MASK64(min, max, low, high) \ 444#define EFX_INPLACE_MASK64(min, max, low, high) \
445 EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low)) 445 EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
446 446
447#define EFX_INPLACE_MASK32(min, max, low, high) \ 447#define EFX_INPLACE_MASK32(min, max, low, high) \
448 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low)) 448 EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
449 449
450#define EFX_SET_OWORD64(oword, low, high, value) do { \ 450#define EFX_SET_OWORD64(oword, low, high, value) do { \
451 (oword).u64[0] = (((oword).u64[0] \ 451 (oword).u64[0] = (((oword).u64[0] \
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 65a8d49106a4..96bd980e828d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
202 202
203#define EFX_ASSERT_RESET_SERIALISED(efx) \ 203#define EFX_ASSERT_RESET_SERIALISED(efx) \
204 do { \ 204 do { \
205 if ((efx->state == STATE_RUNNING) || \ 205 if ((efx->state == STATE_READY) || \
206 (efx->state == STATE_DISABLED)) \ 206 (efx->state == STATE_DISABLED)) \
207 ASSERT_RTNL(); \ 207 ASSERT_RTNL(); \
208 } while (0) 208 } while (0)
209 209
210static int efx_check_disabled(struct efx_nic *efx)
211{
212 if (efx->state == STATE_DISABLED) {
213 netif_err(efx, drv, efx->net_dev,
214 "device is disabled due to earlier errors\n");
215 return -EIO;
216 }
217 return 0;
218}
219
210/************************************************************************** 220/**************************************************************************
211 * 221 *
212 * Event queue processing 222 * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
630 efx->rx_buffer_order = get_order(efx->rx_buffer_len + 640 efx->rx_buffer_order = get_order(efx->rx_buffer_len +
631 sizeof(struct efx_rx_page_state)); 641 sizeof(struct efx_rx_page_state));
632 642
643 /* We must keep at least one descriptor in a TX ring empty.
644 * We could avoid this when the queue size does not exactly
645 * match the hardware ring size, but it's not that important.
646 * Therefore we stop the queue when one more skb might fill
647 * the ring completely. We wake it when half way back to
648 * empty.
649 */
650 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
651 efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
652
633 /* Initialise the channels */ 653 /* Initialise the channels */
634 efx_for_each_channel(channel, efx) { 654 efx_for_each_channel(channel, efx) {
635 efx_for_each_channel_tx_queue(tx_queue, channel) 655 efx_for_each_channel_tx_queue(tx_queue, channel)
@@ -714,6 +734,7 @@ static void efx_remove_channel(struct efx_channel *channel)
714 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 734 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
715 efx_remove_tx_queue(tx_queue); 735 efx_remove_tx_queue(tx_queue);
716 efx_remove_eventq(channel); 736 efx_remove_eventq(channel);
737 channel->type->post_remove(channel);
717} 738}
718 739
719static void efx_remove_channels(struct efx_nic *efx) 740static void efx_remove_channels(struct efx_nic *efx)
@@ -730,7 +751,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
730 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 751 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
731 u32 old_rxq_entries, old_txq_entries; 752 u32 old_rxq_entries, old_txq_entries;
732 unsigned i, next_buffer_table = 0; 753 unsigned i, next_buffer_table = 0;
733 int rc = 0; 754 int rc;
755
756 rc = efx_check_disabled(efx);
757 if (rc)
758 return rc;
734 759
735 /* Not all channels should be reallocated. We must avoid 760 /* Not all channels should be reallocated. We must avoid
736 * reallocating their buffer table entries. 761 * reallocating their buffer table entries.
@@ -828,6 +853,7 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
828 853
829static const struct efx_channel_type efx_default_channel_type = { 854static const struct efx_channel_type efx_default_channel_type = {
830 .pre_probe = efx_channel_dummy_op_int, 855 .pre_probe = efx_channel_dummy_op_int,
856 .post_remove = efx_channel_dummy_op_void,
831 .get_name = efx_get_channel_name, 857 .get_name = efx_get_channel_name,
832 .copy = efx_copy_channel, 858 .copy = efx_copy_channel,
833 .keep_eventq = false, 859 .keep_eventq = false,
@@ -838,6 +864,10 @@ int efx_channel_dummy_op_int(struct efx_channel *channel)
838 return 0; 864 return 0;
839} 865}
840 866
867void efx_channel_dummy_op_void(struct efx_channel *channel)
868{
869}
870
841/************************************************************************** 871/**************************************************************************
842 * 872 *
843 * Port handling 873 * Port handling
@@ -1365,6 +1395,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1365{ 1395{
1366 struct efx_channel *channel; 1396 struct efx_channel *channel;
1367 1397
1398 BUG_ON(efx->state == STATE_DISABLED);
1399
1368 if (efx->legacy_irq) 1400 if (efx->legacy_irq)
1369 efx->legacy_irq_enabled = true; 1401 efx->legacy_irq_enabled = true;
1370 efx_nic_enable_interrupts(efx); 1402 efx_nic_enable_interrupts(efx);
@@ -1382,6 +1414,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1382{ 1414{
1383 struct efx_channel *channel; 1415 struct efx_channel *channel;
1384 1416
1417 if (efx->state == STATE_DISABLED)
1418 return;
1419
1385 efx_mcdi_mode_poll(efx); 1420 efx_mcdi_mode_poll(efx);
1386 1421
1387 efx_nic_disable_interrupts(efx); 1422 efx_nic_disable_interrupts(efx);
@@ -1422,10 +1457,16 @@ static void efx_set_channels(struct efx_nic *efx)
1422 efx->tx_channel_offset = 1457 efx->tx_channel_offset =
1423 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1458 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1424 1459
1425 /* We need to adjust the TX queue numbers if we have separate 1460 /* We need to mark which channels really have RX and TX
1461 * queues, and adjust the TX queue numbers if we have separate
1426 * RX-only and TX-only channels. 1462 * RX-only and TX-only channels.
1427 */ 1463 */
1428 efx_for_each_channel(channel, efx) { 1464 efx_for_each_channel(channel, efx) {
1465 if (channel->channel < efx->n_rx_channels)
1466 channel->rx_queue.core_index = channel->channel;
1467 else
1468 channel->rx_queue.core_index = -1;
1469
1429 efx_for_each_channel_tx_queue(tx_queue, channel) 1470 efx_for_each_channel_tx_queue(tx_queue, channel)
1430 tx_queue->queue -= (efx->tx_channel_offset * 1471 tx_queue->queue -= (efx->tx_channel_offset *
1431 EFX_TXQ_TYPES); 1472 EFX_TXQ_TYPES);
@@ -1533,22 +1574,21 @@ static int efx_probe_all(struct efx_nic *efx)
1533 return rc; 1574 return rc;
1534} 1575}
1535 1576
1536/* Called after previous invocation(s) of efx_stop_all, restarts the port, 1577/* If the interface is supposed to be running but is not, start
1537 * kernel transmit queues and NAPI processing, and ensures that the port is 1578 * the hardware and software data path, regular activity for the port
1538 * scheduled to be reconfigured. This function is safe to call multiple 1579 * (MAC statistics, link polling, etc.) and schedule the port to be
1539 * times when the NIC is in any state. 1580 * reconfigured. Interrupts must already be enabled. This function
1581 * is safe to call multiple times, so long as the NIC is not disabled.
1582 * Requires the RTNL lock.
1540 */ 1583 */
1541static void efx_start_all(struct efx_nic *efx) 1584static void efx_start_all(struct efx_nic *efx)
1542{ 1585{
1543 EFX_ASSERT_RESET_SERIALISED(efx); 1586 EFX_ASSERT_RESET_SERIALISED(efx);
1587 BUG_ON(efx->state == STATE_DISABLED);
1544 1588
1545 /* Check that it is appropriate to restart the interface. All 1589 /* Check that it is appropriate to restart the interface. All
1546 * of these flags are safe to read under just the rtnl lock */ 1590 * of these flags are safe to read under just the rtnl lock */
1547 if (efx->port_enabled) 1591 if (efx->port_enabled || !netif_running(efx->net_dev))
1548 return;
1549 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1550 return;
1551 if (!netif_running(efx->net_dev))
1552 return; 1592 return;
1553 1593
1554 efx_start_port(efx); 1594 efx_start_port(efx);
@@ -1582,11 +1622,11 @@ static void efx_flush_all(struct efx_nic *efx)
1582 cancel_work_sync(&efx->mac_work); 1622 cancel_work_sync(&efx->mac_work);
1583} 1623}
1584 1624
1585/* Quiesce hardware and software without bringing the link down. 1625/* Quiesce the hardware and software data path, and regular activity
1586 * Safe to call multiple times, when the nic and interface is in any 1626 * for the port without bringing the link down. Safe to call multiple
1587 * state. The caller is guaranteed to subsequently be in a position 1627 * times with the NIC in almost any state, but interrupts should be
1588 * to modify any hardware and software state they see fit without 1628 * enabled. Requires the RTNL lock.
1589 * taking locks. */ 1629 */
1590static void efx_stop_all(struct efx_nic *efx) 1630static void efx_stop_all(struct efx_nic *efx)
1591{ 1631{
1592 EFX_ASSERT_RESET_SERIALISED(efx); 1632 EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,7 +1779,8 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1739 struct efx_nic *efx = netdev_priv(net_dev); 1779 struct efx_nic *efx = netdev_priv(net_dev);
1740 struct mii_ioctl_data *data = if_mii(ifr); 1780 struct mii_ioctl_data *data = if_mii(ifr);
1741 1781
1742 EFX_ASSERT_RESET_SERIALISED(efx); 1782 if (cmd == SIOCSHWTSTAMP)
1783 return efx_ptp_ioctl(efx, ifr, cmd);
1743 1784
1744 /* Convert phy_id from older PRTAD/DEVAD format */ 1785 /* Convert phy_id from older PRTAD/DEVAD format */
1745 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 1786 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -1820,13 +1861,14 @@ static void efx_netpoll(struct net_device *net_dev)
1820static int efx_net_open(struct net_device *net_dev) 1861static int efx_net_open(struct net_device *net_dev)
1821{ 1862{
1822 struct efx_nic *efx = netdev_priv(net_dev); 1863 struct efx_nic *efx = netdev_priv(net_dev);
1823 EFX_ASSERT_RESET_SERIALISED(efx); 1864 int rc;
1824 1865
1825 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 1866 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
1826 raw_smp_processor_id()); 1867 raw_smp_processor_id());
1827 1868
1828 if (efx->state == STATE_DISABLED) 1869 rc = efx_check_disabled(efx);
1829 return -EIO; 1870 if (rc)
1871 return rc;
1830 if (efx->phy_mode & PHY_MODE_SPECIAL) 1872 if (efx->phy_mode & PHY_MODE_SPECIAL)
1831 return -EBUSY; 1873 return -EBUSY;
1832 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 1874 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1894,8 @@ static int efx_net_stop(struct net_device *net_dev)
1852 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 1894 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
1853 raw_smp_processor_id()); 1895 raw_smp_processor_id());
1854 1896
1855 if (efx->state != STATE_DISABLED) { 1897 /* Stop the device and flush all the channels */
1856 /* Stop the device and flush all the channels */ 1898 efx_stop_all(efx);
1857 efx_stop_all(efx);
1858 }
1859 1899
1860 return 0; 1900 return 0;
1861} 1901}
@@ -1915,9 +1955,11 @@ static void efx_watchdog(struct net_device *net_dev)
1915static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1955static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1916{ 1956{
1917 struct efx_nic *efx = netdev_priv(net_dev); 1957 struct efx_nic *efx = netdev_priv(net_dev);
1958 int rc;
1918 1959
1919 EFX_ASSERT_RESET_SERIALISED(efx); 1960 rc = efx_check_disabled(efx);
1920 1961 if (rc)
1962 return rc;
1921 if (new_mtu > EFX_MAX_MTU) 1963 if (new_mtu > EFX_MAX_MTU)
1922 return -EINVAL; 1964 return -EINVAL;
1923 1965
@@ -1926,8 +1968,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1926 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 1968 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1927 1969
1928 mutex_lock(&efx->mac_lock); 1970 mutex_lock(&efx->mac_lock);
1929 /* Reconfigure the MAC before enabling the dma queues so that
1930 * the RX buffers don't overflow */
1931 net_dev->mtu = new_mtu; 1971 net_dev->mtu = new_mtu;
1932 efx->type->reconfigure_mac(efx); 1972 efx->type->reconfigure_mac(efx);
1933 mutex_unlock(&efx->mac_lock); 1973 mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1982,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1942 struct sockaddr *addr = data; 1982 struct sockaddr *addr = data;
1943 char *new_addr = addr->sa_data; 1983 char *new_addr = addr->sa_data;
1944 1984
1945 EFX_ASSERT_RESET_SERIALISED(efx);
1946
1947 if (!is_valid_ether_addr(new_addr)) { 1985 if (!is_valid_ether_addr(new_addr)) {
1948 netif_err(efx, drv, efx->net_dev, 1986 netif_err(efx, drv, efx->net_dev,
1949 "invalid ethernet MAC address requested: %pM\n", 1987 "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2117,27 @@ static int efx_register_netdev(struct efx_nic *efx)
2079 2117
2080 rtnl_lock(); 2118 rtnl_lock();
2081 2119
2120 /* Enable resets to be scheduled and check whether any were
2121 * already requested. If so, the NIC is probably hosed so we
2122 * abort.
2123 */
2124 efx->state = STATE_READY;
2125 smp_mb(); /* ensure we change state before checking reset_pending */
2126 if (efx->reset_pending) {
2127 netif_err(efx, probe, efx->net_dev,
2128 "aborting probe due to scheduled reset\n");
2129 rc = -EIO;
2130 goto fail_locked;
2131 }
2132
2082 rc = dev_alloc_name(net_dev, net_dev->name); 2133 rc = dev_alloc_name(net_dev, net_dev->name);
2083 if (rc < 0) 2134 if (rc < 0)
2084 goto fail_locked; 2135 goto fail_locked;
2085 efx_update_name(efx); 2136 efx_update_name(efx);
2086 2137
2138 /* Always start with carrier off; PHY events will detect the link */
2139 netif_carrier_off(net_dev);
2140
2087 rc = register_netdevice(net_dev); 2141 rc = register_netdevice(net_dev);
2088 if (rc) 2142 if (rc)
2089 goto fail_locked; 2143 goto fail_locked;
@@ -2094,9 +2148,6 @@ static int efx_register_netdev(struct efx_nic *efx)
2094 efx_init_tx_queue_core_txq(tx_queue); 2148 efx_init_tx_queue_core_txq(tx_queue);
2095 } 2149 }
2096 2150
2097 /* Always start with carrier off; PHY events will detect the link */
2098 netif_carrier_off(net_dev);
2099
2100 rtnl_unlock(); 2151 rtnl_unlock();
2101 2152
2102 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2153 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2159,14 @@ static int efx_register_netdev(struct efx_nic *efx)
2108 2159
2109 return 0; 2160 return 0;
2110 2161
2162fail_registered:
2163 rtnl_lock();
2164 unregister_netdevice(net_dev);
2111fail_locked: 2165fail_locked:
2166 efx->state = STATE_UNINIT;
2112 rtnl_unlock(); 2167 rtnl_unlock();
2113 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 2168 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2114 return rc; 2169 return rc;
2115
2116fail_registered:
2117 unregister_netdev(net_dev);
2118 return rc;
2119} 2170}
2120 2171
2121static void efx_unregister_netdev(struct efx_nic *efx) 2172static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2189,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
2138 2189
2139 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2190 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2140 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2191 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2141 unregister_netdev(efx->net_dev); 2192
2193 rtnl_lock();
2194 unregister_netdevice(efx->net_dev);
2195 efx->state = STATE_UNINIT;
2196 rtnl_unlock();
2142} 2197}
2143 2198
2144/************************************************************************** 2199/**************************************************************************
@@ -2154,9 +2209,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2154 EFX_ASSERT_RESET_SERIALISED(efx); 2209 EFX_ASSERT_RESET_SERIALISED(efx);
2155 2210
2156 efx_stop_all(efx); 2211 efx_stop_all(efx);
2157 mutex_lock(&efx->mac_lock);
2158
2159 efx_stop_interrupts(efx, false); 2212 efx_stop_interrupts(efx, false);
2213
2214 mutex_lock(&efx->mac_lock);
2160 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 2215 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
2161 efx->phy_op->fini(efx); 2216 efx->phy_op->fini(efx);
2162 efx->type->fini(efx); 2217 efx->type->fini(efx);
@@ -2276,16 +2331,15 @@ static void efx_reset_work(struct work_struct *data)
2276 if (!pending) 2331 if (!pending)
2277 return; 2332 return;
2278 2333
2279 /* If we're not RUNNING then don't reset. Leave the reset_pending
2280 * flags set so that efx_pci_probe_main will be retried */
2281 if (efx->state != STATE_RUNNING) {
2282 netif_info(efx, drv, efx->net_dev,
2283 "scheduled reset quenched. NIC not RUNNING\n");
2284 return;
2285 }
2286
2287 rtnl_lock(); 2334 rtnl_lock();
2288 (void)efx_reset(efx, fls(pending) - 1); 2335
2336 /* We checked the state in efx_schedule_reset() but it may
2337 * have changed by now. Now that we have the RTNL lock,
2338 * it cannot change again.
2339 */
2340 if (efx->state == STATE_READY)
2341 (void)efx_reset(efx, fls(pending) - 1);
2342
2289 rtnl_unlock(); 2343 rtnl_unlock();
2290} 2344}
2291 2345
@@ -2311,6 +2365,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2311 } 2365 }
2312 2366
2313 set_bit(method, &efx->reset_pending); 2367 set_bit(method, &efx->reset_pending);
2368 smp_mb(); /* ensure we change reset_pending before checking state */
2369
2370 /* If we're not READY then just leave the flags set as the cue
2371 * to abort probing or reschedule the reset later.
2372 */
2373 if (ACCESS_ONCE(efx->state) != STATE_READY)
2374 return;
2314 2375
2315 /* efx_process_channel() will no longer read events once a 2376 /* efx_process_channel() will no longer read events once a
2316 * reset is scheduled. So switch back to poll'd MCDI completions. */ 2377 * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2437,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
2376/* This zeroes out and then fills in the invariants in a struct 2437/* This zeroes out and then fills in the invariants in a struct
2377 * efx_nic (including all sub-structures). 2438 * efx_nic (including all sub-structures).
2378 */ 2439 */
2379static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, 2440static int efx_init_struct(struct efx_nic *efx,
2380 struct pci_dev *pci_dev, struct net_device *net_dev) 2441 struct pci_dev *pci_dev, struct net_device *net_dev)
2381{ 2442{
2382 int i; 2443 int i;
2383 2444
2384 /* Initialise common structures */ 2445 /* Initialise common structures */
2385 memset(efx, 0, sizeof(*efx));
2386 spin_lock_init(&efx->biu_lock); 2446 spin_lock_init(&efx->biu_lock);
2387#ifdef CONFIG_SFC_MTD 2447#ifdef CONFIG_SFC_MTD
2388 INIT_LIST_HEAD(&efx->mtd_list); 2448 INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2452,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2392 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); 2452 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
2393 efx->pci_dev = pci_dev; 2453 efx->pci_dev = pci_dev;
2394 efx->msg_enable = debug; 2454 efx->msg_enable = debug;
2395 efx->state = STATE_INIT; 2455 efx->state = STATE_UNINIT;
2396 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2456 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2397 2457
2398 efx->net_dev = net_dev; 2458 efx->net_dev = net_dev;
@@ -2409,8 +2469,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2409 goto fail; 2469 goto fail;
2410 } 2470 }
2411 2471
2412 efx->type = type;
2413
2414 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 2472 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
2415 2473
2416 /* Higher numbered interrupt modes are less capable! */ 2474 /* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2513,12 @@ static void efx_fini_struct(struct efx_nic *efx)
2455 */ 2513 */
2456static void efx_pci_remove_main(struct efx_nic *efx) 2514static void efx_pci_remove_main(struct efx_nic *efx)
2457{ 2515{
2516 /* Flush reset_work. It can no longer be scheduled since we
2517 * are not READY.
2518 */
2519 BUG_ON(efx->state == STATE_READY);
2520 cancel_work_sync(&efx->reset_work);
2521
2458#ifdef CONFIG_RFS_ACCEL 2522#ifdef CONFIG_RFS_ACCEL
2459 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); 2523 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2460 efx->net_dev->rx_cpu_rmap = NULL; 2524 efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2544,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2480 2544
2481 /* Mark the NIC as fini, then stop the interface */ 2545 /* Mark the NIC as fini, then stop the interface */
2482 rtnl_lock(); 2546 rtnl_lock();
2483 efx->state = STATE_FINI;
2484 dev_close(efx->net_dev); 2547 dev_close(efx->net_dev);
2485 2548 efx_stop_interrupts(efx, false);
2486 /* Allow any queued efx_resets() to complete */
2487 rtnl_unlock(); 2549 rtnl_unlock();
2488 2550
2489 efx_stop_interrupts(efx, false);
2490 efx_sriov_fini(efx); 2551 efx_sriov_fini(efx);
2491 efx_unregister_netdev(efx); 2552 efx_unregister_netdev(efx);
2492 2553
2493 efx_mtd_remove(efx); 2554 efx_mtd_remove(efx);
2494 2555
2495 /* Wait for any scheduled resets to complete. No more will be
2496 * scheduled from this point because efx_stop_all() has been
2497 * called, we are no longer registered with driverlink, and
2498 * the net_device's have been removed. */
2499 cancel_work_sync(&efx->reset_work);
2500
2501 efx_pci_remove_main(efx); 2556 efx_pci_remove_main(efx);
2502 2557
2503 efx_fini_io(efx); 2558 efx_fini_io(efx);
@@ -2617,7 +2672,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2617static int __devinit efx_pci_probe(struct pci_dev *pci_dev, 2672static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2618 const struct pci_device_id *entry) 2673 const struct pci_device_id *entry)
2619{ 2674{
2620 const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
2621 struct net_device *net_dev; 2675 struct net_device *net_dev;
2622 struct efx_nic *efx; 2676 struct efx_nic *efx;
2623 int rc; 2677 int rc;
@@ -2627,10 +2681,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2627 EFX_MAX_RX_QUEUES); 2681 EFX_MAX_RX_QUEUES);
2628 if (!net_dev) 2682 if (!net_dev)
2629 return -ENOMEM; 2683 return -ENOMEM;
2630 net_dev->features |= (type->offload_features | NETIF_F_SG | 2684 efx = netdev_priv(net_dev);
2685 efx->type = (const struct efx_nic_type *) entry->driver_data;
2686 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2631 NETIF_F_HIGHDMA | NETIF_F_TSO | 2687 NETIF_F_HIGHDMA | NETIF_F_TSO |
2632 NETIF_F_RXCSUM); 2688 NETIF_F_RXCSUM);
2633 if (type->offload_features & NETIF_F_V6_CSUM) 2689 if (efx->type->offload_features & NETIF_F_V6_CSUM)
2634 net_dev->features |= NETIF_F_TSO6; 2690 net_dev->features |= NETIF_F_TSO6;
2635 /* Mask for features that also apply to VLAN devices */ 2691 /* Mask for features that also apply to VLAN devices */
2636 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2692 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2694,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2638 NETIF_F_RXCSUM); 2694 NETIF_F_RXCSUM);
2639 /* All offloads can be toggled */ 2695 /* All offloads can be toggled */
2640 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; 2696 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
2641 efx = netdev_priv(net_dev);
2642 pci_set_drvdata(pci_dev, efx); 2697 pci_set_drvdata(pci_dev, efx);
2643 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2698 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2644 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2699 rc = efx_init_struct(efx, pci_dev, net_dev);
2645 if (rc) 2700 if (rc)
2646 goto fail1; 2701 goto fail1;
2647 2702
@@ -2656,28 +2711,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2656 goto fail2; 2711 goto fail2;
2657 2712
2658 rc = efx_pci_probe_main(efx); 2713 rc = efx_pci_probe_main(efx);
2659
2660 /* Serialise against efx_reset(). No more resets will be
2661 * scheduled since efx_stop_all() has been called, and we have
2662 * not and never have been registered.
2663 */
2664 cancel_work_sync(&efx->reset_work);
2665
2666 if (rc) 2714 if (rc)
2667 goto fail3; 2715 goto fail3;
2668 2716
2669 /* If there was a scheduled reset during probe, the NIC is
2670 * probably hosed anyway.
2671 */
2672 if (efx->reset_pending) {
2673 rc = -EIO;
2674 goto fail4;
2675 }
2676
2677 /* Switch to the running state before we expose the device to the OS,
2678 * so that dev_open()|efx_start_all() will actually start the device */
2679 efx->state = STATE_RUNNING;
2680
2681 rc = efx_register_netdev(efx); 2717 rc = efx_register_netdev(efx);
2682 if (rc) 2718 if (rc)
2683 goto fail4; 2719 goto fail4;
@@ -2717,12 +2753,18 @@ static int efx_pm_freeze(struct device *dev)
2717{ 2753{
2718 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2754 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2719 2755
2720 efx->state = STATE_FINI; 2756 rtnl_lock();
2721 2757
2722 netif_device_detach(efx->net_dev); 2758 if (efx->state != STATE_DISABLED) {
2759 efx->state = STATE_UNINIT;
2723 2760
2724 efx_stop_all(efx); 2761 netif_device_detach(efx->net_dev);
2725 efx_stop_interrupts(efx, false); 2762
2763 efx_stop_all(efx);
2764 efx_stop_interrupts(efx, false);
2765 }
2766
2767 rtnl_unlock();
2726 2768
2727 return 0; 2769 return 0;
2728} 2770}
@@ -2731,21 +2773,25 @@ static int efx_pm_thaw(struct device *dev)
2731{ 2773{
2732 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2774 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2733 2775
2734 efx->state = STATE_INIT; 2776 rtnl_lock();
2735 2777
2736 efx_start_interrupts(efx, false); 2778 if (efx->state != STATE_DISABLED) {
2779 efx_start_interrupts(efx, false);
2737 2780
2738 mutex_lock(&efx->mac_lock); 2781 mutex_lock(&efx->mac_lock);
2739 efx->phy_op->reconfigure(efx); 2782 efx->phy_op->reconfigure(efx);
2740 mutex_unlock(&efx->mac_lock); 2783 mutex_unlock(&efx->mac_lock);
2741 2784
2742 efx_start_all(efx); 2785 efx_start_all(efx);
2743 2786
2744 netif_device_attach(efx->net_dev); 2787 netif_device_attach(efx->net_dev);
2745 2788
2746 efx->state = STATE_RUNNING; 2789 efx->state = STATE_READY;
2747 2790
2748 efx->type->resume_wol(efx); 2791 efx->type->resume_wol(efx);
2792 }
2793
2794 rtnl_unlock();
2749 2795
2750 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 2796 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
2751 queue_work(reset_workqueue, &efx->reset_work); 2797 queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 70755c97251a..f11170bc48bf 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -102,6 +102,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
102 102
103/* Channels */ 103/* Channels */
104extern int efx_channel_dummy_op_int(struct efx_channel *channel); 104extern int efx_channel_dummy_op_int(struct efx_channel *channel);
105extern void efx_channel_dummy_op_void(struct efx_channel *channel);
105extern void efx_process_channel_now(struct efx_channel *channel); 106extern void efx_process_channel_now(struct efx_channel *channel);
106extern int 107extern int
107efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); 108efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5faedd855b77..90f078eff8e6 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -337,7 +337,8 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
337 unsigned int test_index, 337 unsigned int test_index,
338 struct ethtool_string *strings, u64 *data) 338 struct ethtool_string *strings, u64 *data)
339{ 339{
340 struct efx_channel *channel = efx_get_channel(efx, 0); 340 struct efx_channel *channel =
341 efx_get_channel(efx, efx->tx_channel_offset);
341 struct efx_tx_queue *tx_queue; 342 struct efx_tx_queue *tx_queue;
342 343
343 efx_for_each_channel_tx_queue(tx_queue, channel) { 344 efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -529,9 +530,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
529 if (!efx_tests) 530 if (!efx_tests)
530 goto fail; 531 goto fail;
531 532
532 533 if (efx->state != STATE_READY) {
533 ASSERT_RTNL();
534 if (efx->state != STATE_RUNNING) {
535 rc = -EIO; 534 rc = -EIO;
536 goto fail1; 535 goto fail1;
537 } 536 }
@@ -962,9 +961,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
962 int rc; 961 int rc;
963 962
964 /* Check that user wants us to choose the location */ 963 /* Check that user wants us to choose the location */
965 if (rule->location != RX_CLS_LOC_ANY && 964 if (rule->location != RX_CLS_LOC_ANY)
966 rule->location != RX_CLS_LOC_FIRST &&
967 rule->location != RX_CLS_LOC_LAST)
968 return -EINVAL; 965 return -EINVAL;
969 966
970 /* Range-check ring_cookie */ 967 /* Range-check ring_cookie */
@@ -978,9 +975,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
978 rule->m_ext.data[1])) 975 rule->m_ext.data[1]))
979 return -EINVAL; 976 return -EINVAL;
980 977
981 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 978 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
982 (rule->location == RX_CLS_LOC_FIRST) ?
983 EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
984 (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 979 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
985 0xfff : rule->ring_cookie); 980 0xfff : rule->ring_cookie);
986 981
@@ -1176,6 +1171,7 @@ const struct ethtool_ops efx_ethtool_ops = {
1176 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1171 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
1177 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1172 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
1178 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1173 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
1174 .get_ts_info = efx_ptp_get_ts_info,
1179 .get_module_info = efx_ethtool_get_module_info, 1175 .get_module_info = efx_ethtool_get_module_info,
1180 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1176 .get_module_eeprom = efx_ethtool_get_module_eeprom,
1181}; 1177};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 8687a6c3db0d..ec1e99d0dcad 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
380 new_mode = PHY_MODE_SPECIAL; 380 new_mode = PHY_MODE_SPECIAL;
381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { 381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
382 err = 0; 382 err = 0;
383 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 383 } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
384 err = -EBUSY; 384 err = -EBUSY;
385 } else { 385 } else {
386 /* Reset the PHY, reconfigure the MAC and enable/disable 386 /* Reset the PHY, reconfigure the MAC and enable/disable
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index c3fd61f0a95c..8af42cd1feda 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -162,20 +162,12 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
162 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags & 162 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
163 EFX_FILTER_FLAG_RX_RSS)); 163 EFX_FILTER_FLAG_RX_RSS));
164 EFX_SET_OWORD_FIELD( 164 EFX_SET_OWORD_FIELD(
165 filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
166 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
167 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
168 EFX_SET_OWORD_FIELD(
169 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, 165 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
170 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id); 166 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
171 EFX_SET_OWORD_FIELD( 167 EFX_SET_OWORD_FIELD(
172 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, 168 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
173 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags & 169 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
174 EFX_FILTER_FLAG_RX_RSS)); 170 EFX_FILTER_FLAG_RX_RSS));
175 EFX_SET_OWORD_FIELD(
176 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
177 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
178 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
179 } 171 }
180 172
181 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 173 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -480,14 +472,12 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
480 472
481 case EFX_FILTER_TABLE_RX_MAC: { 473 case EFX_FILTER_TABLE_RX_MAC: {
482 bool is_wild = spec->type == EFX_FILTER_MAC_WILD; 474 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
483 EFX_POPULATE_OWORD_8( 475 EFX_POPULATE_OWORD_7(
484 *filter, 476 *filter,
485 FRF_CZ_RMFT_RSS_EN, 477 FRF_CZ_RMFT_RSS_EN,
486 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), 478 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
487 FRF_CZ_RMFT_SCATTER_EN, 479 FRF_CZ_RMFT_SCATTER_EN,
488 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), 480 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
489 FRF_CZ_RMFT_IP_OVERRIDE,
490 !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
491 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, 481 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
492 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, 482 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
493 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], 483 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
@@ -567,49 +557,62 @@ static int efx_filter_search(struct efx_filter_table *table,
567} 557}
568 558
569/* 559/*
570 * Construct/deconstruct external filter IDs. These must be ordered 560 * Construct/deconstruct external filter IDs. At least the RX filter
571 * by matching priority, for RX NFC semantics. 561 * IDs must be ordered by matching priority, for RX NFC semantics.
572 * 562 *
573 * Each RX MAC filter entry has a flag for whether it can override an 563 * Deconstruction needs to be robust against invalid IDs so that
574 * RX IP filter that also matches. So we assign locations for MAC 564 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
575 * filters with overriding behaviour, then for IP filters, then for 565 * accept user-provided IDs.
576 * MAC filters without overriding behaviour.
577 */ 566 */
578 567
579#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP 0 568#define EFX_FILTER_MATCH_PRI_COUNT 5
580#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP 1 569
581#define EFX_FILTER_MATCH_PRI_NORMAL_BASE 2 570static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
571 [EFX_FILTER_TCP_FULL] = 0,
572 [EFX_FILTER_UDP_FULL] = 0,
573 [EFX_FILTER_TCP_WILD] = 1,
574 [EFX_FILTER_UDP_WILD] = 1,
575 [EFX_FILTER_MAC_FULL] = 2,
576 [EFX_FILTER_MAC_WILD] = 3,
577 [EFX_FILTER_UC_DEF] = 4,
578 [EFX_FILTER_MC_DEF] = 4,
579};
580
581static const enum efx_filter_table_id efx_filter_range_table[] = {
582 EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
583 EFX_FILTER_TABLE_RX_IP,
584 EFX_FILTER_TABLE_RX_MAC,
585 EFX_FILTER_TABLE_RX_MAC,
586 EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
587 EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
588 EFX_FILTER_TABLE_COUNT, /* invalid */
589 EFX_FILTER_TABLE_TX_MAC,
590 EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
591};
582 592
583#define EFX_FILTER_INDEX_WIDTH 13 593#define EFX_FILTER_INDEX_WIDTH 13
584#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1) 594#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
585 595
586static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id, 596static inline u32
587 unsigned int index, u8 flags) 597efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
588{ 598{
589 unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id; 599 unsigned int range;
590 600
591 if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) { 601 range = efx_filter_type_match_pri[spec->type];
592 if (table_id == EFX_FILTER_TABLE_RX_MAC) 602 if (!(spec->flags & EFX_FILTER_FLAG_RX))
593 match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP; 603 range += EFX_FILTER_MATCH_PRI_COUNT;
594 else if (table_id == EFX_FILTER_TABLE_RX_DEF)
595 match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
596 }
597 604
598 return match_pri << EFX_FILTER_INDEX_WIDTH | index; 605 return range << EFX_FILTER_INDEX_WIDTH | index;
599} 606}
600 607
601static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id) 608static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
602{ 609{
603 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH; 610 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
604 611
605 switch (match_pri) { 612 if (range < ARRAY_SIZE(efx_filter_range_table))
606 case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP: 613 return efx_filter_range_table[range];
607 return EFX_FILTER_TABLE_RX_MAC; 614 else
608 case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP: 615 return EFX_FILTER_TABLE_COUNT; /* invalid */
609 return EFX_FILTER_TABLE_RX_DEF;
610 default:
611 return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
612 }
613} 616}
614 617
615static inline unsigned int efx_filter_id_index(u32 id) 618static inline unsigned int efx_filter_id_index(u32 id)
@@ -619,12 +622,9 @@ static inline unsigned int efx_filter_id_index(u32 id)
619 622
620static inline u8 efx_filter_id_flags(u32 id) 623static inline u8 efx_filter_id_flags(u32 id)
621{ 624{
622 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH; 625 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
623 626
624 if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE) 627 if (range < EFX_FILTER_MATCH_PRI_COUNT)
625 return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
626 else if (match_pri <=
627 EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
628 return EFX_FILTER_FLAG_RX; 628 return EFX_FILTER_FLAG_RX;
629 else 629 else
630 return EFX_FILTER_FLAG_TX; 630 return EFX_FILTER_FLAG_TX;
@@ -633,14 +633,15 @@ static inline u8 efx_filter_id_flags(u32 id)
633u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) 633u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
634{ 634{
635 struct efx_filter_state *state = efx->filter_state; 635 struct efx_filter_state *state = efx->filter_state;
636 unsigned int table_id = EFX_FILTER_TABLE_RX_DEF; 636 unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
637 enum efx_filter_table_id table_id;
637 638
638 do { 639 do {
640 table_id = efx_filter_range_table[range];
639 if (state->table[table_id].size != 0) 641 if (state->table[table_id].size != 0)
640 return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id) 642 return range << EFX_FILTER_INDEX_WIDTH |
641 << EFX_FILTER_INDEX_WIDTH) +
642 state->table[table_id].size; 643 state->table[table_id].size;
643 } while (table_id--); 644 } while (range--);
644 645
645 return 0; 646 return 0;
646} 647}
@@ -718,7 +719,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
718 netif_vdbg(efx, hw, efx->net_dev, 719 netif_vdbg(efx, hw, efx->net_dev,
719 "%s: filter type %d index %d rxq %u set", 720 "%s: filter type %d index %d rxq %u set",
720 __func__, spec->type, filter_idx, spec->dmaq_id); 721 __func__, spec->type, filter_idx, spec->dmaq_id);
721 rc = efx_filter_make_id(table->id, filter_idx, spec->flags); 722 rc = efx_filter_make_id(spec, filter_idx);
722 723
723out: 724out:
724 spin_unlock_bh(&state->lock); 725 spin_unlock_bh(&state->lock);
@@ -781,8 +782,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
781 spin_lock_bh(&state->lock); 782 spin_lock_bh(&state->lock);
782 783
783 if (test_bit(filter_idx, table->used_bitmap) && 784 if (test_bit(filter_idx, table->used_bitmap) &&
784 spec->priority == priority && 785 spec->priority == priority) {
785 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
786 efx_filter_table_clear_entry(efx, table, filter_idx); 786 efx_filter_table_clear_entry(efx, table, filter_idx);
787 if (table->used == 0) 787 if (table->used == 0)
788 efx_filter_table_reset_search_depth(table); 788 efx_filter_table_reset_search_depth(table);
@@ -833,8 +833,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
833 spin_lock_bh(&state->lock); 833 spin_lock_bh(&state->lock);
834 834
835 if (test_bit(filter_idx, table->used_bitmap) && 835 if (test_bit(filter_idx, table->used_bitmap) &&
836 spec->priority == priority && 836 spec->priority == priority) {
837 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
838 *spec_buf = *spec; 837 *spec_buf = *spec;
839 rc = 0; 838 rc = 0;
840 } else { 839 } else {
@@ -927,8 +926,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
927 goto out; 926 goto out;
928 } 927 }
929 buf[count++] = efx_filter_make_id( 928 buf[count++] = efx_filter_make_id(
930 table_id, filter_idx, 929 &table->spec[filter_idx], filter_idx);
931 table->spec[filter_idx].flags);
932 } 930 }
933 } 931 }
934 } 932 }
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3c77802aed6c..5cb54723b824 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -61,16 +61,12 @@ enum efx_filter_priority {
61 * according to the indirection table. 61 * according to the indirection table.
62 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving 62 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
63 * queue. 63 * queue.
64 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
65 * any IP filter that matches the same packet. By default, IP
66 * filters take precedence.
67 * @EFX_FILTER_FLAG_RX: Filter is for RX 64 * @EFX_FILTER_FLAG_RX: Filter is for RX
68 * @EFX_FILTER_FLAG_TX: Filter is for TX 65 * @EFX_FILTER_FLAG_TX: Filter is for TX
69 */ 66 */
70enum efx_filter_flags { 67enum efx_filter_flags {
71 EFX_FILTER_FLAG_RX_RSS = 0x01, 68 EFX_FILTER_FLAG_RX_RSS = 0x01,
72 EFX_FILTER_FLAG_RX_SCATTER = 0x02, 69 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
73 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
74 EFX_FILTER_FLAG_RX = 0x08, 70 EFX_FILTER_FLAG_RX = 0x08,
75 EFX_FILTER_FLAG_TX = 0x10, 71 EFX_FILTER_FLAG_TX = 0x10,
76}; 72};
@@ -88,8 +84,7 @@ enum efx_filter_flags {
88 * 84 *
89 * The @priority field is used by software to determine whether a new 85 * The @priority field is used by software to determine whether a new
90 * filter may replace an old one. The hardware priority of a filter 86 * filter may replace an old one. The hardware priority of a filter
91 * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP 87 * depends on the filter type.
92 * flag.
93 */ 88 */
94struct efx_filter_spec { 89struct efx_filter_spec {
95 u8 type:4; 90 u8 type:4;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index fc5e7bbcbc9e..aea43cbd0520 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -320,14 +320,20 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
320 efx_mcdi_complete(mcdi); 320 efx_mcdi_complete(mcdi);
321} 321}
322 322
323/* Issue the given command by writing the data into the shared memory PDU,
324 * ring the doorbell and wait for completion. Copyout the result. */
325int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 323int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
326 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, 324 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
327 size_t *outlen_actual) 325 size_t *outlen_actual)
328{ 326{
327 efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
328 return efx_mcdi_rpc_finish(efx, cmd, inlen,
329 outbuf, outlen, outlen_actual);
330}
331
332void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
333 size_t inlen)
334{
329 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 335 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
330 int rc; 336
331 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 337 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
332 338
333 efx_mcdi_acquire(mcdi); 339 efx_mcdi_acquire(mcdi);
@@ -338,6 +344,15 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
338 spin_unlock_bh(&mcdi->iface_lock); 344 spin_unlock_bh(&mcdi->iface_lock);
339 345
340 efx_mcdi_copyin(efx, cmd, inbuf, inlen); 346 efx_mcdi_copyin(efx, cmd, inbuf, inlen);
347}
348
349int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
350 u8 *outbuf, size_t outlen, size_t *outlen_actual)
351{
352 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
353 int rc;
354
355 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
341 356
342 if (mcdi->mode == MCDI_MODE_POLL) 357 if (mcdi->mode == MCDI_MODE_POLL)
343 rc = efx_mcdi_poll(efx); 358 rc = efx_mcdi_poll(efx);
@@ -563,6 +578,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
563 case MCDI_EVENT_CODE_FLR: 578 case MCDI_EVENT_CODE_FLR:
564 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); 579 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
565 break; 580 break;
581 case MCDI_EVENT_CODE_PTP_RX:
582 case MCDI_EVENT_CODE_PTP_FAULT:
583 case MCDI_EVENT_CODE_PTP_PPS:
584 efx_ptp_event(efx, event);
585 break;
566 586
567 default: 587 default:
568 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 588 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -641,9 +661,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
641 u16 *fw_subtype_list, u32 *capabilities) 661 u16 *fw_subtype_list, u32 *capabilities)
642{ 662{
643 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN]; 663 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
644 size_t outlen; 664 size_t outlen, offset, i;
645 int port_num = efx_port_num(efx); 665 int port_num = efx_port_num(efx);
646 int offset;
647 int rc; 666 int rc;
648 667
649 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 668 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
@@ -663,11 +682,18 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
663 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; 682 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
664 if (mac_address) 683 if (mac_address)
665 memcpy(mac_address, outbuf + offset, ETH_ALEN); 684 memcpy(mac_address, outbuf + offset, ETH_ALEN);
666 if (fw_subtype_list) 685 if (fw_subtype_list) {
667 memcpy(fw_subtype_list, 686 /* Byte-swap and truncate or zero-pad as necessary */
668 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, 687 offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
669 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM * 688 for (i = 0;
670 sizeof(fw_subtype_list[0])); 689 i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
690 i++) {
691 fw_subtype_list[i] =
692 (offset + 2 <= outlen) ?
693 le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
694 offset += 2;
695 }
696 }
671 if (capabilities) { 697 if (capabilities) {
672 if (port_num) 698 if (port_num)
673 *capabilities = MCDI_DWORD(outbuf, 699 *capabilities = MCDI_DWORD(outbuf,
@@ -1169,6 +1195,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
1169 __le32 *qid; 1195 __le32 *qid;
1170 int rc, count; 1196 int rc, count;
1171 1197
1198 BUILD_BUG_ON(EFX_MAX_CHANNELS >
1199 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
1200
1172 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL); 1201 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
1173 if (qid == NULL) 1202 if (qid == NULL)
1174 return -ENOMEM; 1203 return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 0bdf3e331832..3ba2e5b5a9cc 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -71,6 +71,12 @@ extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
71 size_t inlen, u8 *outbuf, size_t outlen, 71 size_t inlen, u8 *outbuf, size_t outlen,
72 size_t *outlen_actual); 72 size_t *outlen_actual);
73 73
74extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
75 const u8 *inbuf, size_t inlen);
76extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
77 u8 *outbuf, size_t outlen,
78 size_t *outlen_actual);
79
74extern int efx_mcdi_poll_reboot(struct efx_nic *efx); 80extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
75extern void efx_mcdi_mode_poll(struct efx_nic *efx); 81extern void efx_mcdi_mode_poll(struct efx_nic *efx);
76extern void efx_mcdi_mode_event(struct efx_nic *efx); 82extern void efx_mcdi_mode_event(struct efx_nic *efx);
@@ -107,11 +113,13 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
107#define MCDI_EVENT_FIELD(_ev, _field) \ 113#define MCDI_EVENT_FIELD(_ev, _field) \
108 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 114 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
109#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ 115#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
110 EFX_DWORD_FIELD( \ 116 EFX_EXTRACT_DWORD( \
111 *((efx_dword_t *) \ 117 *((efx_dword_t *) \
112 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \ 118 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
113 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \ 119 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
114 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2) 120 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
121 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
122 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
115 123
116extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 124extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
117extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 125extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index db4beed97669..9d426d0457bd 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -289,6 +289,7 @@
289#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */ 289#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
290#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */ 290#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
291#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */ 291#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
292#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */
292#define MCDI_EVENT_CMDDONE_DATA_OFST 0 293#define MCDI_EVENT_CMDDONE_DATA_OFST 0
293#define MCDI_EVENT_CMDDONE_DATA_LBN 0 294#define MCDI_EVENT_CMDDONE_DATA_LBN 0
294#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 295#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -491,12 +492,12 @@
491 492
492/* MC_CMD_GET_FPGAREG_OUT msgresponse */ 493/* MC_CMD_GET_FPGAREG_OUT msgresponse */
493#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1 494#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
494#define MC_CMD_GET_FPGAREG_OUT_LENMAX 255 495#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252
495#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num)) 496#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
496#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0 497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1 498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1 499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255 500#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
500 501
501 502
502/***********************************/ 503/***********************************/
@@ -507,13 +508,13 @@
507 508
508/* MC_CMD_PUT_FPGAREG_IN msgrequest */ 509/* MC_CMD_PUT_FPGAREG_IN msgrequest */
509#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5 510#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
510#define MC_CMD_PUT_FPGAREG_IN_LENMAX 255 511#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
511#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num)) 512#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
512#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0 513#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
513#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4 514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1 515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1 516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251 517#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
517 518
518/* MC_CMD_PUT_FPGAREG_OUT msgresponse */ 519/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
519#define MC_CMD_PUT_FPGAREG_OUT_LEN 0 520#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
@@ -560,7 +561,7 @@
560 561
561/* MC_CMD_PTP_IN_TRANSMIT msgrequest */ 562/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
562#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 563#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
563#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 255 564#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
564#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) 565#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
565/* MC_CMD_PTP_IN_CMD_OFST 0 */ 566/* MC_CMD_PTP_IN_CMD_OFST 0 */
566/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 567/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
@@ -568,7 +569,7 @@
568#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 569#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
569#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243 572#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
572 573
573/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ 574/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
574#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 575#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
@@ -1145,7 +1146,7 @@
1145 1146
1146/* MC_CMD_PUTS_IN msgrequest */ 1147/* MC_CMD_PUTS_IN msgrequest */
1147#define MC_CMD_PUTS_IN_LENMIN 13 1148#define MC_CMD_PUTS_IN_LENMIN 13
1148#define MC_CMD_PUTS_IN_LENMAX 255 1149#define MC_CMD_PUTS_IN_LENMAX 252
1149#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) 1150#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
1150#define MC_CMD_PUTS_IN_DEST_OFST 0 1151#define MC_CMD_PUTS_IN_DEST_OFST 0
1151#define MC_CMD_PUTS_IN_UART_LBN 0 1152#define MC_CMD_PUTS_IN_UART_LBN 0
@@ -1157,7 +1158,7 @@
1157#define MC_CMD_PUTS_IN_STRING_OFST 12 1158#define MC_CMD_PUTS_IN_STRING_OFST 12
1158#define MC_CMD_PUTS_IN_STRING_LEN 1 1159#define MC_CMD_PUTS_IN_STRING_LEN 1
1159#define MC_CMD_PUTS_IN_STRING_MINNUM 1 1160#define MC_CMD_PUTS_IN_STRING_MINNUM 1
1160#define MC_CMD_PUTS_IN_STRING_MAXNUM 243 1161#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
1161 1162
1162/* MC_CMD_PUTS_OUT msgresponse */ 1163/* MC_CMD_PUTS_OUT msgresponse */
1163#define MC_CMD_PUTS_OUT_LEN 0 1164#define MC_CMD_PUTS_OUT_LEN 0
@@ -1947,12 +1948,12 @@
1947 1948
1948/* MC_CMD_NVRAM_READ_OUT msgresponse */ 1949/* MC_CMD_NVRAM_READ_OUT msgresponse */
1949#define MC_CMD_NVRAM_READ_OUT_LENMIN 1 1950#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
1950#define MC_CMD_NVRAM_READ_OUT_LENMAX 255 1951#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
1951#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) 1952#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
1952#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 1953#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
1953#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 1954#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
1954#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 1955#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
1955#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255 1956#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
1956 1957
1957 1958
1958/***********************************/ 1959/***********************************/
@@ -1963,7 +1964,7 @@
1963 1964
1964/* MC_CMD_NVRAM_WRITE_IN msgrequest */ 1965/* MC_CMD_NVRAM_WRITE_IN msgrequest */
1965#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 1966#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
1966#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255 1967#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
1967#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) 1968#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
1968#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 1969#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
1969/* Enum values, see field(s): */ 1970/* Enum values, see field(s): */
@@ -1973,7 +1974,7 @@
1973#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 1974#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
1974#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 1975#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
1975#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 1976#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
1976#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243 1977#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
1977 1978
1978/* MC_CMD_NVRAM_WRITE_OUT msgresponse */ 1979/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
1979#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 1980#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
@@ -2305,13 +2306,13 @@
2305 2306
2306/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ 2307/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
2307#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 2308#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
2308#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255 2309#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
2309#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) 2310#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
2310#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 2311#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
2311#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 2312#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
2312#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 2313#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
2313#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 2314#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
2314#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251 2315#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
2315 2316
2316 2317
2317/***********************************/ 2318/***********************************/
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 758148379b0e..08f825b71ac8 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -585,6 +585,7 @@ static const struct siena_nvram_type_info siena_nvram_types[] = {
585 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, 585 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
586 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, 586 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
587 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, 587 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
588 [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
588}; 589};
589 590
590static int siena_mtd_probe_partition(struct efx_nic *efx, 591static int siena_mtd_probe_partition(struct efx_nic *efx,
@@ -598,7 +599,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
598 bool protected; 599 bool protected;
599 int rc; 600 int rc;
600 601
601 if (type >= ARRAY_SIZE(siena_nvram_types)) 602 if (type >= ARRAY_SIZE(siena_nvram_types) ||
603 siena_nvram_types[type].name == NULL)
602 return -ENODEV; 604 return -ENODEV;
603 605
604 info = &siena_nvram_types[type]; 606 info = &siena_nvram_types[type];
@@ -627,7 +629,8 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
627 struct efx_mtd *efx_mtd) 629 struct efx_mtd *efx_mtd)
628{ 630{
629 struct efx_mtd_partition *part; 631 struct efx_mtd_partition *part;
630 uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM]; 632 uint16_t fw_subtype_list[
633 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
631 int rc; 634 int rc;
632 635
633 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); 636 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cd9c0a989692..c1a010cda89b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -37,7 +37,7 @@
37 * 37 *
38 **************************************************************************/ 38 **************************************************************************/
39 39
40#define EFX_DRIVER_VERSION "3.1" 40#define EFX_DRIVER_VERSION "3.2"
41 41
42#ifdef DEBUG 42#ifdef DEBUG
43#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 43#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -56,7 +56,8 @@
56#define EFX_MAX_CHANNELS 32U 56#define EFX_MAX_CHANNELS 32U
57#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 57#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
58#define EFX_EXTRA_CHANNEL_IOV 0 58#define EFX_EXTRA_CHANNEL_IOV 0
59#define EFX_MAX_EXTRA_CHANNELS 1U 59#define EFX_EXTRA_CHANNEL_PTP 1
60#define EFX_MAX_EXTRA_CHANNELS 2U
60 61
61/* Checksum generation is a per-queue option in hardware, so each 62/* Checksum generation is a per-queue option in hardware, so each
62 * queue visible to the networking core is backed by two hardware TX 63 * queue visible to the networking core is backed by two hardware TX
@@ -68,6 +69,9 @@
68#define EFX_TXQ_TYPES 4 69#define EFX_TXQ_TYPES 4
69#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) 70#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
70 71
72/* Forward declare Precision Time Protocol (PTP) support structure. */
73struct efx_ptp_data;
74
71struct efx_self_tests; 75struct efx_self_tests;
72 76
73/** 77/**
@@ -91,29 +95,31 @@ struct efx_special_buffer {
91}; 95};
92 96
93/** 97/**
94 * struct efx_tx_buffer - An Efx TX buffer 98 * struct efx_tx_buffer - buffer state for a TX descriptor
95 * @skb: The associated socket buffer. 99 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
96 * Set only on the final fragment of a packet; %NULL for all other 100 * freed when descriptor completes
97 * fragments. When this fragment completes, then we can free this 101 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
98 * skb. 102 * freed when descriptor completes.
99 * @tsoh: The associated TSO header structure, or %NULL if this
100 * buffer is not a TSO header.
101 * @dma_addr: DMA address of the fragment. 103 * @dma_addr: DMA address of the fragment.
104 * @flags: Flags for allocation and DMA mapping type
102 * @len: Length of this fragment. 105 * @len: Length of this fragment.
103 * This field is zero when the queue slot is empty. 106 * This field is zero when the queue slot is empty.
104 * @continuation: True if this fragment is not the end of a packet.
105 * @unmap_single: True if dma_unmap_single should be used.
106 * @unmap_len: Length of this fragment to unmap 107 * @unmap_len: Length of this fragment to unmap
107 */ 108 */
108struct efx_tx_buffer { 109struct efx_tx_buffer {
109 const struct sk_buff *skb; 110 union {
110 struct efx_tso_header *tsoh; 111 const struct sk_buff *skb;
112 void *heap_buf;
113 };
111 dma_addr_t dma_addr; 114 dma_addr_t dma_addr;
115 unsigned short flags;
112 unsigned short len; 116 unsigned short len;
113 bool continuation;
114 bool unmap_single;
115 unsigned short unmap_len; 117 unsigned short unmap_len;
116}; 118};
119#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
120#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
121#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
122#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
117 123
118/** 124/**
119 * struct efx_tx_queue - An Efx TX queue 125 * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +139,7 @@ struct efx_tx_buffer {
133 * @channel: The associated channel 139 * @channel: The associated channel
134 * @core_txq: The networking core TX queue structure 140 * @core_txq: The networking core TX queue structure
135 * @buffer: The software buffer ring 141 * @buffer: The software buffer ring
142 * @tsoh_page: Array of pages of TSO header buffers
136 * @txd: The hardware descriptor ring 143 * @txd: The hardware descriptor ring
137 * @ptr_mask: The size of the ring minus 1. 144 * @ptr_mask: The size of the ring minus 1.
138 * @initialised: Has hardware queue been initialised? 145 * @initialised: Has hardware queue been initialised?
@@ -156,9 +163,6 @@ struct efx_tx_buffer {
156 * variable indicates that the queue is full. This is to 163 * variable indicates that the queue is full. This is to
157 * avoid cache-line ping-pong between the xmit path and the 164 * avoid cache-line ping-pong between the xmit path and the
158 * completion path. 165 * completion path.
159 * @tso_headers_free: A list of TSO headers allocated for this TX queue
160 * that are not in use, and so available for new TSO sends. The list
161 * is protected by the TX queue lock.
162 * @tso_bursts: Number of times TSO xmit invoked by kernel 166 * @tso_bursts: Number of times TSO xmit invoked by kernel
163 * @tso_long_headers: Number of packets with headers too long for standard 167 * @tso_long_headers: Number of packets with headers too long for standard
164 * blocks 168 * blocks
@@ -175,6 +179,7 @@ struct efx_tx_queue {
175 struct efx_channel *channel; 179 struct efx_channel *channel;
176 struct netdev_queue *core_txq; 180 struct netdev_queue *core_txq;
177 struct efx_tx_buffer *buffer; 181 struct efx_tx_buffer *buffer;
182 struct efx_buffer *tsoh_page;
178 struct efx_special_buffer txd; 183 struct efx_special_buffer txd;
179 unsigned int ptr_mask; 184 unsigned int ptr_mask;
180 bool initialised; 185 bool initialised;
@@ -187,7 +192,6 @@ struct efx_tx_queue {
187 unsigned int insert_count ____cacheline_aligned_in_smp; 192 unsigned int insert_count ____cacheline_aligned_in_smp;
188 unsigned int write_count; 193 unsigned int write_count;
189 unsigned int old_read_count; 194 unsigned int old_read_count;
190 struct efx_tso_header *tso_headers_free;
191 unsigned int tso_bursts; 195 unsigned int tso_bursts;
192 unsigned int tso_long_headers; 196 unsigned int tso_long_headers;
193 unsigned int tso_packets; 197 unsigned int tso_packets;
@@ -242,6 +246,8 @@ struct efx_rx_page_state {
242/** 246/**
243 * struct efx_rx_queue - An Efx RX queue 247 * struct efx_rx_queue - An Efx RX queue
244 * @efx: The associated Efx NIC 248 * @efx: The associated Efx NIC
249 * @core_index: Index of network core RX queue. Will be >= 0 iff this
250 * is associated with a real RX queue.
245 * @buffer: The software buffer ring 251 * @buffer: The software buffer ring
246 * @rxd: The hardware descriptor ring 252 * @rxd: The hardware descriptor ring
247 * @ptr_mask: The size of the ring minus 1. 253 * @ptr_mask: The size of the ring minus 1.
@@ -263,6 +269,7 @@ struct efx_rx_page_state {
263 */ 269 */
264struct efx_rx_queue { 270struct efx_rx_queue {
265 struct efx_nic *efx; 271 struct efx_nic *efx;
272 int core_index;
266 struct efx_rx_buffer *buffer; 273 struct efx_rx_buffer *buffer;
267 struct efx_special_buffer rxd; 274 struct efx_special_buffer rxd;
268 unsigned int ptr_mask; 275 unsigned int ptr_mask;
@@ -390,14 +397,17 @@ struct efx_channel {
390 * @get_name: Generate the channel's name (used for its IRQ handler) 397 * @get_name: Generate the channel's name (used for its IRQ handler)
391 * @copy: Copy the channel state prior to reallocation. May be %NULL if 398 * @copy: Copy the channel state prior to reallocation. May be %NULL if
392 * reallocation is not supported. 399 * reallocation is not supported.
400 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
393 * @keep_eventq: Flag for whether event queue should be kept initialised 401 * @keep_eventq: Flag for whether event queue should be kept initialised
394 * while the device is stopped 402 * while the device is stopped
395 */ 403 */
396struct efx_channel_type { 404struct efx_channel_type {
397 void (*handle_no_channel)(struct efx_nic *); 405 void (*handle_no_channel)(struct efx_nic *);
398 int (*pre_probe)(struct efx_channel *); 406 int (*pre_probe)(struct efx_channel *);
407 void (*post_remove)(struct efx_channel *);
399 void (*get_name)(struct efx_channel *, char *buf, size_t len); 408 void (*get_name)(struct efx_channel *, char *buf, size_t len);
400 struct efx_channel *(*copy)(const struct efx_channel *); 409 struct efx_channel *(*copy)(const struct efx_channel *);
410 void (*receive_skb)(struct efx_channel *, struct sk_buff *);
401 bool keep_eventq; 411 bool keep_eventq;
402}; 412};
403 413
@@ -430,11 +440,9 @@ enum efx_int_mode {
430#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 440#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
431 441
432enum nic_state { 442enum nic_state {
433 STATE_INIT = 0, 443 STATE_UNINIT = 0, /* device being probed/removed or is frozen */
434 STATE_RUNNING = 1, 444 STATE_READY = 1, /* hardware ready and netdev registered */
435 STATE_FINI = 2, 445 STATE_DISABLED = 2, /* device disabled due to hardware errors */
436 STATE_DISABLED = 3,
437 STATE_MAX,
438}; 446};
439 447
440/* 448/*
@@ -654,7 +662,7 @@ struct vfdi_status;
654 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 662 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
655 * @irq_rx_moderation: IRQ moderation time for RX event queues 663 * @irq_rx_moderation: IRQ moderation time for RX event queues
656 * @msg_enable: Log message enable flags 664 * @msg_enable: Log message enable flags
657 * @state: Device state flag. Serialised by the rtnl_lock. 665 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
658 * @reset_pending: Bitmask for pending resets 666 * @reset_pending: Bitmask for pending resets
659 * @tx_queue: TX DMA queues 667 * @tx_queue: TX DMA queues
660 * @rx_queue: RX DMA queues 668 * @rx_queue: RX DMA queues
@@ -664,6 +672,8 @@ struct vfdi_status;
664 * should be allocated for this NIC 672 * should be allocated for this NIC
665 * @rxq_entries: Size of receive queues requested by user. 673 * @rxq_entries: Size of receive queues requested by user.
666 * @txq_entries: Size of transmit queues requested by user. 674 * @txq_entries: Size of transmit queues requested by user.
675 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
676 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
667 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches 677 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
668 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches 678 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
669 * @sram_lim_qw: Qword address limit of SRAM 679 * @sram_lim_qw: Qword address limit of SRAM
@@ -730,6 +740,7 @@ struct vfdi_status;
730 * %local_addr_list. Protected by %local_lock. 740 * %local_addr_list. Protected by %local_lock.
731 * @local_lock: Mutex protecting %local_addr_list and %local_page_list. 741 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
732 * @peer_work: Work item to broadcast peer addresses to VMs. 742 * @peer_work: Work item to broadcast peer addresses to VMs.
743 * @ptp_data: PTP state data
733 * @monitor_work: Hardware monitor workitem 744 * @monitor_work: Hardware monitor workitem
734 * @biu_lock: BIU (bus interface unit) lock 745 * @biu_lock: BIU (bus interface unit) lock
735 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 746 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -774,6 +785,9 @@ struct efx_nic {
774 785
775 unsigned rxq_entries; 786 unsigned rxq_entries;
776 unsigned txq_entries; 787 unsigned txq_entries;
788 unsigned int txq_stop_thresh;
789 unsigned int txq_wake_thresh;
790
777 unsigned tx_dc_base; 791 unsigned tx_dc_base;
778 unsigned rx_dc_base; 792 unsigned rx_dc_base;
779 unsigned sram_lim_qw; 793 unsigned sram_lim_qw;
@@ -854,6 +868,10 @@ struct efx_nic {
854 struct work_struct peer_work; 868 struct work_struct peer_work;
855#endif 869#endif
856 870
871#ifdef CONFIG_SFC_PTP
872 struct efx_ptp_data *ptp_data;
873#endif
874
857 /* The following fields may be written more often */ 875 /* The following fields may be written more often */
858 876
859 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 877 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -1044,7 +1062,7 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
1044 1062
1045static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) 1063static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
1046{ 1064{
1047 return channel->channel < channel->efx->n_rx_channels; 1065 return channel->rx_queue.core_index >= 0;
1048} 1066}
1049 1067
1050static inline struct efx_rx_queue * 1068static inline struct efx_rx_queue *
@@ -1116,5 +1134,13 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
1116#define EFX_MAX_FRAME_LEN(mtu) \ 1134#define EFX_MAX_FRAME_LEN(mtu) \
1117 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) 1135 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
1118 1136
1137static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
1138{
1139 return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
1140}
1141static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
1142{
1143 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1144}
1119 1145
1120#endif /* EFX_NET_DRIVER_H */ 1146#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 326d799762d6..cdff40b65729 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
298/************************************************************************** 298/**************************************************************************
299 * 299 *
300 * Generic buffer handling 300 * Generic buffer handling
301 * These buffers are used for interrupt status and MAC stats 301 * These buffers are used for interrupt status, MAC stats, etc.
302 * 302 *
303 **************************************************************************/ 303 **************************************************************************/
304 304
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
401 ++tx_queue->write_count; 401 ++tx_queue->write_count;
402 402
403 /* Create TX descriptor ring entry */ 403 /* Create TX descriptor ring entry */
404 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
404 EFX_POPULATE_QWORD_4(*txd, 405 EFX_POPULATE_QWORD_4(*txd,
405 FSF_AZ_TX_KER_CONT, buffer->continuation, 406 FSF_AZ_TX_KER_CONT,
407 buffer->flags & EFX_TX_BUF_CONT,
406 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 408 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
407 FSF_AZ_TX_KER_BUF_REGION, 0, 409 FSF_AZ_TX_KER_BUF_REGION, 0,
408 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 410 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index bab5cd9f5740..438cef11f727 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -11,6 +11,7 @@
11#ifndef EFX_NIC_H 11#ifndef EFX_NIC_H
12#define EFX_NIC_H 12#define EFX_NIC_H
13 13
14#include <linux/net_tstamp.h>
14#include <linux/i2c-algo-bit.h> 15#include <linux/i2c-algo-bit.h>
15#include "net_driver.h" 16#include "net_driver.h"
16#include "efx.h" 17#include "efx.h"
@@ -250,6 +251,41 @@ extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
250extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, 251extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
251 bool spoofchk); 252 bool spoofchk);
252 253
254struct ethtool_ts_info;
255#ifdef CONFIG_SFC_PTP
256extern void efx_ptp_probe(struct efx_nic *efx);
257extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
258extern int efx_ptp_get_ts_info(struct net_device *net_dev,
259 struct ethtool_ts_info *ts_info);
260extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
261extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
262extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
263#else
264static inline void efx_ptp_probe(struct efx_nic *efx) {}
265static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
266{
267 return -EOPNOTSUPP;
268}
269static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
270 struct ethtool_ts_info *ts_info)
271{
272 ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
273 SOF_TIMESTAMPING_RX_SOFTWARE);
274 ts_info->phc_index = -1;
275
276 return 0;
277}
278static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
279{
280 return false;
281}
282static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
283{
284 return NETDEV_TX_OK;
285}
286static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
287#endif
288
253extern const struct efx_nic_type falcon_a1_nic_type; 289extern const struct efx_nic_type falcon_a1_nic_type;
254extern const struct efx_nic_type falcon_b0_nic_type; 290extern const struct efx_nic_type falcon_b0_nic_type;
255extern const struct efx_nic_type siena_a0_nic_type; 291extern const struct efx_nic_type siena_a0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
new file mode 100644
index 000000000000..5b3dd028ce85
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -0,0 +1,1484 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/* Theory of operation:
11 *
12 * PTP support is assisted by firmware running on the MC, which provides
13 * the hardware timestamping capabilities. Both transmitted and received
14 * PTP event packets are queued onto internal queues for subsequent processing;
 15 * this is because the MC operations are relatively long and would
 16 * block NAPI/interrupt operation.
17 *
18 * Receive event processing:
19 * The event contains the packet's UUID and sequence number, together
20 * with the hardware timestamp. The PTP receive packet queue is searched
21 * for this UUID/sequence number and, if found, put on a pending queue.
22 * Packets not matching are delivered without timestamps (MCDI events will
23 * always arrive after the actual packet).
24 * It is important for the operation of the PTP protocol that the ordering
25 * of packets between the event and general port is maintained.
26 *
27 * Work queue processing:
28 * If work waiting, synchronise host/hardware time
29 *
30 * Transmit: send packet through MC, which returns the transmission time
31 * that is converted to an appropriate timestamp.
32 *
33 * Receive: the packet's reception time is converted to an appropriate
34 * timestamp.
35 */
36#include <linux/ip.h>
37#include <linux/udp.h>
38#include <linux/time.h>
39#include <linux/ktime.h>
40#include <linux/module.h>
41#include <linux/net_tstamp.h>
42#include <linux/pps_kernel.h>
43#include <linux/ptp_clock_kernel.h>
44#include "net_driver.h"
45#include "efx.h"
46#include "mcdi.h"
47#include "mcdi_pcol.h"
48#include "io.h"
49#include "regs.h"
50#include "nic.h"
51
52/* Maximum number of events expected to make up a PTP event */
53#define MAX_EVENT_FRAGS 3
54
55/* Maximum delay, ms, to begin synchronisation */
56#define MAX_SYNCHRONISE_WAIT_MS 2
57
58/* How long, at most, to spend synchronising */
59#define SYNCHRONISE_PERIOD_NS 250000
60
61/* How often to update the shared memory time */
62#define SYNCHRONISATION_GRANULARITY_NS 200
63
64/* Minimum permitted length of a (corrected) synchronisation time */
65#define MIN_SYNCHRONISATION_NS 120
66
67/* Maximum permitted length of a (corrected) synchronisation time */
68#define MAX_SYNCHRONISATION_NS 1000
69
70/* How many (MC) receive events that can be queued */
71#define MAX_RECEIVE_EVENTS 8
72
73/* Length of (modified) moving average. */
74#define AVERAGE_LENGTH 16
75
76/* How long an unmatched event or packet can be held */
77#define PKT_EVENT_LIFETIME_MS 10
78
79/* Offsets into PTP packet for identification. These offsets are from the
80 * start of the IP header, not the MAC header. Note that neither PTP V1 nor
81 * PTP V2 permit the use of IPV4 options.
82 */
83#define PTP_DPORT_OFFSET 22
84
85#define PTP_V1_VERSION_LENGTH 2
86#define PTP_V1_VERSION_OFFSET 28
87
88#define PTP_V1_UUID_LENGTH 6
89#define PTP_V1_UUID_OFFSET 50
90
91#define PTP_V1_SEQUENCE_LENGTH 2
92#define PTP_V1_SEQUENCE_OFFSET 58
93
94/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
95 * includes IP header.
96 */
97#define PTP_V1_MIN_LENGTH 64
98
99#define PTP_V2_VERSION_LENGTH 1
100#define PTP_V2_VERSION_OFFSET 29
101
 102/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and PortNumber (2),
103 * the MC only captures the last six bytes of the clock identity. These values
104 * reflect those, not the ones used in the standard. The standard permits
105 * mapping of V1 UUIDs to V2 UUIDs with these same values.
106 */
107#define PTP_V2_MC_UUID_LENGTH 6
108#define PTP_V2_MC_UUID_OFFSET 50
109
110#define PTP_V2_SEQUENCE_LENGTH 2
111#define PTP_V2_SEQUENCE_OFFSET 58
112
113/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
114 * includes IP header.
115 */
116#define PTP_V2_MIN_LENGTH 63
117
118#define PTP_MIN_LENGTH 63
119
120#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
121#define PTP_EVENT_PORT 319
122#define PTP_GENERAL_PORT 320
123
124/* Annoyingly the format of the version numbers are different between
125 * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
126 */
127#define PTP_VERSION_V1 1
128
129#define PTP_VERSION_V2 2
130#define PTP_VERSION_V2_MASK 0x0f
131
132enum ptp_packet_state {
133 PTP_PACKET_STATE_UNMATCHED = 0,
134 PTP_PACKET_STATE_MATCHED,
135 PTP_PACKET_STATE_TIMED_OUT,
136 PTP_PACKET_STATE_MATCH_UNWANTED
137};
138
139/* NIC synchronised with single word of time only comprising
140 * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
141 */
142#define MC_NANOSECOND_BITS 30
143#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1)
144#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
145
146/* Maximum parts-per-billion adjustment that is acceptable */
147#define MAX_PPB 1000000
148
149/* Number of bits required to hold the above */
150#define MAX_PPB_BITS 20
151
152/* Number of extra bits allowed when calculating fractional ns.
153 * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
154 * be less than 63.
155 */
156#define PPB_EXTRA_BITS 2
157
158/* Precalculate scale word to avoid long long division at runtime */
159#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
160 MAX_PPB_BITS)) / 1000000000LL)
161
162#define PTP_SYNC_ATTEMPTS 4
163
164/**
165 * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
166 * @words: UUID and (partial) sequence number
167 * @expiry: Time after which the packet should be delivered irrespective of
168 * event arrival.
169 * @state: The state of the packet - whether it is ready for processing or
170 * whether that is of no interest.
171 */
172struct efx_ptp_match {
173 u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
174 unsigned long expiry;
175 enum ptp_packet_state state;
176};
177
178/**
179 * struct efx_ptp_event_rx - A PTP receive event (from MC)
180 * @seq0: First part of (PTP) UUID
181 * @seq1: Second part of (PTP) UUID and sequence number
182 * @hwtimestamp: Event timestamp
183 */
184struct efx_ptp_event_rx {
185 struct list_head link;
186 u32 seq0;
187 u32 seq1;
188 ktime_t hwtimestamp;
189 unsigned long expiry;
190};
191
192/**
193 * struct efx_ptp_timeset - Synchronisation between host and MC
194 * @host_start: Host time immediately before hardware timestamp taken
195 * @seconds: Hardware timestamp, seconds
196 * @nanoseconds: Hardware timestamp, nanoseconds
197 * @host_end: Host time immediately after hardware timestamp taken
198 * @waitns: Number of nanoseconds between hardware timestamp being read and
199 * host end time being seen
200 * @window: Difference of host_end and host_start
201 * @valid: Whether this timeset is valid
202 */
203struct efx_ptp_timeset {
204 u32 host_start;
205 u32 seconds;
206 u32 nanoseconds;
207 u32 host_end;
208 u32 waitns;
209 u32 window; /* Derived: end - start, allowing for wrap */
210};
211
212/**
213 * struct efx_ptp_data - Precision Time Protocol (PTP) state
214 * @channel: The PTP channel
215 * @rxq: Receive queue (awaiting timestamps)
216 * @txq: Transmit queue
217 * @evt_list: List of MC receive events awaiting packets
218 * @evt_free_list: List of free events
219 * @evt_lock: Lock for manipulating evt_list and evt_free_list
220 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
221 * @workwq: Work queue for processing pending PTP operations
222 * @work: Work task
223 * @reset_required: A serious error has occurred and the PTP task needs to be
224 * reset (disable, enable).
 225 * @rxfilter_event: Receive filter for the PTP event port when operating
 226 * @rxfilter_general: Receive filter for the PTP general port when operating
227 * @config: Current timestamp configuration
228 * @enabled: PTP operation enabled
229 * @mode: Mode in which PTP operating (PTP version)
230 * @evt_frags: Partly assembled PTP events
231 * @evt_frag_idx: Current fragment number
232 * @evt_code: Last event code
233 * @start: Address at which MC indicates ready for synchronisation
234 * @host_time_pps: Host time at last PPS
235 * @last_sync_ns: Last number of nanoseconds between readings when synchronising
236 * @base_sync_ns: Number of nanoseconds for last synchronisation.
237 * @base_sync_valid: Whether base_sync_time is valid.
238 * @current_adjfreq: Current ppb adjustment.
239 * @phc_clock: Pointer to registered phc device
240 * @phc_clock_info: Registration structure for phc device
241 * @pps_work: pps work task for handling pps events
242 * @pps_workwq: pps work queue
243 * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
244 * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
245 * allocations in main data path).
246 * @debug_ptp_dir: PTP debugfs directory
 247 * @missed_rx_sync: Number of packets received without synchronisation.
248 * @good_syncs: Number of successful synchronisations.
249 * @no_time_syncs: Number of synchronisations with no good times.
250 * @bad_sync_durations: Number of synchronisations with bad durations.
251 * @bad_syncs: Number of failed synchronisations.
252 * @last_sync_time: Number of nanoseconds for last synchronisation.
253 * @sync_timeouts: Number of synchronisation timeouts
254 * @fast_syncs: Number of synchronisations requiring short delay
255 * @min_sync_delta: Minimum time between event and synchronisation
256 * @max_sync_delta: Maximum time between event and synchronisation
257 * @average_sync_delta: Average time between event and synchronisation.
258 * Modified moving average.
259 * @last_sync_delta: Last time between event and synchronisation
260 * @mc_stats: Context value for MC statistics
261 * @timeset: Last set of synchronisation statistics.
262 */
263struct efx_ptp_data {
264 struct efx_channel *channel;
265 struct sk_buff_head rxq;
266 struct sk_buff_head txq;
267 struct list_head evt_list;
268 struct list_head evt_free_list;
269 spinlock_t evt_lock;
270 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
271 struct workqueue_struct *workwq;
272 struct work_struct work;
273 bool reset_required;
274 u32 rxfilter_event;
275 u32 rxfilter_general;
276 bool rxfilter_installed;
277 struct hwtstamp_config config;
278 bool enabled;
279 unsigned int mode;
280 efx_qword_t evt_frags[MAX_EVENT_FRAGS];
281 int evt_frag_idx;
282 int evt_code;
283 struct efx_buffer start;
284 struct pps_event_time host_time_pps;
285 unsigned last_sync_ns;
286 unsigned base_sync_ns;
287 bool base_sync_valid;
288 s64 current_adjfreq;
289 struct ptp_clock *phc_clock;
290 struct ptp_clock_info phc_clock_info;
291 struct work_struct pps_work;
292 struct workqueue_struct *pps_workwq;
293 bool nic_ts_enabled;
294 u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
295 MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
296 struct efx_ptp_timeset
297 timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
298};
299
300static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
301static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
302static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts);
303static int efx_phc_settime(struct ptp_clock_info *ptp,
304 const struct timespec *e_ts);
305static int efx_phc_enable(struct ptp_clock_info *ptp,
306 struct ptp_clock_request *request, int on);
307
308/* Enable MCDI PTP support. */
309static int efx_ptp_enable(struct efx_nic *efx)
310{
311 u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
312
313 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
314 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
315 efx->ptp_data->channel->channel);
316 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
317
318 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
319 NULL, 0, NULL);
320}
321
322/* Disable MCDI PTP support.
323 *
324 * Note that this function should never rely on the presence of ptp_data -
325 * may be called before that exists.
326 */
327static int efx_ptp_disable(struct efx_nic *efx)
328{
329 u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
330
331 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
332 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
333 NULL, 0, NULL);
334}
335
336static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
337{
338 struct sk_buff *skb;
339
340 while ((skb = skb_dequeue(q))) {
341 local_bh_disable();
342 netif_receive_skb(skb);
343 local_bh_enable();
344 }
345}
346
347static void efx_ptp_handle_no_channel(struct efx_nic *efx)
348{
349 netif_err(efx, drv, efx->net_dev,
350 "ERROR: PTP requires MSI-X and 1 additional interrupt"
351 "vector. PTP disabled\n");
352}
353
354/* Repeatedly send the host time to the MC which will capture the hardware
355 * time.
356 */
357static void efx_ptp_send_times(struct efx_nic *efx,
358 struct pps_event_time *last_time)
359{
360 struct pps_event_time now;
361 struct timespec limit;
362 struct efx_ptp_data *ptp = efx->ptp_data;
363 struct timespec start;
364 int *mc_running = ptp->start.addr;
365
366 pps_get_ts(&now);
367 start = now.ts_real;
368 limit = now.ts_real;
369 timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
370
371 /* Write host time for specified period or until MC is done */
372 while ((timespec_compare(&now.ts_real, &limit) < 0) &&
373 ACCESS_ONCE(*mc_running)) {
374 struct timespec update_time;
375 unsigned int host_time;
376
377 /* Don't update continuously to avoid saturating the PCIe bus */
378 update_time = now.ts_real;
379 timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
380 do {
381 pps_get_ts(&now);
382 } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
383 ACCESS_ONCE(*mc_running));
384
385 /* Synchronise NIC with single word of time only */
386 host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
387 now.ts_real.tv_nsec);
388 /* Update host time in NIC memory */
389 _efx_writed(efx, cpu_to_le32(host_time),
390 FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
391 }
392 *last_time = now;
393}
394
395/* Read a timeset from the MC's results and partial process. */
396static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
397{
398 unsigned start_ns, end_ns;
399
400 timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
401 timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
402 timeset->nanoseconds = MCDI_DWORD(data,
403 PTP_OUT_SYNCHRONIZE_NANOSECONDS);
404 timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
405 timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
406
407 /* Ignore seconds */
408 start_ns = timeset->host_start & MC_NANOSECOND_MASK;
409 end_ns = timeset->host_end & MC_NANOSECOND_MASK;
410 /* Allow for rollover */
411 if (end_ns < start_ns)
412 end_ns += NSEC_PER_SEC;
413 /* Determine duration of operation */
414 timeset->window = end_ns - start_ns;
415}
416
417/* Process times received from MC.
418 *
419 * Extract times from returned results, and establish the minimum value
420 * seen. The minimum value represents the "best" possible time and events
421 * too much greater than this are rejected - the machine is, perhaps, too
422 * busy. A number of readings are taken so that, hopefully, at least one good
423 * synchronisation will be seen in the results.
424 */
425static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
426 size_t response_length,
427 const struct pps_event_time *last_time)
428{
429 unsigned number_readings = (response_length /
430 MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
431 unsigned i;
432 unsigned min;
433 unsigned min_set = 0;
434 unsigned total;
435 unsigned ngood = 0;
436 unsigned last_good = 0;
437 struct efx_ptp_data *ptp = efx->ptp_data;
438 bool min_valid = false;
439 u32 last_sec;
440 u32 start_sec;
441 struct timespec delta;
442
443 if (number_readings == 0)
444 return -EAGAIN;
445
446 /* Find minimum value in this set of results, discarding clearly
447 * erroneous results.
448 */
449 for (i = 0; i < number_readings; i++) {
450 efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
451 synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
452 if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
453 if (min_valid) {
454 if (ptp->timeset[i].window < min_set)
455 min_set = ptp->timeset[i].window;
456 } else {
457 min_valid = true;
458 min_set = ptp->timeset[i].window;
459 }
460 }
461 }
462
463 if (min_valid) {
464 if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
465 min = ptp->base_sync_ns;
466 else
467 min = min_set;
468 } else {
469 min = SYNCHRONISATION_GRANULARITY_NS;
470 }
471
472 /* Discard excessively long synchronise durations. The MC times
473 * when it finishes reading the host time so the corrected window
474 * time should be fairly constant for a given platform.
475 */
476 total = 0;
477 for (i = 0; i < number_readings; i++)
478 if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
479 unsigned win;
480
481 win = ptp->timeset[i].window - ptp->timeset[i].waitns;
482 if (win >= MIN_SYNCHRONISATION_NS &&
483 win < MAX_SYNCHRONISATION_NS) {
484 total += ptp->timeset[i].window;
485 ngood++;
486 last_good = i;
487 }
488 }
489
490 if (ngood == 0) {
491 netif_warn(efx, drv, efx->net_dev,
492 "PTP no suitable synchronisations %dns %dns\n",
493 ptp->base_sync_ns, min_set);
494 return -EAGAIN;
495 }
496
497 /* Average minimum this synchronisation */
498 ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
499 if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
500 ptp->base_sync_valid = true;
501 ptp->base_sync_ns = ptp->last_sync_ns;
502 }
503
504 /* Calculate delay from actual PPS to last_time */
505 delta.tv_nsec =
506 ptp->timeset[last_good].nanoseconds +
507 last_time->ts_real.tv_nsec -
508 (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
509
510 /* It is possible that the seconds rolled over between taking
511 * the start reading and the last value written by the host. The
512 * timescales are such that a gap of more than one second is never
513 * expected.
514 */
515 start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
516 last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
517 if (start_sec != last_sec) {
518 if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
519 netif_warn(efx, hw, efx->net_dev,
520 "PTP bad synchronisation seconds\n");
521 return -EAGAIN;
522 } else {
523 delta.tv_sec = 1;
524 }
525 } else {
526 delta.tv_sec = 0;
527 }
528
529 ptp->host_time_pps = *last_time;
530 pps_sub_ts(&ptp->host_time_pps, delta);
531
532 return 0;
533}
534
535/* Synchronize times between the host and the MC */
536static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
537{
538 struct efx_ptp_data *ptp = efx->ptp_data;
539 u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
540 size_t response_length;
541 int rc;
542 unsigned long timeout;
543 struct pps_event_time last_time = {};
544 unsigned int loops = 0;
545 int *start = ptp->start.addr;
546
547 MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
548 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
549 num_readings);
550 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
551 (u32)ptp->start.dma_addr);
552 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
553 (u32)((u64)ptp->start.dma_addr >> 32));
554
555 /* Clear flag that signals MC ready */
556 ACCESS_ONCE(*start) = 0;
557 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
558 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
559
560 /* Wait for start from MCDI (or timeout) */
561 timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
562 while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
563 udelay(20); /* Usually start MCDI execution quickly */
564 loops++;
565 }
566
567 if (ACCESS_ONCE(*start))
568 efx_ptp_send_times(efx, &last_time);
569
570 /* Collect results */
571 rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
572 MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
573 synch_buf, sizeof(synch_buf),
574 &response_length);
575 if (rc == 0)
576 rc = efx_ptp_process_times(efx, synch_buf, response_length,
577 &last_time);
578
579 return rc;
580}
581
582/* Transmit a PTP packet, via the MCDI interface, to the wire. */
583static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
584{
585 u8 *txbuf = efx->ptp_data->txbuf;
586 struct skb_shared_hwtstamps timestamps;
587 int rc = -EIO;
588 /* MCDI driver requires word aligned lengths */
589 size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
590 u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
591
592 MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
593 MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
594 if (skb_shinfo(skb)->nr_frags != 0) {
595 rc = skb_linearize(skb);
596 if (rc != 0)
597 goto fail;
598 }
599
600 if (skb->ip_summed == CHECKSUM_PARTIAL) {
601 rc = skb_checksum_help(skb);
602 if (rc != 0)
603 goto fail;
604 }
605 skb_copy_from_linear_data(skb,
606 &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
607 len);
608 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
609 sizeof(txtime), &len);
610 if (rc != 0)
611 goto fail;
612
613 memset(&timestamps, 0, sizeof(timestamps));
614 timestamps.hwtstamp = ktime_set(
615 MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
616 MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
617
618 skb_tstamp_tx(skb, &timestamps);
619
620 rc = 0;
621
622fail:
623 dev_kfree_skb(skb);
624
625 return rc;
626}
627
628static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
629{
630 struct efx_ptp_data *ptp = efx->ptp_data;
631 struct list_head *cursor;
632 struct list_head *next;
633
634 /* Drop time-expired events */
635 spin_lock_bh(&ptp->evt_lock);
636 if (!list_empty(&ptp->evt_list)) {
637 list_for_each_safe(cursor, next, &ptp->evt_list) {
638 struct efx_ptp_event_rx *evt;
639
640 evt = list_entry(cursor, struct efx_ptp_event_rx,
641 link);
642 if (time_after(jiffies, evt->expiry)) {
643 list_del(&evt->link);
644 list_add(&evt->link, &ptp->evt_free_list);
645 netif_warn(efx, hw, efx->net_dev,
646 "PTP rx event dropped\n");
647 }
648 }
649 }
650 spin_unlock_bh(&ptp->evt_lock);
651}
652
653static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
654 struct sk_buff *skb)
655{
656 struct efx_ptp_data *ptp = efx->ptp_data;
657 bool evts_waiting;
658 struct list_head *cursor;
659 struct list_head *next;
660 struct efx_ptp_match *match;
661 enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
662
663 spin_lock_bh(&ptp->evt_lock);
664 evts_waiting = !list_empty(&ptp->evt_list);
665 spin_unlock_bh(&ptp->evt_lock);
666
667 if (!evts_waiting)
668 return PTP_PACKET_STATE_UNMATCHED;
669
670 match = (struct efx_ptp_match *)skb->cb;
671 /* Look for a matching timestamp in the event queue */
672 spin_lock_bh(&ptp->evt_lock);
673 list_for_each_safe(cursor, next, &ptp->evt_list) {
674 struct efx_ptp_event_rx *evt;
675
676 evt = list_entry(cursor, struct efx_ptp_event_rx, link);
677 if ((evt->seq0 == match->words[0]) &&
678 (evt->seq1 == match->words[1])) {
679 struct skb_shared_hwtstamps *timestamps;
680
681 /* Match - add in hardware timestamp */
682 timestamps = skb_hwtstamps(skb);
683 timestamps->hwtstamp = evt->hwtimestamp;
684
685 match->state = PTP_PACKET_STATE_MATCHED;
686 rc = PTP_PACKET_STATE_MATCHED;
687 list_del(&evt->link);
688 list_add(&evt->link, &ptp->evt_free_list);
689 break;
690 }
691 }
692 spin_unlock_bh(&ptp->evt_lock);
693
694 return rc;
695}
696
697/* Process any queued receive events and corresponding packets
698 *
699 * q is returned with all the packets that are ready for delivery.
700 * true is returned if at least one of those packets requires
701 * synchronisation.
702 */
703static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
704{
705 struct efx_ptp_data *ptp = efx->ptp_data;
706 bool rc = false;
707 struct sk_buff *skb;
708
709 while ((skb = skb_dequeue(&ptp->rxq))) {
710 struct efx_ptp_match *match;
711
712 match = (struct efx_ptp_match *)skb->cb;
713 if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
714 __skb_queue_tail(q, skb);
715 } else if (efx_ptp_match_rx(efx, skb) ==
716 PTP_PACKET_STATE_MATCHED) {
717 rc = true;
718 __skb_queue_tail(q, skb);
719 } else if (time_after(jiffies, match->expiry)) {
720 match->state = PTP_PACKET_STATE_TIMED_OUT;
721 netif_warn(efx, rx_err, efx->net_dev,
722 "PTP packet - no timestamp seen\n");
723 __skb_queue_tail(q, skb);
724 } else {
725 /* Replace unprocessed entry and stop */
726 skb_queue_head(&ptp->rxq, skb);
727 break;
728 }
729 }
730
731 return rc;
732}
733
734/* Complete processing of a received packet */
735static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
736{
737 local_bh_disable();
738 netif_receive_skb(skb);
739 local_bh_enable();
740}
741
/* Install the PTP RX filters and enable PTP operation on the MC.
 *
 * Returns 0 on success.  On failure, any filter already inserted is
 * removed again and a negative error code is returned.
 */
static int efx_ptp_start(struct efx_nic *efx)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_filter_spec rxfilter;
	int rc;

	ptp->reset_required = false;

	/* Must filter on both event and general ports to ensure
	 * that there is no packet re-ordering.
	 */
	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_EVENT_PORT));
	if (rc != 0)
		return rc;

	/* On success efx_filter_insert_filter() returns the filter ID,
	 * which is remembered so the filter can be removed later.
	 */
	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		return rc;
	ptp->rxfilter_event = rc;

	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_GENERAL_PORT));
	if (rc != 0)
		goto fail;

	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		goto fail;
	ptp->rxfilter_general = rc;

	rc = efx_ptp_enable(efx);
	if (rc != 0)
		goto fail2;

	ptp->evt_frag_idx = 0;
	ptp->current_adjfreq = 0;
	ptp->rxfilter_installed = true;

	return 0;

fail2:
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
				  ptp->rxfilter_general);
fail:
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
				  ptp->rxfilter_event);

	return rc;
}
800
801static int efx_ptp_stop(struct efx_nic *efx)
802{
803 struct efx_ptp_data *ptp = efx->ptp_data;
804 int rc = efx_ptp_disable(efx);
805 struct list_head *cursor;
806 struct list_head *next;
807
808 if (ptp->rxfilter_installed) {
809 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
810 ptp->rxfilter_general);
811 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
812 ptp->rxfilter_event);
813 ptp->rxfilter_installed = false;
814 }
815
816 /* Make sure RX packets are really delivered */
817 efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
818 skb_queue_purge(&efx->ptp_data->txq);
819
820 /* Drop any pending receive events */
821 spin_lock_bh(&efx->ptp_data->evt_lock);
822 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
823 list_del(cursor);
824 list_add(cursor, &efx->ptp_data->evt_free_list);
825 }
826 spin_unlock_bh(&efx->ptp_data->evt_lock);
827
828 return rc;
829}
830
/* Deliver a PPS (pulse-per-second) event to the PTP clock subsystem.
 * Runs on ptp->pps_workwq; host and NIC time are re-synchronized
 * first so that ptp->host_time_pps is fresh.
 */
static void efx_ptp_pps_worker(struct work_struct *work)
{
	struct efx_ptp_data *ptp =
		container_of(work, struct efx_ptp_data, pps_work);
	struct efx_nic *efx = ptp->channel->efx;
	struct ptp_clock_event ptp_evt;

	/* If synchronization fails this PPS event is silently dropped */
	if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
		return;

	ptp_evt.type = PTP_CLOCK_PPSUSR;
	ptp_evt.pps_times = ptp->host_time_pps;
	ptp_clock_event(ptp->phc_clock, &ptp_evt);
}
845
/* Process any pending transmissions and timestamp any received packets.
 */
static void efx_ptp_worker(struct work_struct *work)
{
	struct efx_ptp_data *ptp_data =
		container_of(work, struct efx_ptp_data, work);
	struct efx_nic *efx = ptp_data->channel->efx;
	struct sk_buff *skb;
	struct sk_buff_head tempq;

	/* An earlier failure flagged that PTP needs a full restart */
	if (ptp_data->reset_required) {
		efx_ptp_stop(efx);
		efx_ptp_start(efx);
		return;
	}

	/* Discard receive timestamp events that have expired */
	efx_ptp_drop_time_expired_events(efx);

	/* Collect timestamped RX packets into tempq; if any packet was
	 * matched, or there is TX work queued, transmit pending skbs
	 * via the MC.
	 */
	__skb_queue_head_init(&tempq);
	if (efx_ptp_process_events(efx, &tempq) ||
	    !skb_queue_empty(&ptp_data->txq)) {

		while ((skb = skb_dequeue(&ptp_data->txq)))
			efx_ptp_xmit_skb(efx, skb);
	}

	/* Hand completed receive packets to the network stack */
	while ((skb = __skb_dequeue(&tempq)))
		efx_ptp_process_rx(efx, skb);
}
875
876/* Initialise PTP channel and state.
877 *
878 * Setting core_index to zero causes the queue to be initialised and doesn't
879 * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
880 */
881static int efx_ptp_probe_channel(struct efx_channel *channel)
882{
883 struct efx_nic *efx = channel->efx;
884 struct efx_ptp_data *ptp;
885 int rc = 0;
886 unsigned int pos;
887
888 channel->irq_moderation = 0;
889 channel->rx_queue.core_index = 0;
890
891 ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
892 efx->ptp_data = ptp;
893 if (!efx->ptp_data)
894 return -ENOMEM;
895
896 rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
897 if (rc != 0)
898 goto fail1;
899
900 ptp->channel = channel;
901 skb_queue_head_init(&ptp->rxq);
902 skb_queue_head_init(&ptp->txq);
903 ptp->workwq = create_singlethread_workqueue("sfc_ptp");
904 if (!ptp->workwq) {
905 rc = -ENOMEM;
906 goto fail2;
907 }
908
909 INIT_WORK(&ptp->work, efx_ptp_worker);
910 ptp->config.flags = 0;
911 ptp->config.tx_type = HWTSTAMP_TX_OFF;
912 ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
913 INIT_LIST_HEAD(&ptp->evt_list);
914 INIT_LIST_HEAD(&ptp->evt_free_list);
915 spin_lock_init(&ptp->evt_lock);
916 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
917 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
918
919 ptp->phc_clock_info.owner = THIS_MODULE;
920 snprintf(ptp->phc_clock_info.name,
921 sizeof(ptp->phc_clock_info.name),
922 "%pm", efx->net_dev->perm_addr);
923 ptp->phc_clock_info.max_adj = MAX_PPB;
924 ptp->phc_clock_info.n_alarm = 0;
925 ptp->phc_clock_info.n_ext_ts = 0;
926 ptp->phc_clock_info.n_per_out = 0;
927 ptp->phc_clock_info.pps = 1;
928 ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
929 ptp->phc_clock_info.adjtime = efx_phc_adjtime;
930 ptp->phc_clock_info.gettime = efx_phc_gettime;
931 ptp->phc_clock_info.settime = efx_phc_settime;
932 ptp->phc_clock_info.enable = efx_phc_enable;
933
934 ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
935 &efx->pci_dev->dev);
936 if (!ptp->phc_clock)
937 goto fail3;
938
939 INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
940 ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
941 if (!ptp->pps_workwq) {
942 rc = -ENOMEM;
943 goto fail4;
944 }
945 ptp->nic_ts_enabled = false;
946
947 return 0;
948fail4:
949 ptp_clock_unregister(efx->ptp_data->phc_clock);
950
951fail3:
952 destroy_workqueue(efx->ptp_data->workwq);
953
954fail2:
955 efx_nic_free_buffer(efx, &ptp->start);
956
957fail1:
958 kfree(efx->ptp_data);
959 efx->ptp_data = NULL;
960
961 return rc;
962}
963
964static void efx_ptp_remove_channel(struct efx_channel *channel)
965{
966 struct efx_nic *efx = channel->efx;
967
968 if (!efx->ptp_data)
969 return;
970
971 (void)efx_ptp_disable(channel->efx);
972
973 cancel_work_sync(&efx->ptp_data->work);
974 cancel_work_sync(&efx->ptp_data->pps_work);
975
976 skb_queue_purge(&efx->ptp_data->rxq);
977 skb_queue_purge(&efx->ptp_data->txq);
978
979 ptp_clock_unregister(efx->ptp_data->phc_clock);
980
981 destroy_workqueue(efx->ptp_data->workwq);
982 destroy_workqueue(efx->ptp_data->pps_workwq);
983
984 efx_nic_free_buffer(efx, &efx->ptp_data->start);
985 kfree(efx->ptp_data);
986}
987
/* Name the PTP channel "<nic-name>-ptp" for /proc/interrupts etc. */
static void efx_ptp_get_channel_name(struct efx_channel *channel,
				     char *buf, size_t len)
{
	snprintf(buf, len, "%s-ptp", channel->efx->name);
}
993
/* Determine whether this packet should be processed by the PTP module
 * or transmitted conventionally.
 *
 * The checks are ordered so that ip_hdr()/udp_hdr() are only consulted
 * after the frame is known to be IPv4 (the && short-circuits).
 * NOTE(review): assumes the network/transport header offsets are valid
 * at this point in the TX path -- TODO confirm for all callers.
 */
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
{
	return efx->ptp_data &&
		efx->ptp_data->enabled &&
		skb->len >= PTP_MIN_LENGTH &&
		skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
		likely(skb->protocol == htons(ETH_P_IP)) &&
		ip_hdr(skb)->protocol == IPPROTO_UDP &&
		udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
}
1007
/* Receive a PTP packet.  Packets are queued until the arrival of
 * the receive timestamp from the MC - this will probably occur after the
 * packet arrival because of the processing in the MC.
 */
static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{
	struct efx_nic *efx = channel->efx;
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
	u8 *data;
	unsigned int version;

	/* Give the timestamp event this long to turn up before the
	 * packet is delivered without one.
	 */
	match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);

	/* Correct version?  Packets of the wrong PTP version (or too
	 * short to tell) bypass timestamping and go straight up the
	 * stack.
	 */
	if (ptp->mode == MC_CMD_PTP_MODE_V1) {
		if (skb->len < PTP_V1_MIN_LENGTH) {
			netif_receive_skb(skb);
			return;
		}
		version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
		if (version != PTP_VERSION_V1) {
			netif_receive_skb(skb);
			return;
		}
	} else {
		if (skb->len < PTP_V2_MIN_LENGTH) {
			netif_receive_skb(skb);
			return;
		}
		version = skb->data[PTP_V2_VERSION_OFFSET];

		BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
		/* The matching code below reads the V1 field offsets;
		 * these compile-time checks prove that the V2 (MC
		 * hacked) layout places the same fields at the same
		 * offsets, so the one code path serves both modes.
		 */
		BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
		BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
		BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
		BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);

		if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
			netif_receive_skb(skb);
			return;
		}
	}

	/* Does this packet require timestamping?  Only event-port
	 * traffic is matched against MC timestamp events.
	 */
	if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
		struct skb_shared_hwtstamps *timestamps;

		match->state = PTP_PACKET_STATE_UNMATCHED;

		/* Clear all timestamps held: filled in later */
		timestamps = skb_hwtstamps(skb);
		memset(timestamps, 0, sizeof(*timestamps));

		/* Extract UUID/Sequence information used to pair this
		 * packet with its timestamp event (see efx_ptp_match_rx)
		 */
		data = skb->data + PTP_V1_UUID_OFFSET;
		match->words[0] = (data[0]         |
				   (data[1] << 8)  |
				   (data[2] << 16) |
				   (data[3] << 24));
		match->words[1] = (data[4]         |
				   (data[5] << 8)  |
				   (skb->data[PTP_V1_SEQUENCE_OFFSET +
					      PTP_V1_SEQUENCE_LENGTH - 1] <<
				    16));
	} else {
		match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
	}

	skb_queue_tail(&ptp->rxq, skb);
	queue_work(ptp->workwq, &ptp->work);
}
1080
1081/* Transmit a PTP packet. This has to be transmitted by the MC
1082 * itself, through an MCDI call. MCDI calls aren't permitted
1083 * in the transmit path so defer the actual transmission to a suitable worker.
1084 */
1085int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
1086{
1087 struct efx_ptp_data *ptp = efx->ptp_data;
1088
1089 skb_queue_tail(&ptp->txq, skb);
1090
1091 if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
1092 (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
1093 efx_xmit_hwtstamp_pending(skb);
1094 queue_work(ptp->workwq, &ptp->work);
1095
1096 return NETDEV_TX_OK;
1097}
1098
/* Enable/disable PTP and/or switch the PTP operating mode.
 * A no-op when neither the enabled state nor (when enabling) the mode
 * would change.  efx->ptp_data->enabled is only updated once the
 * requested transition has fully succeeded.
 */
static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
			       unsigned int new_mode)
{
	if ((enable_wanted != efx->ptp_data->enabled) ||
	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
		int rc;

		if (enable_wanted) {
			/* Change of mode requires disable */
			if (efx->ptp_data->enabled &&
			    (efx->ptp_data->mode != new_mode)) {
				efx->ptp_data->enabled = false;
				rc = efx_ptp_stop(efx);
				if (rc != 0)
					return rc;
			}

			/* Set new operating mode and establish
			 * baseline synchronisation, which must
			 * succeed.
			 */
			efx->ptp_data->mode = new_mode;
			rc = efx_ptp_start(efx);
			if (rc == 0) {
				rc = efx_ptp_synchronize(efx,
							 PTP_SYNC_ATTEMPTS * 2);
				if (rc != 0)
					efx_ptp_stop(efx);
			}
		} else {
			rc = efx_ptp_stop(efx);
		}

		if (rc != 0)
			return rc;

		efx->ptp_data->enabled = enable_wanted;
	}

	return 0;
}
1140
1141static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
1142{
1143 bool enable_wanted = false;
1144 unsigned int new_mode;
1145 int rc;
1146
1147 if (init->flags)
1148 return -EINVAL;
1149
1150 if ((init->tx_type != HWTSTAMP_TX_OFF) &&
1151 (init->tx_type != HWTSTAMP_TX_ON))
1152 return -ERANGE;
1153
1154 new_mode = efx->ptp_data->mode;
1155 /* Determine whether any PTP HW operations are required */
1156 switch (init->rx_filter) {
1157 case HWTSTAMP_FILTER_NONE:
1158 break;
1159 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1160 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1161 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1162 init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1163 new_mode = MC_CMD_PTP_MODE_V1;
1164 enable_wanted = true;
1165 break;
1166 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1167 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1168 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1169 /* Although these three are accepted only IPV4 packets will be
1170 * timestamped
1171 */
1172 init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
1173 new_mode = MC_CMD_PTP_MODE_V2;
1174 enable_wanted = true;
1175 break;
1176 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1177 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1178 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1179 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1180 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1181 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1182 /* Non-IP + IPv6 timestamping not supported */
1183 return -ERANGE;
1184 break;
1185 default:
1186 return -ERANGE;
1187 }
1188
1189 if (init->tx_type != HWTSTAMP_TX_OFF)
1190 enable_wanted = true;
1191
1192 rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
1193 if (rc != 0)
1194 return rc;
1195
1196 efx->ptp_data->config = *init;
1197
1198 return 0;
1199}
1200
/* ethtool get_ts_info: report the timestamping capabilities and the
 * PHC device index for this port.  Returns -EOPNOTSUPP when PTP is
 * not available on this NIC.
 */
int
efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_ptp_data *ptp = efx->ptp_data;

	if (!ptp)
		return -EOPNOTSUPP;

	ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE |
				    SOF_TIMESTAMPING_RX_HARDWARE |
				    SOF_TIMESTAMPING_RAW_HARDWARE);
	ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
	ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
	/* Only the L4 (UDP/IPv4) filters are supported; see
	 * efx_ptp_ts_init for the corresponding switch.
	 */
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
			       1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
			       1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
			       1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
			       1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
			       1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
			       1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
	return 0;
}
1224
1225int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
1226{
1227 struct hwtstamp_config config;
1228 int rc;
1229
1230 /* Not a PTP enabled port */
1231 if (!efx->ptp_data)
1232 return -EOPNOTSUPP;
1233
1234 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1235 return -EFAULT;
1236
1237 rc = efx_ptp_ts_init(efx, &config);
1238 if (rc != 0)
1239 return rc;
1240
1241 return copy_to_user(ifr->ifr_data, &config, sizeof(config))
1242 ? -EFAULT : 0;
1243}
1244
/* Report a malformed MC event (wrong number of fragments) and request
 * a PTP restart: the worker performs efx_ptp_stop()/efx_ptp_start()
 * when reset_required is set.
 */
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
{
	struct efx_ptp_data *ptp = efx->ptp_data;

	netif_err(efx, hw, efx->net_dev,
		  "PTP unexpected event length: got %d expected %d\n",
		  ptp->evt_frag_idx, expected_frag_len);
	ptp->reset_required = true;
	queue_work(ptp->workwq, &ptp->work);
}
1255
/* Process a completed receive event.  Put it on the event queue and
 * start the worker thread.  This is required because events and their
 * corresponding packets may come in either order.
 */
static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
	struct efx_ptp_event_rx *evt = NULL;

	/* An RX timestamp event consists of exactly three fragments */
	if (ptp->evt_frag_idx != 3) {
		ptp_event_failure(efx, 3);
		return;
	}

	spin_lock_bh(&ptp->evt_lock);
	if (!list_empty(&ptp->evt_free_list)) {
		evt = list_first_entry(&ptp->evt_free_list,
				       struct efx_ptp_event_rx, link);
		list_del(&evt->link);

		/* Reassemble the packet-matching fields and the
		 * hardware timestamp from the three event fragments.
		 */
		evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
		evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
					     MCDI_EVENT_SRC) |
			     (EFX_QWORD_FIELD(ptp->evt_frags[1],
					      MCDI_EVENT_SRC) << 8) |
			     (EFX_QWORD_FIELD(ptp->evt_frags[0],
					      MCDI_EVENT_SRC) << 16));
		evt->hwtimestamp = ktime_set(
			EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
			EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
		evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
		list_add_tail(&evt->link, &ptp->evt_list);

		queue_work(ptp->workwq, &ptp->work);
	} else {
		/* All MAX_RECEIVE_EVENTS slots are in use; the event
		 * is dropped (its packet will eventually time out).
		 */
		netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
	}
	spin_unlock_bh(&ptp->evt_lock);
}
1294
1295static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
1296{
1297 int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
1298 if (ptp->evt_frag_idx != 1) {
1299 ptp_event_failure(efx, 1);
1300 return;
1301 }
1302
1303 netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
1304}
1305
/* Handle a PPS event: defer to the PPS worker, but only if userspace
 * has enabled PPS events via efx_phc_enable().
 */
static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
	if (ptp->nic_ts_enabled)
		queue_work(ptp->pps_workwq, &ptp->pps_work);
}
1311
/* Accumulate MCDI event fragments for the PTP channel and dispatch the
 * completed event.  An event may span several fragments; the last
 * fragment has the CONT field clear.
 */
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);

	/* NOTE(review): ptp is dereferenced without a NULL check --
	 * assumes PTP events only arrive once ptp_data has been
	 * probed; TODO confirm.
	 */
	if (!ptp->enabled)
		return;

	if (ptp->evt_frag_idx == 0) {
		ptp->evt_code = code;
	} else if (ptp->evt_code != code) {
		/* Fragments of two different events interleaved:
		 * restart accumulation with this fragment.
		 */
		netif_err(efx, hw, efx->net_dev,
			  "PTP out of sequence event %d\n", code);
		ptp->evt_frag_idx = 0;
	}

	ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
	if (!MCDI_EVENT_FIELD(*ev, CONT)) {
		/* Process resulting event */
		switch (code) {
		case MCDI_EVENT_CODE_PTP_RX:
			ptp_event_rx(efx, ptp);
			break;
		case MCDI_EVENT_CODE_PTP_FAULT:
			ptp_event_fault(efx, ptp);
			break;
		case MCDI_EVENT_CODE_PTP_PPS:
			ptp_event_pps(efx, ptp);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "PTP unknown event %d\n", code);
			break;
		}
		ptp->evt_frag_idx = 0;
	} else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
		/* Too many fragments without a terminator: discard */
		netif_err(efx, hw, efx->net_dev,
			  "PTP too many event fragments\n");
		ptp->evt_frag_idx = 0;
	}
}
1353
/* PHC adjfreq: adjust the NIC clock frequency by 'delta' parts per
 * billion (clamped to +/-MAX_PPB), via an MCDI ADJUST request.
 * The applied value is cached in current_adjfreq on success.
 */
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	struct efx_ptp_data *ptp_data = container_of(ptp,
						     struct efx_ptp_data,
						     phc_clock_info);
	struct efx_nic *efx = ptp_data->channel->efx;
	u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
	s64 adjustment_ns;
	int rc;

	if (delta > MAX_PPB)
		delta = MAX_PPB;
	else if (delta < -MAX_PPB)
		delta = -MAX_PPB;

	/* Convert ppb to fixed point ns. */
	adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
			 (PPB_EXTRA_BITS + MAX_PPB_BITS));

	MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
		       (u32)(adjustment_ns >> 32));
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
			  NULL, 0, NULL);
	if (rc != 0)
		return rc;

	ptp_data->current_adjfreq = delta;
	return 0;
}
1387
/* PHC adjtime: step the NIC clock by 'delta' nanoseconds via an MCDI
 * ADJUST request with a zero frequency adjustment.
 */
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct efx_ptp_data *ptp_data = container_of(ptp,
						     struct efx_ptp_data,
						     phc_clock_info);
	struct efx_nic *efx = ptp_data->channel->efx;
	struct timespec delta_ts = ns_to_timespec(delta);
	u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}
1405
/* PHC gettime: read the current NIC time via an MCDI READ_NIC_TIME
 * request and return it in *ts.  Returns 0 or a negative error code.
 */
static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	struct efx_ptp_data *ptp_data = container_of(ptp,
						     struct efx_ptp_data,
						     phc_clock_info);
	struct efx_nic *efx = ptp_data->channel->efx;
	u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
	u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);

	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc != 0)
		return rc;

	ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
	ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
	return 0;
}
1427
1428static int efx_phc_settime(struct ptp_clock_info *ptp,
1429 const struct timespec *e_ts)
1430{
1431 /* Get the current NIC time, efx_phc_gettime.
1432 * Subtract from the desired time to get the offset
1433 * call efx_phc_adjtime with the offset
1434 */
1435 int rc;
1436 struct timespec time_now;
1437 struct timespec delta;
1438
1439 rc = efx_phc_gettime(ptp, &time_now);
1440 if (rc != 0)
1441 return rc;
1442
1443 delta = timespec_sub(*e_ts, time_now);
1444
1445 efx_phc_adjtime(ptp, timespec_to_ns(&delta));
1446 if (rc != 0)
1447 return rc;
1448
1449 return 0;
1450}
1451
1452static int efx_phc_enable(struct ptp_clock_info *ptp,
1453 struct ptp_clock_request *request,
1454 int enable)
1455{
1456 struct efx_ptp_data *ptp_data = container_of(ptp,
1457 struct efx_ptp_data,
1458 phc_clock_info);
1459 if (request->type != PTP_CLK_REQ_PPS)
1460 return -EOPNOTSUPP;
1461
1462 ptp_data->nic_ts_enabled = !!enable;
1463 return 0;
1464}
1465
/* Channel operations for the dedicated PTP channel */
static const struct efx_channel_type efx_ptp_channel_type = {
	.handle_no_channel = efx_ptp_handle_no_channel,
	.pre_probe = efx_ptp_probe_channel,
	.post_remove = efx_ptp_remove_channel,
	.get_name = efx_ptp_get_channel_name,
	/* no copy operation; there is no need to reallocate this channel */
	.receive_skb = efx_ptp_rx,
	.keep_eventq = false,
};
1475
/* Register the PTP channel type if the NIC supports PTP. */
void efx_ptp_probe(struct efx_nic *efx)
{
	/* Check whether PTP is implemented on this NIC.  The DISABLE
	 * operation will succeed if and only if it is implemented.
	 */
	if (efx_ptp_disable(efx) == 0)
		efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
			&efx_ptp_channel_type;
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 719319b89d7a..9e0ad1b75c33 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,7 +479,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
479 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 479 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
480 CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 480 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
481 481
482 skb_record_rx_queue(skb, channel->channel); 482 skb_record_rx_queue(skb, channel->rx_queue.core_index);
483 483
484 gro_result = napi_gro_frags(napi); 484 gro_result = napi_gro_frags(napi);
485 } else { 485 } else {
@@ -571,8 +571,14 @@ static void efx_rx_deliver(struct efx_channel *channel,
571 /* Set the SKB flags */ 571 /* Set the SKB flags */
572 skb_checksum_none_assert(skb); 572 skb_checksum_none_assert(skb);
573 573
574 /* Record the rx_queue */
575 skb_record_rx_queue(skb, channel->rx_queue.core_index);
576
574 /* Pass the packet up */ 577 /* Pass the packet up */
575 netif_receive_skb(skb); 578 if (channel->type->receive_skb)
579 channel->type->receive_skb(channel, skb);
580 else
581 netif_receive_skb(skb);
576 582
577 /* Update allocation strategy method */ 583 /* Update allocation strategy method */
578 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 584 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
@@ -608,13 +614,14 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
608 * at the ethernet header */ 614 * at the ethernet header */
609 skb->protocol = eth_type_trans(skb, efx->net_dev); 615 skb->protocol = eth_type_trans(skb, efx->net_dev);
610 616
611 skb_record_rx_queue(skb, channel->channel); 617 skb_record_rx_queue(skb, channel->rx_queue.core_index);
612 } 618 }
613 619
614 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 620 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
615 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; 621 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
616 622
617 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED))) 623 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
624 !channel->type->receive_skb)
618 efx_rx_packet_gro(channel, rx_buf, eh); 625 efx_rx_packet_gro(channel, rx_buf, eh);
619 else 626 else
620 efx_rx_deliver(channel, rx_buf); 627 efx_rx_deliver(channel, rx_buf);
@@ -624,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel)
624{ 631{
625 enum efx_rx_alloc_method method = rx_alloc_method; 632 enum efx_rx_alloc_method method = rx_alloc_method;
626 633
634 if (channel->type->receive_skb) {
635 channel->rx_alloc_push_pages = false;
636 return;
637 }
638
627 /* Only makes sense to use page based allocation if GRO is enabled */ 639 /* Only makes sense to use page based allocation if GRO is enabled */
628 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { 640 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
629 method = RX_ALLOC_METHOD_SKB; 641 method = RX_ALLOC_METHOD_SKB;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 96068d15b601..ce72ae4f399f 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -614,7 +614,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
614{ 614{
615 enum efx_loopback_mode mode; 615 enum efx_loopback_mode mode;
616 struct efx_loopback_state *state; 616 struct efx_loopback_state *state;
617 struct efx_channel *channel = efx_get_channel(efx, 0); 617 struct efx_channel *channel =
618 efx_get_channel(efx, efx->tx_channel_offset);
618 struct efx_tx_queue *tx_queue; 619 struct efx_tx_queue *tx_queue;
619 int rc = 0; 620 int rc = 0;
620 621
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 6bafd216e55e..84b41bf08a38 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -335,6 +335,7 @@ static int siena_probe_nic(struct efx_nic *efx)
335 goto fail5; 335 goto fail5;
336 336
337 efx_sriov_probe(efx); 337 efx_sriov_probe(efx);
338 efx_ptp_probe(efx);
338 339
339 return 0; 340 return 0;
340 341
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9cb3b84ecae9..d49b53dc2a50 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -21,6 +21,9 @@
21/* Number of longs required to track all the VIs in a VF */ 21/* Number of longs required to track all the VIs in a VF */
22#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX) 22#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
23 23
24/* Maximum number of RX queues supported */
25#define VF_MAX_RX_QUEUES 63
26
24/** 27/**
25 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour 28 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
26 * @VF_TX_FILTER_OFF: Disabled 29 * @VF_TX_FILTER_OFF: Disabled
@@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
578 efx_oword_t reg; 581 efx_oword_t reg;
579 582
580 if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || 583 if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
584 vf_rxq >= VF_MAX_RX_QUEUES ||
581 bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { 585 bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
582 if (net_ratelimit()) 586 if (net_ratelimit())
583 netif_err(efx, hw, efx->net_dev, 587 netif_err(efx, hw, efx->net_dev,
@@ -683,6 +687,9 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
683 __le32 *rxqs; 687 __le32 *rxqs;
684 int rc; 688 int rc;
685 689
690 BUILD_BUG_ON(VF_MAX_RX_QUEUES >
691 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
692
686 rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL); 693 rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
687 if (rxqs == NULL) 694 if (rxqs == NULL)
688 return VFDI_RC_ENOMEM; 695 return VFDI_RC_ENOMEM;
@@ -1028,6 +1035,7 @@ efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
1028static const struct efx_channel_type efx_sriov_channel_type = { 1035static const struct efx_channel_type efx_sriov_channel_type = {
1029 .handle_no_channel = efx_sriov_handle_no_channel, 1036 .handle_no_channel = efx_sriov_handle_no_channel,
1030 .pre_probe = efx_sriov_probe_channel, 1037 .pre_probe = efx_sriov_probe_channel,
1038 .post_remove = efx_channel_dummy_op_void,
1031 .get_name = efx_sriov_get_channel_name, 1039 .get_name = efx_sriov_get_channel_name,
1032 /* no copy operation; channel must not be reallocated */ 1040 /* no copy operation; channel must not be reallocated */
1033 .keep_eventq = true, 1041 .keep_eventq = true,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 18713436b443..5e090e54298e 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -22,14 +22,6 @@
22#include "nic.h" 22#include "nic.h"
23#include "workarounds.h" 23#include "workarounds.h"
24 24
25/*
26 * TX descriptor ring full threshold
27 *
28 * The tx_queue descriptor ring fill-level must fall below this value
29 * before we restart the netif queue
30 */
31#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32
33static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 25static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
34 struct efx_tx_buffer *buffer, 26 struct efx_tx_buffer *buffer,
35 unsigned int *pkts_compl, 27 unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
39 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 31 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
40 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - 32 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
41 buffer->unmap_len); 33 buffer->unmap_len);
42 if (buffer->unmap_single) 34 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
43 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, 35 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
44 DMA_TO_DEVICE); 36 DMA_TO_DEVICE);
45 else 37 else
46 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, 38 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
47 DMA_TO_DEVICE); 39 DMA_TO_DEVICE);
48 buffer->unmap_len = 0; 40 buffer->unmap_len = 0;
49 buffer->unmap_single = false;
50 } 41 }
51 42
52 if (buffer->skb) { 43 if (buffer->flags & EFX_TX_BUF_SKB) {
53 (*pkts_compl)++; 44 (*pkts_compl)++;
54 (*bytes_compl) += buffer->skb->len; 45 (*bytes_compl) += buffer->skb->len;
55 dev_kfree_skb_any((struct sk_buff *) buffer->skb); 46 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
56 buffer->skb = NULL;
57 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, 47 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
58 "TX queue %d transmission id %x complete\n", 48 "TX queue %d transmission id %x complete\n",
59 tx_queue->queue, tx_queue->read_count); 49 tx_queue->queue, tx_queue->read_count);
50 } else if (buffer->flags & EFX_TX_BUF_HEAP) {
51 kfree(buffer->heap_buf);
60 } 52 }
61}
62 53
63/** 54 buffer->len = 0;
64 * struct efx_tso_header - a DMA mapped buffer for packet headers 55 buffer->flags = 0;
65 * @next: Linked list of free ones. 56}
66 * The list is protected by the TX queue lock.
67 * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
68 * @dma_addr: The DMA address of the header below.
69 *
70 * This controls the memory used for a TSO header. Use TSOH_DATA()
71 * to find the packet header data. Use TSOH_SIZE() to calculate the
72 * total size required for a given packet header length. TSO headers
73 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
74 */
75struct efx_tso_header {
76 union {
77 struct efx_tso_header *next;
78 size_t unmap_len;
79 };
80 dma_addr_t dma_addr;
81};
82 57
83static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 58static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
84 struct sk_buff *skb); 59 struct sk_buff *skb);
85static void efx_fini_tso(struct efx_tx_queue *tx_queue);
86static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
87 struct efx_tso_header *tsoh);
88
89static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
90 struct efx_tx_buffer *buffer)
91{
92 if (buffer->tsoh) {
93 if (likely(!buffer->tsoh->unmap_len)) {
94 buffer->tsoh->next = tx_queue->tso_headers_free;
95 tx_queue->tso_headers_free = buffer->tsoh;
96 } else {
97 efx_tsoh_heap_free(tx_queue, buffer->tsoh);
98 }
99 buffer->tsoh = NULL;
100 }
101}
102
103 60
104static inline unsigned 61static inline unsigned
105efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) 62efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
138 return max_descs; 95 return max_descs;
139} 96}
140 97
98/* Get partner of a TX queue, seen as part of the same net core queue */
99static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
100{
101 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
102 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
103 else
104 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
105}
106
107static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
108{
109 /* We need to consider both queues that the net core sees as one */
110 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
111 struct efx_nic *efx = txq1->efx;
112 unsigned int fill_level;
113
114 fill_level = max(txq1->insert_count - txq1->old_read_count,
115 txq2->insert_count - txq2->old_read_count);
116 if (likely(fill_level < efx->txq_stop_thresh))
117 return;
118
119 /* We used the stale old_read_count above, which gives us a
120 * pessimistic estimate of the fill level (which may even
121 * validly be >= efx->txq_entries). Now try again using
122 * read_count (more likely to be a cache miss).
123 *
124 * If we read read_count and then conditionally stop the
125 * queue, it is possible for the completion path to race with
126 * us and complete all outstanding descriptors in the middle,
127 * after which there will be no more completions to wake it.
128 * Therefore we stop the queue first, then read read_count
129 * (with a memory barrier to ensure the ordering), then
130 * restart the queue if the fill level turns out to be low
131 * enough.
132 */
133 netif_tx_stop_queue(txq1->core_txq);
134 smp_mb();
135 txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
136 txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
137
138 fill_level = max(txq1->insert_count - txq1->old_read_count,
139 txq2->insert_count - txq2->old_read_count);
140 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
141 if (likely(fill_level < efx->txq_stop_thresh)) {
142 smp_mb();
143 if (likely(!efx->loopback_selftest))
144 netif_tx_start_queue(txq1->core_txq);
145 }
146}
147
141/* 148/*
142 * Add a socket buffer to a TX queue 149 * Add a socket buffer to a TX queue
143 * 150 *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
151 * This function is split out from efx_hard_start_xmit to allow the 158 * This function is split out from efx_hard_start_xmit to allow the
152 * loopback test to direct packets via specific TX queues. 159 * loopback test to direct packets via specific TX queues.
153 * 160 *
154 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 161 * Returns NETDEV_TX_OK.
155 * You must hold netif_tx_lock() to call this function. 162 * You must hold netif_tx_lock() to call this function.
156 */ 163 */
157netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 164netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
160 struct device *dma_dev = &efx->pci_dev->dev; 167 struct device *dma_dev = &efx->pci_dev->dev;
161 struct efx_tx_buffer *buffer; 168 struct efx_tx_buffer *buffer;
162 skb_frag_t *fragment; 169 skb_frag_t *fragment;
163 unsigned int len, unmap_len = 0, fill_level, insert_ptr; 170 unsigned int len, unmap_len = 0, insert_ptr;
164 dma_addr_t dma_addr, unmap_addr = 0; 171 dma_addr_t dma_addr, unmap_addr = 0;
165 unsigned int dma_len; 172 unsigned int dma_len;
166 bool unmap_single; 173 unsigned short dma_flags;
167 int q_space, i = 0; 174 int i = 0;
168 netdev_tx_t rc = NETDEV_TX_OK;
169 175
170 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 176 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
171 177
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
183 return NETDEV_TX_OK; 189 return NETDEV_TX_OK;
184 } 190 }
185 191
186 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
187 q_space = efx->txq_entries - 1 - fill_level;
188
189 /* Map for DMA. Use dma_map_single rather than dma_map_page 192 /* Map for DMA. Use dma_map_single rather than dma_map_page
190 * since this is more efficient on machines with sparse 193 * since this is more efficient on machines with sparse
191 * memory. 194 * memory.
192 */ 195 */
193 unmap_single = true; 196 dma_flags = EFX_TX_BUF_MAP_SINGLE;
194 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); 197 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
195 198
196 /* Process all fragments */ 199 /* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
205 208
206 /* Add to TX queue, splitting across DMA boundaries */ 209 /* Add to TX queue, splitting across DMA boundaries */
207 do { 210 do {
208 if (unlikely(q_space-- <= 0)) {
209 /* It might be that completions have
210 * happened since the xmit path last
211 * checked. Update the xmit path's
212 * copy of read_count.
213 */
214 netif_tx_stop_queue(tx_queue->core_txq);
215 /* This memory barrier protects the
216 * change of queue state from the access
217 * of read_count. */
218 smp_mb();
219 tx_queue->old_read_count =
220 ACCESS_ONCE(tx_queue->read_count);
221 fill_level = (tx_queue->insert_count
222 - tx_queue->old_read_count);
223 q_space = efx->txq_entries - 1 - fill_level;
224 if (unlikely(q_space-- <= 0)) {
225 rc = NETDEV_TX_BUSY;
226 goto unwind;
227 }
228 smp_mb();
229 if (likely(!efx->loopback_selftest))
230 netif_tx_start_queue(
231 tx_queue->core_txq);
232 }
233
234 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
235 buffer = &tx_queue->buffer[insert_ptr]; 212 buffer = &tx_queue->buffer[insert_ptr];
236 efx_tsoh_free(tx_queue, buffer); 213 EFX_BUG_ON_PARANOID(buffer->flags);
237 EFX_BUG_ON_PARANOID(buffer->tsoh);
238 EFX_BUG_ON_PARANOID(buffer->skb);
239 EFX_BUG_ON_PARANOID(buffer->len); 214 EFX_BUG_ON_PARANOID(buffer->len);
240 EFX_BUG_ON_PARANOID(!buffer->continuation);
241 EFX_BUG_ON_PARANOID(buffer->unmap_len); 215 EFX_BUG_ON_PARANOID(buffer->unmap_len);
242 216
243 dma_len = efx_max_tx_len(efx, dma_addr); 217 dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
247 /* Fill out per descriptor fields */ 221 /* Fill out per descriptor fields */
248 buffer->len = dma_len; 222 buffer->len = dma_len;
249 buffer->dma_addr = dma_addr; 223 buffer->dma_addr = dma_addr;
224 buffer->flags = EFX_TX_BUF_CONT;
250 len -= dma_len; 225 len -= dma_len;
251 dma_addr += dma_len; 226 dma_addr += dma_len;
252 ++tx_queue->insert_count; 227 ++tx_queue->insert_count;
253 } while (len); 228 } while (len);
254 229
255 /* Transfer ownership of the unmapping to the final buffer */ 230 /* Transfer ownership of the unmapping to the final buffer */
256 buffer->unmap_single = unmap_single; 231 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
257 buffer->unmap_len = unmap_len; 232 buffer->unmap_len = unmap_len;
258 unmap_len = 0; 233 unmap_len = 0;
259 234
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
264 len = skb_frag_size(fragment); 239 len = skb_frag_size(fragment);
265 i++; 240 i++;
266 /* Map for DMA */ 241 /* Map for DMA */
267 unmap_single = false; 242 dma_flags = 0;
268 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, 243 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
269 DMA_TO_DEVICE); 244 DMA_TO_DEVICE);
270 } 245 }
271 246
272 /* Transfer ownership of the skb to the final buffer */ 247 /* Transfer ownership of the skb to the final buffer */
273 buffer->skb = skb; 248 buffer->skb = skb;
274 buffer->continuation = false; 249 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
275 250
276 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 251 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
277 252
278 /* Pass off to hardware */ 253 /* Pass off to hardware */
279 efx_nic_push_buffers(tx_queue); 254 efx_nic_push_buffers(tx_queue);
280 255
256 efx_tx_maybe_stop_queue(tx_queue);
257
281 return NETDEV_TX_OK; 258 return NETDEV_TX_OK;
282 259
283 dma_err: 260 dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
289 /* Mark the packet as transmitted, and free the SKB ourselves */ 266 /* Mark the packet as transmitted, and free the SKB ourselves */
290 dev_kfree_skb_any(skb); 267 dev_kfree_skb_any(skb);
291 268
292 unwind:
293 /* Work backwards until we hit the original insert pointer value */ 269 /* Work backwards until we hit the original insert pointer value */
294 while (tx_queue->insert_count != tx_queue->write_count) { 270 while (tx_queue->insert_count != tx_queue->write_count) {
295 unsigned int pkts_compl = 0, bytes_compl = 0; 271 unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
297 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 273 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
298 buffer = &tx_queue->buffer[insert_ptr]; 274 buffer = &tx_queue->buffer[insert_ptr];
299 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 275 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
300 buffer->len = 0;
301 } 276 }
302 277
303 /* Free the fragment we were mid-way through pushing */ 278 /* Free the fragment we were mid-way through pushing */
304 if (unmap_len) { 279 if (unmap_len) {
305 if (unmap_single) 280 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
306 dma_unmap_single(dma_dev, unmap_addr, unmap_len, 281 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
307 DMA_TO_DEVICE); 282 DMA_TO_DEVICE);
308 else 283 else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
310 DMA_TO_DEVICE); 285 DMA_TO_DEVICE);
311 } 286 }
312 287
313 return rc; 288 return NETDEV_TX_OK;
314} 289}
315 290
316/* Remove packets from the TX queue 291/* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
340 } 315 }
341 316
342 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); 317 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
343 buffer->continuation = true;
344 buffer->len = 0;
345 318
346 ++tx_queue->read_count; 319 ++tx_queue->read_count;
347 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; 320 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -366,6 +339,12 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
366 339
367 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); 340 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
368 341
342 /* PTP "event" packet */
343 if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
344 unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
345 return efx_ptp_tx(efx, skb);
346 }
347
369 index = skb_get_queue_mapping(skb); 348 index = skb_get_queue_mapping(skb);
370 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; 349 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
371 if (index >= efx->n_tx_channels) { 350 if (index >= efx->n_tx_channels) {
@@ -450,6 +429,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
450{ 429{
451 unsigned fill_level; 430 unsigned fill_level;
452 struct efx_nic *efx = tx_queue->efx; 431 struct efx_nic *efx = tx_queue->efx;
432 struct efx_tx_queue *txq2;
453 unsigned int pkts_compl = 0, bytes_compl = 0; 433 unsigned int pkts_compl = 0, bytes_compl = 0;
454 434
455 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 435 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +437,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
457 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 437 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
458 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 438 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
459 439
460 /* See if we need to restart the netif queue. This barrier 440 /* See if we need to restart the netif queue. This memory
461 * separates the update of read_count from the test of the 441 * barrier ensures that we write read_count (inside
462 * queue state. */ 442 * efx_dequeue_buffers()) before reading the queue status.
443 */
463 smp_mb(); 444 smp_mb();
464 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 445 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
465 likely(efx->port_enabled) && 446 likely(efx->port_enabled) &&
466 likely(netif_device_present(efx->net_dev))) { 447 likely(netif_device_present(efx->net_dev))) {
467 fill_level = tx_queue->insert_count - tx_queue->read_count; 448 txq2 = efx_tx_queue_partner(tx_queue);
468 if (fill_level < EFX_TXQ_THRESHOLD(efx)) 449 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
450 txq2->insert_count - txq2->read_count);
451 if (fill_level <= efx->txq_wake_thresh)
469 netif_tx_wake_queue(tx_queue->core_txq); 452 netif_tx_wake_queue(tx_queue->core_txq);
470 } 453 }
471 454
@@ -480,11 +463,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
480 } 463 }
481} 464}
482 465
466/* Size of page-based TSO header buffers. Larger blocks must be
467 * allocated from the heap.
468 */
469#define TSOH_STD_SIZE 128
470#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
471
472/* At most half the descriptors in the queue at any time will refer to
473 * a TSO header buffer, since they must always be followed by a
474 * payload descriptor referring to an skb.
475 */
476static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
477{
478 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
479}
480
483int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) 481int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
484{ 482{
485 struct efx_nic *efx = tx_queue->efx; 483 struct efx_nic *efx = tx_queue->efx;
486 unsigned int entries; 484 unsigned int entries;
487 int i, rc; 485 int rc;
488 486
489 /* Create the smallest power-of-two aligned ring */ 487 /* Create the smallest power-of-two aligned ring */
490 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); 488 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +498,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
500 GFP_KERNEL); 498 GFP_KERNEL);
501 if (!tx_queue->buffer) 499 if (!tx_queue->buffer)
502 return -ENOMEM; 500 return -ENOMEM;
503 for (i = 0; i <= tx_queue->ptr_mask; ++i) 501
504 tx_queue->buffer[i].continuation = true; 502 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
503 tx_queue->tsoh_page =
504 kcalloc(efx_tsoh_page_count(tx_queue),
505 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
506 if (!tx_queue->tsoh_page) {
507 rc = -ENOMEM;
508 goto fail1;
509 }
510 }
505 511
506 /* Allocate hardware ring */ 512 /* Allocate hardware ring */
507 rc = efx_nic_probe_tx(tx_queue); 513 rc = efx_nic_probe_tx(tx_queue);
508 if (rc) 514 if (rc)
509 goto fail; 515 goto fail2;
510 516
511 return 0; 517 return 0;
512 518
513 fail: 519fail2:
520 kfree(tx_queue->tsoh_page);
521 tx_queue->tsoh_page = NULL;
522fail1:
514 kfree(tx_queue->buffer); 523 kfree(tx_queue->buffer);
515 tx_queue->buffer = NULL; 524 tx_queue->buffer = NULL;
516 return rc; 525 return rc;
@@ -546,8 +555,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
546 unsigned int pkts_compl = 0, bytes_compl = 0; 555 unsigned int pkts_compl = 0, bytes_compl = 0;
547 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; 556 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
548 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 557 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
549 buffer->continuation = true;
550 buffer->len = 0;
551 558
552 ++tx_queue->read_count; 559 ++tx_queue->read_count;
553 } 560 }
@@ -568,13 +575,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
568 efx_nic_fini_tx(tx_queue); 575 efx_nic_fini_tx(tx_queue);
569 576
570 efx_release_tx_buffers(tx_queue); 577 efx_release_tx_buffers(tx_queue);
571
572 /* Free up TSO header cache */
573 efx_fini_tso(tx_queue);
574} 578}
575 579
576void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 580void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
577{ 581{
582 int i;
583
578 if (!tx_queue->buffer) 584 if (!tx_queue->buffer)
579 return; 585 return;
580 586
@@ -582,6 +588,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
582 "destroying TX queue %d\n", tx_queue->queue); 588 "destroying TX queue %d\n", tx_queue->queue);
583 efx_nic_remove_tx(tx_queue); 589 efx_nic_remove_tx(tx_queue);
584 590
591 if (tx_queue->tsoh_page) {
592 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
593 efx_nic_free_buffer(tx_queue->efx,
594 &tx_queue->tsoh_page[i]);
595 kfree(tx_queue->tsoh_page);
596 tx_queue->tsoh_page = NULL;
597 }
598
585 kfree(tx_queue->buffer); 599 kfree(tx_queue->buffer);
586 tx_queue->buffer = NULL; 600 tx_queue->buffer = NULL;
587} 601}
@@ -604,22 +618,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
604#define TSOH_OFFSET NET_IP_ALIGN 618#define TSOH_OFFSET NET_IP_ALIGN
605#endif 619#endif
606 620
607#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
608
609/* Total size of struct efx_tso_header, buffer and padding */
610#define TSOH_SIZE(hdr_len) \
611 (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
612
613/* Size of blocks on free list. Larger blocks must be allocated from
614 * the heap.
615 */
616#define TSOH_STD_SIZE 128
617
618#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 621#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
619#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
620#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
621#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
622#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
623 622
624/** 623/**
625 * struct tso_state - TSO state for an SKB 624 * struct tso_state - TSO state for an SKB
@@ -631,10 +630,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
631 * @in_len: Remaining length in current SKB fragment 630 * @in_len: Remaining length in current SKB fragment
632 * @unmap_len: Length of SKB fragment 631 * @unmap_len: Length of SKB fragment
633 * @unmap_addr: DMA address of SKB fragment 632 * @unmap_addr: DMA address of SKB fragment
634 * @unmap_single: DMA single vs page mapping flag 633 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
635 * @protocol: Network protocol (after any VLAN header) 634 * @protocol: Network protocol (after any VLAN header)
635 * @ip_off: Offset of IP header
636 * @tcp_off: Offset of TCP header
636 * @header_len: Number of bytes of header 637 * @header_len: Number of bytes of header
637 * @full_packet_size: Number of bytes to put in each outgoing segment 638 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
638 * 639 *
639 * The state used during segmentation. It is put into this data structure 640 * The state used during segmentation. It is put into this data structure
640 * just to make it easy to pass into inline functions. 641 * just to make it easy to pass into inline functions.
@@ -651,11 +652,13 @@ struct tso_state {
651 unsigned in_len; 652 unsigned in_len;
652 unsigned unmap_len; 653 unsigned unmap_len;
653 dma_addr_t unmap_addr; 654 dma_addr_t unmap_addr;
654 bool unmap_single; 655 unsigned short dma_flags;
655 656
656 __be16 protocol; 657 __be16 protocol;
658 unsigned int ip_off;
659 unsigned int tcp_off;
657 unsigned header_len; 660 unsigned header_len;
658 int full_packet_size; 661 unsigned int ip_base_len;
659}; 662};
660 663
661 664
@@ -687,91 +690,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
687 return protocol; 690 return protocol;
688} 691}
689 692
690 693static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
691/* 694 struct efx_tx_buffer *buffer, unsigned int len)
692 * Allocate a page worth of efx_tso_header structures, and string them
693 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
694 */
695static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
696{ 695{
697 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 696 u8 *result;
698 struct efx_tso_header *tsoh;
699 dma_addr_t dma_addr;
700 u8 *base_kva, *kva;
701 697
702 base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC); 698 EFX_BUG_ON_PARANOID(buffer->len);
703 if (base_kva == NULL) { 699 EFX_BUG_ON_PARANOID(buffer->flags);
704 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, 700 EFX_BUG_ON_PARANOID(buffer->unmap_len);
705 "Unable to allocate page for TSO headers\n");
706 return -ENOMEM;
707 }
708
709 /* dma_alloc_coherent() allocates pages. */
710 EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
711
712 for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
713 tsoh = (struct efx_tso_header *)kva;
714 tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
715 tsoh->next = tx_queue->tso_headers_free;
716 tx_queue->tso_headers_free = tsoh;
717 }
718
719 return 0;
720}
721
722
723/* Free up a TSO header, and all others in the same page. */
724static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
725 struct efx_tso_header *tsoh,
726 struct device *dma_dev)
727{
728 struct efx_tso_header **p;
729 unsigned long base_kva;
730 dma_addr_t base_dma;
731
732 base_kva = (unsigned long)tsoh & PAGE_MASK;
733 base_dma = tsoh->dma_addr & PAGE_MASK;
734
735 p = &tx_queue->tso_headers_free;
736 while (*p != NULL) {
737 if (((unsigned long)*p & PAGE_MASK) == base_kva)
738 *p = (*p)->next;
739 else
740 p = &(*p)->next;
741 }
742 701
743 dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma); 702 if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
744} 703 unsigned index =
704 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
705 struct efx_buffer *page_buf =
706 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
707 unsigned offset =
708 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
709
710 if (unlikely(!page_buf->addr) &&
711 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
712 return NULL;
713
714 result = (u8 *)page_buf->addr + offset;
715 buffer->dma_addr = page_buf->dma_addr + offset;
716 buffer->flags = EFX_TX_BUF_CONT;
717 } else {
718 tx_queue->tso_long_headers++;
745 719
746static struct efx_tso_header * 720 buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
747efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) 721 if (unlikely(!buffer->heap_buf))
748{ 722 return NULL;
749 struct efx_tso_header *tsoh; 723 result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
750 724 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
751 tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
752 if (unlikely(!tsoh))
753 return NULL;
754
755 tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
756 TSOH_BUFFER(tsoh), header_len,
757 DMA_TO_DEVICE);
758 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
759 tsoh->dma_addr))) {
760 kfree(tsoh);
761 return NULL;
762 } 725 }
763 726
764 tsoh->unmap_len = header_len; 727 buffer->len = len;
765 return tsoh;
766}
767 728
768static void 729 return result;
769efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
770{
771 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
772 tsoh->dma_addr, tsoh->unmap_len,
773 DMA_TO_DEVICE);
774 kfree(tsoh);
775} 730}
776 731
777/** 732/**
@@ -781,47 +736,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
781 * @len: Length of fragment 736 * @len: Length of fragment
782 * @final_buffer: The final buffer inserted into the queue 737 * @final_buffer: The final buffer inserted into the queue
783 * 738 *
784 * Push descriptors onto the TX queue. Return 0 on success or 1 if 739 * Push descriptors onto the TX queue.
785 * @tx_queue full.
786 */ 740 */
787static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 741static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
788 dma_addr_t dma_addr, unsigned len, 742 dma_addr_t dma_addr, unsigned len,
789 struct efx_tx_buffer **final_buffer) 743 struct efx_tx_buffer **final_buffer)
790{ 744{
791 struct efx_tx_buffer *buffer; 745 struct efx_tx_buffer *buffer;
792 struct efx_nic *efx = tx_queue->efx; 746 struct efx_nic *efx = tx_queue->efx;
793 unsigned dma_len, fill_level, insert_ptr; 747 unsigned dma_len, insert_ptr;
794 int q_space;
795 748
796 EFX_BUG_ON_PARANOID(len <= 0); 749 EFX_BUG_ON_PARANOID(len <= 0);
797 750
798 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
799 /* -1 as there is no way to represent all descriptors used */
800 q_space = efx->txq_entries - 1 - fill_level;
801
802 while (1) { 751 while (1) {
803 if (unlikely(q_space-- <= 0)) {
804 /* It might be that completions have happened
805 * since the xmit path last checked. Update
806 * the xmit path's copy of read_count.
807 */
808 netif_tx_stop_queue(tx_queue->core_txq);
809 /* This memory barrier protects the change of
810 * queue state from the access of read_count. */
811 smp_mb();
812 tx_queue->old_read_count =
813 ACCESS_ONCE(tx_queue->read_count);
814 fill_level = (tx_queue->insert_count
815 - tx_queue->old_read_count);
816 q_space = efx->txq_entries - 1 - fill_level;
817 if (unlikely(q_space-- <= 0)) {
818 *final_buffer = NULL;
819 return 1;
820 }
821 smp_mb();
822 netif_tx_start_queue(tx_queue->core_txq);
823 }
824
825 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 752 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
826 buffer = &tx_queue->buffer[insert_ptr]; 753 buffer = &tx_queue->buffer[insert_ptr];
827 ++tx_queue->insert_count; 754 ++tx_queue->insert_count;
@@ -830,12 +757,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
830 tx_queue->read_count >= 757 tx_queue->read_count >=
831 efx->txq_entries); 758 efx->txq_entries);
832 759
833 efx_tsoh_free(tx_queue, buffer);
834 EFX_BUG_ON_PARANOID(buffer->len); 760 EFX_BUG_ON_PARANOID(buffer->len);
835 EFX_BUG_ON_PARANOID(buffer->unmap_len); 761 EFX_BUG_ON_PARANOID(buffer->unmap_len);
836 EFX_BUG_ON_PARANOID(buffer->skb); 762 EFX_BUG_ON_PARANOID(buffer->flags);
837 EFX_BUG_ON_PARANOID(!buffer->continuation);
838 EFX_BUG_ON_PARANOID(buffer->tsoh);
839 763
840 buffer->dma_addr = dma_addr; 764 buffer->dma_addr = dma_addr;
841 765
@@ -845,7 +769,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
845 if (dma_len >= len) 769 if (dma_len >= len)
846 break; 770 break;
847 771
848 buffer->len = dma_len; /* Don't set the other members */ 772 buffer->len = dma_len;
773 buffer->flags = EFX_TX_BUF_CONT;
849 dma_addr += dma_len; 774 dma_addr += dma_len;
850 len -= dma_len; 775 len -= dma_len;
851 } 776 }
@@ -853,7 +778,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
853 EFX_BUG_ON_PARANOID(!len); 778 EFX_BUG_ON_PARANOID(!len);
854 buffer->len = len; 779 buffer->len = len;
855 *final_buffer = buffer; 780 *final_buffer = buffer;
856 return 0;
857} 781}
858 782
859 783
@@ -864,54 +788,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
864 * a single fragment, and we know it doesn't cross a page boundary. It 788 * a single fragment, and we know it doesn't cross a page boundary. It
865 * also allows us to not worry about end-of-packet etc. 789 * also allows us to not worry about end-of-packet etc.
866 */ 790 */
867static void efx_tso_put_header(struct efx_tx_queue *tx_queue, 791static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
868 struct efx_tso_header *tsoh, unsigned len) 792 struct efx_tx_buffer *buffer, u8 *header)
869{ 793{
870 struct efx_tx_buffer *buffer; 794 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
871 795 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
872 buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; 796 header, buffer->len,
873 efx_tsoh_free(tx_queue, buffer); 797 DMA_TO_DEVICE);
874 EFX_BUG_ON_PARANOID(buffer->len); 798 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
875 EFX_BUG_ON_PARANOID(buffer->unmap_len); 799 buffer->dma_addr))) {
876 EFX_BUG_ON_PARANOID(buffer->skb); 800 kfree(buffer->heap_buf);
877 EFX_BUG_ON_PARANOID(!buffer->continuation); 801 buffer->len = 0;
878 EFX_BUG_ON_PARANOID(buffer->tsoh); 802 buffer->flags = 0;
879 buffer->len = len; 803 return -ENOMEM;
880 buffer->dma_addr = tsoh->dma_addr; 804 }
881 buffer->tsoh = tsoh; 805 buffer->unmap_len = buffer->len;
806 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
807 }
882 808
883 ++tx_queue->insert_count; 809 ++tx_queue->insert_count;
810 return 0;
884} 811}
885 812
886 813
887/* Remove descriptors put into a tx_queue. */ 814/* Remove buffers put into a tx_queue. None of the buffers must have
815 * an skb attached.
816 */
888static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 817static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
889{ 818{
890 struct efx_tx_buffer *buffer; 819 struct efx_tx_buffer *buffer;
891 dma_addr_t unmap_addr;
892 820
893 /* Work backwards until we hit the original insert pointer value */ 821 /* Work backwards until we hit the original insert pointer value */
894 while (tx_queue->insert_count != tx_queue->write_count) { 822 while (tx_queue->insert_count != tx_queue->write_count) {
895 --tx_queue->insert_count; 823 --tx_queue->insert_count;
896 buffer = &tx_queue->buffer[tx_queue->insert_count & 824 buffer = &tx_queue->buffer[tx_queue->insert_count &
897 tx_queue->ptr_mask]; 825 tx_queue->ptr_mask];
898 efx_tsoh_free(tx_queue, buffer); 826 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
899 EFX_BUG_ON_PARANOID(buffer->skb);
900 if (buffer->unmap_len) {
901 unmap_addr = (buffer->dma_addr + buffer->len -
902 buffer->unmap_len);
903 if (buffer->unmap_single)
904 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
905 unmap_addr, buffer->unmap_len,
906 DMA_TO_DEVICE);
907 else
908 dma_unmap_page(&tx_queue->efx->pci_dev->dev,
909 unmap_addr, buffer->unmap_len,
910 DMA_TO_DEVICE);
911 buffer->unmap_len = 0;
912 }
913 buffer->len = 0;
914 buffer->continuation = true;
915 } 827 }
916} 828}
917 829
@@ -919,17 +831,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
919/* Parse the SKB header and initialise state. */ 831/* Parse the SKB header and initialise state. */
920static void tso_start(struct tso_state *st, const struct sk_buff *skb) 832static void tso_start(struct tso_state *st, const struct sk_buff *skb)
921{ 833{
922 /* All ethernet/IP/TCP headers combined size is TCP header size 834 st->ip_off = skb_network_header(skb) - skb->data;
923 * plus offset of TCP header relative to start of packet. 835 st->tcp_off = skb_transport_header(skb) - skb->data;
924 */ 836 st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
925 st->header_len = ((tcp_hdr(skb)->doff << 2u) 837 if (st->protocol == htons(ETH_P_IP)) {
926 + PTR_DIFF(tcp_hdr(skb), skb->data)); 838 st->ip_base_len = st->header_len - st->ip_off;
927 st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
928
929 if (st->protocol == htons(ETH_P_IP))
930 st->ipv4_id = ntohs(ip_hdr(skb)->id); 839 st->ipv4_id = ntohs(ip_hdr(skb)->id);
931 else 840 } else {
841 st->ip_base_len = st->header_len - st->tcp_off;
932 st->ipv4_id = 0; 842 st->ipv4_id = 0;
843 }
933 st->seqnum = ntohl(tcp_hdr(skb)->seq); 844 st->seqnum = ntohl(tcp_hdr(skb)->seq);
934 845
935 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 846 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -938,7 +849,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
938 849
939 st->out_len = skb->len - st->header_len; 850 st->out_len = skb->len - st->header_len;
940 st->unmap_len = 0; 851 st->unmap_len = 0;
941 st->unmap_single = false; 852 st->dma_flags = 0;
942} 853}
943 854
944static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 855static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +858,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
947 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, 858 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
948 skb_frag_size(frag), DMA_TO_DEVICE); 859 skb_frag_size(frag), DMA_TO_DEVICE);
949 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 860 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
950 st->unmap_single = false; 861 st->dma_flags = 0;
951 st->unmap_len = skb_frag_size(frag); 862 st->unmap_len = skb_frag_size(frag);
952 st->in_len = skb_frag_size(frag); 863 st->in_len = skb_frag_size(frag);
953 st->dma_addr = st->unmap_addr; 864 st->dma_addr = st->unmap_addr;
@@ -965,7 +876,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
965 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, 876 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
966 len, DMA_TO_DEVICE); 877 len, DMA_TO_DEVICE);
967 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 878 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
968 st->unmap_single = true; 879 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
969 st->unmap_len = len; 880 st->unmap_len = len;
970 st->in_len = len; 881 st->in_len = len;
971 st->dma_addr = st->unmap_addr; 882 st->dma_addr = st->unmap_addr;
@@ -982,20 +893,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
982 * @st: TSO state 893 * @st: TSO state
983 * 894 *
984 * Form descriptors for the current fragment, until we reach the end 895 * Form descriptors for the current fragment, until we reach the end
985 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 896 * of fragment or end-of-packet.
986 * space in @tx_queue.
987 */ 897 */
988static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 898static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
989 const struct sk_buff *skb, 899 const struct sk_buff *skb,
990 struct tso_state *st) 900 struct tso_state *st)
991{ 901{
992 struct efx_tx_buffer *buffer; 902 struct efx_tx_buffer *buffer;
993 int n, end_of_packet, rc; 903 int n;
994 904
995 if (st->in_len == 0) 905 if (st->in_len == 0)
996 return 0; 906 return;
997 if (st->packet_space == 0) 907 if (st->packet_space == 0)
998 return 0; 908 return;
999 909
1000 EFX_BUG_ON_PARANOID(st->in_len <= 0); 910 EFX_BUG_ON_PARANOID(st->in_len <= 0);
1001 EFX_BUG_ON_PARANOID(st->packet_space <= 0); 911 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +916,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1006 st->out_len -= n; 916 st->out_len -= n;
1007 st->in_len -= n; 917 st->in_len -= n;
1008 918
1009 rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); 919 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
1010 if (likely(rc == 0)) {
1011 if (st->out_len == 0)
1012 /* Transfer ownership of the skb */
1013 buffer->skb = skb;
1014 920
1015 end_of_packet = st->out_len == 0 || st->packet_space == 0; 921 if (st->out_len == 0) {
1016 buffer->continuation = !end_of_packet; 922 /* Transfer ownership of the skb */
923 buffer->skb = skb;
924 buffer->flags = EFX_TX_BUF_SKB;
925 } else if (st->packet_space != 0) {
926 buffer->flags = EFX_TX_BUF_CONT;
927 }
1017 928
1018 if (st->in_len == 0) { 929 if (st->in_len == 0) {
1019 /* Transfer ownership of the DMA mapping */ 930 /* Transfer ownership of the DMA mapping */
1020 buffer->unmap_len = st->unmap_len; 931 buffer->unmap_len = st->unmap_len;
1021 buffer->unmap_single = st->unmap_single; 932 buffer->flags |= st->dma_flags;
1022 st->unmap_len = 0; 933 st->unmap_len = 0;
1023 }
1024 } 934 }
1025 935
1026 st->dma_addr += n; 936 st->dma_addr += n;
1027 return rc;
1028} 937}
1029 938
1030 939
@@ -1035,36 +944,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1035 * @st: TSO state 944 * @st: TSO state
1036 * 945 *
1037 * Generate a new header and prepare for the new packet. Return 0 on 946 * Generate a new header and prepare for the new packet. Return 0 on
1038 * success, or -1 if failed to alloc header. 947 * success, or -%ENOMEM if failed to alloc header.
1039 */ 948 */
1040static int tso_start_new_packet(struct efx_tx_queue *tx_queue, 949static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1041 const struct sk_buff *skb, 950 const struct sk_buff *skb,
1042 struct tso_state *st) 951 struct tso_state *st)
1043{ 952{
1044 struct efx_tso_header *tsoh; 953 struct efx_tx_buffer *buffer =
954 &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
1045 struct tcphdr *tsoh_th; 955 struct tcphdr *tsoh_th;
1046 unsigned ip_length; 956 unsigned ip_length;
1047 u8 *header; 957 u8 *header;
958 int rc;
1048 959
1049 /* Allocate a DMA-mapped header buffer. */ 960 /* Allocate and insert a DMA-mapped header buffer. */
1050 if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { 961 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1051 if (tx_queue->tso_headers_free == NULL) { 962 if (!header)
1052 if (efx_tsoh_block_alloc(tx_queue)) 963 return -ENOMEM;
1053 return -1;
1054 }
1055 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
1056 tsoh = tx_queue->tso_headers_free;
1057 tx_queue->tso_headers_free = tsoh->next;
1058 tsoh->unmap_len = 0;
1059 } else {
1060 tx_queue->tso_long_headers++;
1061 tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
1062 if (unlikely(!tsoh))
1063 return -1;
1064 }
1065 964
1066 header = TSOH_BUFFER(tsoh); 965 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1067 tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
1068 966
1069 /* Copy and update the headers. */ 967 /* Copy and update the headers. */
1070 memcpy(header, skb->data, st->header_len); 968 memcpy(header, skb->data, st->header_len);
@@ -1073,19 +971,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1073 st->seqnum += skb_shinfo(skb)->gso_size; 971 st->seqnum += skb_shinfo(skb)->gso_size;
1074 if (st->out_len > skb_shinfo(skb)->gso_size) { 972 if (st->out_len > skb_shinfo(skb)->gso_size) {
1075 /* This packet will not finish the TSO burst. */ 973 /* This packet will not finish the TSO burst. */
1076 ip_length = st->full_packet_size - ETH_HDR_LEN(skb); 974 st->packet_space = skb_shinfo(skb)->gso_size;
1077 tsoh_th->fin = 0; 975 tsoh_th->fin = 0;
1078 tsoh_th->psh = 0; 976 tsoh_th->psh = 0;
1079 } else { 977 } else {
1080 /* This packet will be the last in the TSO burst. */ 978 /* This packet will be the last in the TSO burst. */
1081 ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; 979 st->packet_space = st->out_len;
1082 tsoh_th->fin = tcp_hdr(skb)->fin; 980 tsoh_th->fin = tcp_hdr(skb)->fin;
1083 tsoh_th->psh = tcp_hdr(skb)->psh; 981 tsoh_th->psh = tcp_hdr(skb)->psh;
1084 } 982 }
983 ip_length = st->ip_base_len + st->packet_space;
1085 984
1086 if (st->protocol == htons(ETH_P_IP)) { 985 if (st->protocol == htons(ETH_P_IP)) {
1087 struct iphdr *tsoh_iph = 986 struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
1088 (struct iphdr *)(header + SKB_IPV4_OFF(skb));
1089 987
1090 tsoh_iph->tot_len = htons(ip_length); 988 tsoh_iph->tot_len = htons(ip_length);
1091 989
@@ -1094,16 +992,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1094 st->ipv4_id++; 992 st->ipv4_id++;
1095 } else { 993 } else {
1096 struct ipv6hdr *tsoh_iph = 994 struct ipv6hdr *tsoh_iph =
1097 (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); 995 (struct ipv6hdr *)(header + st->ip_off);
1098 996
1099 tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); 997 tsoh_iph->payload_len = htons(ip_length);
1100 } 998 }
1101 999
1102 st->packet_space = skb_shinfo(skb)->gso_size; 1000 rc = efx_tso_put_header(tx_queue, buffer, header);
1103 ++tx_queue->tso_packets; 1001 if (unlikely(rc))
1002 return rc;
1104 1003
1105 /* Form a descriptor for this header. */ 1004 ++tx_queue->tso_packets;
1106 efx_tso_put_header(tx_queue, tsoh, st->header_len);
1107 1005
1108 return 0; 1006 return 0;
1109} 1007}
@@ -1118,13 +1016,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1118 * 1016 *
1119 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if 1017 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
1120 * @skb was not enqueued. In all cases @skb is consumed. Return 1018 * @skb was not enqueued. In all cases @skb is consumed. Return
1121 * %NETDEV_TX_OK or %NETDEV_TX_BUSY. 1019 * %NETDEV_TX_OK.
1122 */ 1020 */
1123static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 1021static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1124 struct sk_buff *skb) 1022 struct sk_buff *skb)
1125{ 1023{
1126 struct efx_nic *efx = tx_queue->efx; 1024 struct efx_nic *efx = tx_queue->efx;
1127 int frag_i, rc, rc2 = NETDEV_TX_OK; 1025 int frag_i, rc;
1128 struct tso_state state; 1026 struct tso_state state;
1129 1027
1130 /* Find the packet protocol and sanity-check it */ 1028 /* Find the packet protocol and sanity-check it */
@@ -1156,11 +1054,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1156 goto mem_err; 1054 goto mem_err;
1157 1055
1158 while (1) { 1056 while (1) {
1159 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); 1057 tso_fill_packet_with_fragment(tx_queue, skb, &state);
1160 if (unlikely(rc)) {
1161 rc2 = NETDEV_TX_BUSY;
1162 goto unwind;
1163 }
1164 1058
1165 /* Move onto the next fragment? */ 1059 /* Move onto the next fragment? */
1166 if (state.in_len == 0) { 1060 if (state.in_len == 0) {
@@ -1184,6 +1078,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1184 /* Pass off to hardware */ 1078 /* Pass off to hardware */
1185 efx_nic_push_buffers(tx_queue); 1079 efx_nic_push_buffers(tx_queue);
1186 1080
1081 efx_tx_maybe_stop_queue(tx_queue);
1082
1187 tx_queue->tso_bursts++; 1083 tx_queue->tso_bursts++;
1188 return NETDEV_TX_OK; 1084 return NETDEV_TX_OK;
1189 1085
@@ -1192,10 +1088,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1192 "Out of memory for TSO headers, or DMA mapping error\n"); 1088 "Out of memory for TSO headers, or DMA mapping error\n");
1193 dev_kfree_skb_any(skb); 1089 dev_kfree_skb_any(skb);
1194 1090
1195 unwind:
1196 /* Free the DMA mapping we were in the process of writing out */ 1091 /* Free the DMA mapping we were in the process of writing out */
1197 if (state.unmap_len) { 1092 if (state.unmap_len) {
1198 if (state.unmap_single) 1093 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
1199 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, 1094 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1200 state.unmap_len, DMA_TO_DEVICE); 1095 state.unmap_len, DMA_TO_DEVICE);
1201 else 1096 else
@@ -1204,25 +1099,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1204 } 1099 }
1205 1100
1206 efx_enqueue_unwind(tx_queue); 1101 efx_enqueue_unwind(tx_queue);
1207 return rc2; 1102 return NETDEV_TX_OK;
1208}
1209
1210
1211/*
1212 * Free up all TSO datastructures associated with tx_queue. This
1213 * routine should be called only once the tx_queue is both empty and
1214 * will no longer be used.
1215 */
1216static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1217{
1218 unsigned i;
1219
1220 if (tx_queue->buffer) {
1221 for (i = 0; i <= tx_queue->ptr_mask; ++i)
1222 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1223 }
1224
1225 while (tx_queue->tso_headers_free != NULL)
1226 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1227 &tx_queue->efx->pci_dev->dev);
1228} 1103}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ade108232048..0376a5e6b2bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
177 new_bus->write = &stmmac_mdio_write; 177 new_bus->write = &stmmac_mdio_write;
178 new_bus->reset = &stmmac_mdio_reset; 178 new_bus->reset = &stmmac_mdio_reset;
179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", 179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
180 new_bus->name, mdio_bus_data->bus_id); 180 new_bus->name, priv->plat->bus_id);
181 new_bus->priv = ndev; 181 new_bus->priv = ndev;
182 new_bus->irq = irqlist; 182 new_bus->irq = irqlist;
183 new_bus->phy_mask = mdio_bus_data->phy_mask; 183 new_bus->phy_mask = mdio_bus_data->phy_mask;
@@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
213 * and no PHY number was provided to the MAC, 213 * and no PHY number was provided to the MAC,
214 * use the one probed here. 214 * use the one probed here.
215 */ 215 */
216 if ((priv->plat->bus_id == mdio_bus_data->bus_id) && 216 if (priv->plat->phy_addr == -1)
217 (priv->plat->phy_addr == -1))
218 priv->plat->phy_addr = addr; 217 priv->plat->phy_addr = addr;
219 218
220 act = (priv->plat->bus_id == mdio_bus_data->bus_id) && 219 act = (priv->plat->phy_addr == addr);
221 (priv->plat->phy_addr == addr);
222 switch (phydev->irq) { 220 switch (phydev->irq) {
223 case PHY_POLL: 221 case PHY_POLL:
224 irq_str = "POLL"; 222 irq_str = "POLL";
@@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
258{ 256{
259 struct stmmac_priv *priv = netdev_priv(ndev); 257 struct stmmac_priv *priv = netdev_priv(ndev);
260 258
259 if (!priv->mii)
260 return 0;
261
261 mdiobus_unregister(priv->mii); 262 mdiobus_unregister(priv->mii);
262 priv->mii->priv = NULL; 263 priv->mii->priv = NULL;
263 mdiobus_free(priv->mii); 264 mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 13afb8edfadc..1f069b0f6af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -40,7 +40,6 @@ static void stmmac_default_data(void)
40 plat_dat.has_gmac = 1; 40 plat_dat.has_gmac = 1;
41 plat_dat.force_sf_dma_mode = 1; 41 plat_dat.force_sf_dma_mode = 1;
42 42
43 mdio_data.bus_id = 1;
44 mdio_data.phy_reset = NULL; 43 mdio_data.phy_reset = NULL;
45 mdio_data.phy_mask = 0; 44 mdio_data.phy_mask = 0;
46 plat_dat.mdio_bus_data = &mdio_data; 45 plat_dat.mdio_bus_data = &mdio_data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b93245c11995..ed112b55ae7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
78{ 78{
79 int ret = 0; 79 int ret = 0;
80 struct resource *res; 80 struct resource *res;
81 struct device *dev = &pdev->dev;
81 void __iomem *addr = NULL; 82 void __iomem *addr = NULL;
82 struct stmmac_priv *priv = NULL; 83 struct stmmac_priv *priv = NULL;
83 struct plat_stmmacenet_data *plat_dat = NULL; 84 struct plat_stmmacenet_data *plat_dat = NULL;
@@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
87 if (!res) 88 if (!res)
88 return -ENODEV; 89 return -ENODEV;
89 90
90 if (!request_mem_region(res->start, resource_size(res), pdev->name)) { 91 addr = devm_request_and_ioremap(dev, res);
91 pr_err("%s: ERROR: memory allocation failed"
92 "cannot get the I/O addr 0x%x\n",
93 __func__, (unsigned int)res->start);
94 return -EBUSY;
95 }
96
97 addr = ioremap(res->start, resource_size(res));
98 if (!addr) { 92 if (!addr) {
99 pr_err("%s: ERROR: memory mapping failed", __func__); 93 pr_err("%s: ERROR: memory mapping failed", __func__);
100 ret = -ENOMEM; 94 return -ENOMEM;
101 goto out_release_region;
102 } 95 }
103 96
104 if (pdev->dev.of_node) { 97 if (pdev->dev.of_node) {
@@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
107 GFP_KERNEL); 100 GFP_KERNEL);
108 if (!plat_dat) { 101 if (!plat_dat) {
109 pr_err("%s: ERROR: no memory", __func__); 102 pr_err("%s: ERROR: no memory", __func__);
110 ret = -ENOMEM; 103 return -ENOMEM;
111 goto out_unmap;
112 } 104 }
113 105
114 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); 106 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
115 if (ret) { 107 if (ret) {
116 pr_err("%s: main dt probe failed", __func__); 108 pr_err("%s: main dt probe failed", __func__);
117 goto out_unmap; 109 return ret;
118 } 110 }
119 } else { 111 } else {
120 plat_dat = pdev->dev.platform_data; 112 plat_dat = pdev->dev.platform_data;
@@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
124 if (plat_dat->init) { 116 if (plat_dat->init) {
125 ret = plat_dat->init(pdev); 117 ret = plat_dat->init(pdev);
126 if (unlikely(ret)) 118 if (unlikely(ret))
127 goto out_unmap; 119 return ret;
128 } 120 }
129 121
130 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); 122 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
131 if (!priv) { 123 if (!priv) {
132 pr_err("%s: main driver probe failed", __func__); 124 pr_err("%s: main driver probe failed", __func__);
133 goto out_unmap; 125 return -ENODEV;
134 } 126 }
135 127
136 /* Get MAC address if available (DT) */ 128 /* Get MAC address if available (DT) */
@@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
142 if (priv->dev->irq == -ENXIO) { 134 if (priv->dev->irq == -ENXIO) {
143 pr_err("%s: ERROR: MAC IRQ configuration " 135 pr_err("%s: ERROR: MAC IRQ configuration "
144 "information not found\n", __func__); 136 "information not found\n", __func__);
145 ret = -ENXIO; 137 return -ENXIO;
146 goto out_unmap;
147 } 138 }
148 139
149 /* 140 /*
@@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
165 pr_debug("STMMAC platform driver registration completed"); 156 pr_debug("STMMAC platform driver registration completed");
166 157
167 return 0; 158 return 0;
168
169out_unmap:
170 iounmap(addr);
171 platform_set_drvdata(pdev, NULL);
172
173out_release_region:
174 release_mem_region(res->start, resource_size(res));
175
176 return ret;
177} 159}
178 160
179/** 161/**
@@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
186{ 168{
187 struct net_device *ndev = platform_get_drvdata(pdev); 169 struct net_device *ndev = platform_get_drvdata(pdev);
188 struct stmmac_priv *priv = netdev_priv(ndev); 170 struct stmmac_priv *priv = netdev_priv(ndev);
189 struct resource *res;
190 int ret = stmmac_dvr_remove(ndev); 171 int ret = stmmac_dvr_remove(ndev);
191 172
192 if (priv->plat->exit) 173 if (priv->plat->exit)
@@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
194 175
195 platform_set_drvdata(pdev, NULL); 176 platform_set_drvdata(pdev, NULL);
196 177
197 iounmap((void __force __iomem *)priv->ioaddr);
198 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
199 release_mem_region(res->start, resource_size(res));
200
201 return ret; 178 return ret;
202} 179}
203 180
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 967fe8cb476e..c9c977bf02ac 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -212,7 +212,6 @@ static void bigmac_clean_rings(struct bigmac *bp)
212static void bigmac_init_rings(struct bigmac *bp, int from_irq) 212static void bigmac_init_rings(struct bigmac *bp, int from_irq)
213{ 213{
214 struct bmac_init_block *bb = bp->bmac_block; 214 struct bmac_init_block *bb = bp->bmac_block;
215 struct net_device *dev = bp->dev;
216 int i; 215 int i;
217 gfp_t gfp_flags = GFP_KERNEL; 216 gfp_t gfp_flags = GFP_KERNEL;
218 217
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1b173a6145d6..b26cbda5efa9 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
32 32
33config TI_DAVINCI_MDIO 33config TI_DAVINCI_MDIO
34 tristate "TI DaVinci MDIO Support" 34 tristate "TI DaVinci MDIO Support"
35 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) 35 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
36 select PHYLIB 36 select PHYLIB
37 ---help--- 37 ---help---
38 This driver supports TI's DaVinci MDIO module. 38 This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
42 42
43config TI_DAVINCI_CPDMA 43config TI_DAVINCI_CPDMA
44 tristate "TI DaVinci CPDMA Support" 44 tristate "TI DaVinci CPDMA Support"
45 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) 45 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
46 ---help--- 46 ---help---
47 This driver supports TI's DaVinci CPDMA dma engine. 47 This driver supports TI's DaVinci CPDMA dma engine.
48 48
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1e5d85b06e71..df55e2403746 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -28,6 +28,9 @@
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
31#include <linux/of.h>
32#include <linux/of_net.h>
33#include <linux/of_device.h>
31 34
32#include <linux/platform_data/cpsw.h> 35#include <linux/platform_data/cpsw.h>
33 36
@@ -383,6 +386,11 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
383 mac_control |= BIT(7); /* GIGABITEN */ 386 mac_control |= BIT(7); /* GIGABITEN */
384 if (phy->duplex) 387 if (phy->duplex)
385 mac_control |= BIT(0); /* FULLDUPLEXEN */ 388 mac_control |= BIT(0); /* FULLDUPLEXEN */
389
390 /* set speed_in input in case RMII mode is used in 100Mbps */
391 if (phy->speed == 100)
392 mac_control |= BIT(15);
393
386 *link = true; 394 *link = true;
387 } else { 395 } else {
388 mac_control = 0; 396 mac_control = 0;
@@ -709,6 +717,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
709 slave->sliver = regs + data->sliver_reg_ofs; 717 slave->sliver = regs + data->sliver_reg_ofs;
710} 718}
711 719
720static int cpsw_probe_dt(struct cpsw_platform_data *data,
721 struct platform_device *pdev)
722{
723 struct device_node *node = pdev->dev.of_node;
724 struct device_node *slave_node;
725 int i = 0, ret;
726 u32 prop;
727
728 if (!node)
729 return -EINVAL;
730
731 if (of_property_read_u32(node, "slaves", &prop)) {
732 pr_err("Missing slaves property in the DT.\n");
733 return -EINVAL;
734 }
735 data->slaves = prop;
736
737 data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
738 data->slaves, GFP_KERNEL);
739 if (!data->slave_data) {
740 pr_err("Could not allocate slave memory.\n");
741 return -EINVAL;
742 }
743
744 data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
745
746 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
747 pr_err("Missing cpdma_channels property in the DT.\n");
748 ret = -EINVAL;
749 goto error_ret;
750 }
751 data->channels = prop;
752
753 if (of_property_read_u32(node, "host_port_no", &prop)) {
754 pr_err("Missing host_port_no property in the DT.\n");
755 ret = -EINVAL;
756 goto error_ret;
757 }
758 data->host_port_num = prop;
759
760 if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
761 pr_err("Missing cpdma_reg_ofs property in the DT.\n");
762 ret = -EINVAL;
763 goto error_ret;
764 }
765 data->cpdma_reg_ofs = prop;
766
767 if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
768 pr_err("Missing cpdma_sram_ofs property in the DT.\n");
769 ret = -EINVAL;
770 goto error_ret;
771 }
772 data->cpdma_sram_ofs = prop;
773
774 if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
775 pr_err("Missing ale_reg_ofs property in the DT.\n");
776 ret = -EINVAL;
777 goto error_ret;
778 }
779 data->ale_reg_ofs = prop;
780
781 if (of_property_read_u32(node, "ale_entries", &prop)) {
782 pr_err("Missing ale_entries property in the DT.\n");
783 ret = -EINVAL;
784 goto error_ret;
785 }
786 data->ale_entries = prop;
787
788 if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
789 pr_err("Missing host_port_reg_ofs property in the DT.\n");
790 ret = -EINVAL;
791 goto error_ret;
792 }
793 data->host_port_reg_ofs = prop;
794
795 if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
796 pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
797 ret = -EINVAL;
798 goto error_ret;
799 }
800 data->hw_stats_reg_ofs = prop;
801
802 if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
803 pr_err("Missing bd_ram_ofs property in the DT.\n");
804 ret = -EINVAL;
805 goto error_ret;
806 }
807 data->bd_ram_ofs = prop;
808
809 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
810 pr_err("Missing bd_ram_size property in the DT.\n");
811 ret = -EINVAL;
812 goto error_ret;
813 }
814 data->bd_ram_size = prop;
815
816 if (of_property_read_u32(node, "rx_descs", &prop)) {
817 pr_err("Missing rx_descs property in the DT.\n");
818 ret = -EINVAL;
819 goto error_ret;
820 }
821 data->rx_descs = prop;
822
823 if (of_property_read_u32(node, "mac_control", &prop)) {
824 pr_err("Missing mac_control property in the DT.\n");
825 ret = -EINVAL;
826 goto error_ret;
827 }
828 data->mac_control = prop;
829
830 for_each_child_of_node(node, slave_node) {
831 struct cpsw_slave_data *slave_data = data->slave_data + i;
832 const char *phy_id = NULL;
833 const void *mac_addr = NULL;
834
835 if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
836 pr_err("Missing slave[%d] phy_id property\n", i);
837 ret = -EINVAL;
838 goto error_ret;
839 }
840 slave_data->phy_id = phy_id;
841
842 if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
843 pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
844 ret = -EINVAL;
845 goto error_ret;
846 }
847 slave_data->slave_reg_ofs = prop;
848
849 if (of_property_read_u32(slave_node, "sliver_reg_ofs",
850 &prop)) {
851 pr_err("Missing slave[%d] sliver_reg_ofs property\n",
852 i);
853 ret = -EINVAL;
854 goto error_ret;
855 }
856 slave_data->sliver_reg_ofs = prop;
857
858 mac_addr = of_get_mac_address(slave_node);
859 if (mac_addr)
860 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
861
862 i++;
863 }
864
865 return 0;
866
867error_ret:
868 kfree(data->slave_data);
869 return ret;
870}
871
712static int __devinit cpsw_probe(struct platform_device *pdev) 872static int __devinit cpsw_probe(struct platform_device *pdev)
713{ 873{
714 struct cpsw_platform_data *data = pdev->dev.platform_data; 874 struct cpsw_platform_data *data = pdev->dev.platform_data;
@@ -720,11 +880,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
720 struct resource *res; 880 struct resource *res;
721 int ret = 0, i, k = 0; 881 int ret = 0, i, k = 0;
722 882
723 if (!data) {
724 pr_err("platform data missing\n");
725 return -ENODEV;
726 }
727
728 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 883 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
729 if (!ndev) { 884 if (!ndev) {
730 pr_err("error allocating net_device\n"); 885 pr_err("error allocating net_device\n");
@@ -734,13 +889,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
734 platform_set_drvdata(pdev, ndev); 889 platform_set_drvdata(pdev, ndev);
735 priv = netdev_priv(ndev); 890 priv = netdev_priv(ndev);
736 spin_lock_init(&priv->lock); 891 spin_lock_init(&priv->lock);
737 priv->data = *data;
738 priv->pdev = pdev; 892 priv->pdev = pdev;
739 priv->ndev = ndev; 893 priv->ndev = ndev;
740 priv->dev = &ndev->dev; 894 priv->dev = &ndev->dev;
741 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 895 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
742 priv->rx_packet_max = max(rx_packet_max, 128); 896 priv->rx_packet_max = max(rx_packet_max, 128);
743 897
898 if (cpsw_probe_dt(&priv->data, pdev)) {
899 pr_err("cpsw: platform data missing\n");
900 ret = -ENODEV;
901 goto clean_ndev_ret;
902 }
903 data = &priv->data;
904
744 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 905 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
745 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); 906 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
746 pr_info("Detected MACID = %pM", priv->mac_addr); 907 pr_info("Detected MACID = %pM", priv->mac_addr);
@@ -996,11 +1157,17 @@ static const struct dev_pm_ops cpsw_pm_ops = {
996 .resume = cpsw_resume, 1157 .resume = cpsw_resume,
997}; 1158};
998 1159
1160static const struct of_device_id cpsw_of_mtable[] = {
1161 { .compatible = "ti,cpsw", },
1162 { /* sentinel */ },
1163};
1164
999static struct platform_driver cpsw_driver = { 1165static struct platform_driver cpsw_driver = {
1000 .driver = { 1166 .driver = {
1001 .name = "cpsw", 1167 .name = "cpsw",
1002 .owner = THIS_MODULE, 1168 .owner = THIS_MODULE,
1003 .pm = &cpsw_pm_ops, 1169 .pm = &cpsw_pm_ops,
1170 .of_match_table = of_match_ptr(cpsw_of_mtable),
1004 }, 1171 },
1005 .probe = cpsw_probe, 1172 .probe = cpsw_probe,
1006 .remove = __devexit_p(cpsw_remove), 1173 .remove = __devexit_p(cpsw_remove),
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a9ca4a03d31b..51a96dbee9ac 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -36,6 +36,8 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/pm_runtime.h> 37#include <linux/pm_runtime.h>
38#include <linux/davinci_emac.h> 38#include <linux/davinci_emac.h>
39#include <linux/of.h>
40#include <linux/of_device.h>
39 41
40/* 42/*
41 * This timeout definition is a worst-case ultra defensive measure against 43 * This timeout definition is a worst-case ultra defensive measure against
@@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
289 return 0; 291 return 0;
290} 292}
291 293
294static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
295 struct platform_device *pdev)
296{
297 struct device_node *node = pdev->dev.of_node;
298 u32 prop;
299
300 if (!node)
301 return -EINVAL;
302
303 if (of_property_read_u32(node, "bus_freq", &prop)) {
304 pr_err("Missing bus_freq property in the DT.\n");
305 return -EINVAL;
306 }
307 data->bus_freq = prop;
308
309 return 0;
310}
311
312
292static int __devinit davinci_mdio_probe(struct platform_device *pdev) 313static int __devinit davinci_mdio_probe(struct platform_device *pdev)
293{ 314{
294 struct mdio_platform_data *pdata = pdev->dev.platform_data; 315 struct mdio_platform_data *pdata = pdev->dev.platform_data;
@@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
304 return -ENOMEM; 325 return -ENOMEM;
305 } 326 }
306 327
307 data->pdata = pdata ? (*pdata) : default_pdata;
308
309 data->bus = mdiobus_alloc(); 328 data->bus = mdiobus_alloc();
310 if (!data->bus) { 329 if (!data->bus) {
311 dev_err(dev, "failed to alloc mii bus\n"); 330 dev_err(dev, "failed to alloc mii bus\n");
@@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
313 goto bail_out; 332 goto bail_out;
314 } 333 }
315 334
335 if (dev->of_node) {
336 if (davinci_mdio_probe_dt(&data->pdata, pdev))
337 data->pdata = default_pdata;
338 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
339 } else {
340 data->pdata = pdata ? (*pdata) : default_pdata;
341 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
342 pdev->name, pdev->id);
343 }
344
316 data->bus->name = dev_name(dev); 345 data->bus->name = dev_name(dev);
317 data->bus->read = davinci_mdio_read, 346 data->bus->read = davinci_mdio_read,
318 data->bus->write = davinci_mdio_write, 347 data->bus->write = davinci_mdio_write,
319 data->bus->reset = davinci_mdio_reset, 348 data->bus->reset = davinci_mdio_reset,
320 data->bus->parent = dev; 349 data->bus->parent = dev;
321 data->bus->priv = data; 350 data->bus->priv = data;
322 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
323 pdev->name, pdev->id);
324 351
325 pm_runtime_enable(&pdev->dev); 352 pm_runtime_enable(&pdev->dev);
326 pm_runtime_get_sync(&pdev->dev); 353 pm_runtime_get_sync(&pdev->dev);
@@ -456,11 +483,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = {
456 .resume = davinci_mdio_resume, 483 .resume = davinci_mdio_resume,
457}; 484};
458 485
486static const struct of_device_id davinci_mdio_of_mtable[] = {
487 { .compatible = "ti,davinci_mdio", },
488 { /* sentinel */ },
489};
490
459static struct platform_driver davinci_mdio_driver = { 491static struct platform_driver davinci_mdio_driver = {
460 .driver = { 492 .driver = {
461 .name = "davinci_mdio", 493 .name = "davinci_mdio",
462 .owner = THIS_MODULE, 494 .owner = THIS_MODULE,
463 .pm = &davinci_mdio_pm_ops, 495 .pm = &davinci_mdio_pm_ops,
496 .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
464 }, 497 },
465 .probe = davinci_mdio_probe, 498 .probe = davinci_mdio_probe,
466 .remove = __devexit_p(davinci_mdio_remove), 499 .remove = __devexit_p(davinci_mdio_remove),
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 277c93e9ff4d..8fa947a2d929 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1359,7 +1359,6 @@ static int tsi108_open(struct net_device *dev)
1359 } 1359 }
1360 1360
1361 data->rxskbs[i] = skb; 1361 data->rxskbs[i] = skb;
1362 data->rxskbs[i] = skb;
1363 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); 1362 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1364 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; 1363 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
1365 } 1364 }
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a5826a3111a6..2c08bf6e7bf3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
637 if (data && is_valid_ether_addr(data->mac_addr)) { 637 if (data && is_valid_ether_addr(data->mac_addr)) {
638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
639 } else { 639 } else {
640 eth_random_addr(ndev->dev_addr); 640 eth_hw_addr_random(ndev);
641 ndev->addr_assign_type |= NET_ADDR_RANDOM;
642 } 641 }
643 642
644 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 643 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index bdd8891c215a..88943d90c765 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
557 if (data && is_valid_ether_addr(data->mac_addr)) { 557 if (data && is_valid_ether_addr(data->mac_addr)) {
558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
559 } else { 559 } else {
560 eth_random_addr(ndev->dev_addr); 560 eth_hw_addr_random(ndev);
561 ndev->addr_assign_type |= NET_ADDR_RANDOM;
562 } 561 }
563 562
564 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 563 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 95ceb3593043..5fd6f4674326 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -35,6 +35,7 @@ struct hv_netvsc_packet;
35/* Represent the xfer page packet which contains 1 or more netvsc packet */ 35/* Represent the xfer page packet which contains 1 or more netvsc packet */
36struct xferpage_packet { 36struct xferpage_packet {
37 struct list_head list_ent; 37 struct list_head list_ent;
38 u32 status;
38 39
39 /* # of netvsc packets this xfer packet contains */ 40 /* # of netvsc packets this xfer packet contains */
40 u32 count; 41 u32 count;
@@ -47,6 +48,7 @@ struct xferpage_packet {
47struct hv_netvsc_packet { 48struct hv_netvsc_packet {
48 /* Bookkeeping stuff */ 49 /* Bookkeeping stuff */
49 struct list_head list_ent; 50 struct list_head list_ent;
51 u32 status;
50 52
51 struct hv_device *device; 53 struct hv_device *device;
52 bool is_data_pkt; 54 bool is_data_pkt;
@@ -465,8 +467,6 @@ struct nvsp_message {
465 467
466#define NETVSC_RECEIVE_BUFFER_ID 0xcafe 468#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
467 469
468#define NETVSC_RECEIVE_SG_COUNT 1
469
470/* Preallocated receive packets */ 470/* Preallocated receive packets */
471#define NETVSC_RECEIVE_PACKETLIST_COUNT 256 471#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
472 472
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4a1a5f58fa73..1cd77483da50 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -558,7 +558,7 @@ int netvsc_send(struct hv_device *device,
558} 558}
559 559
560static void netvsc_send_recv_completion(struct hv_device *device, 560static void netvsc_send_recv_completion(struct hv_device *device,
561 u64 transaction_id) 561 u64 transaction_id, u32 status)
562{ 562{
563 struct nvsp_message recvcompMessage; 563 struct nvsp_message recvcompMessage;
564 int retries = 0; 564 int retries = 0;
@@ -571,9 +571,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
571 recvcompMessage.hdr.msg_type = 571 recvcompMessage.hdr.msg_type =
572 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; 572 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
573 573
574 /* FIXME: Pass in the status */ 574 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
575 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
576 NVSP_STAT_SUCCESS;
577 575
578retry_send_cmplt: 576retry_send_cmplt:
579 /* Send the completion */ 577 /* Send the completion */
@@ -613,6 +611,7 @@ static void netvsc_receive_completion(void *context)
613 bool fsend_receive_comp = false; 611 bool fsend_receive_comp = false;
614 unsigned long flags; 612 unsigned long flags;
615 struct net_device *ndev; 613 struct net_device *ndev;
614 u32 status = NVSP_STAT_NONE;
616 615
617 /* 616 /*
618 * Even though it seems logical to do a GetOutboundNetDevice() here to 617 * Even though it seems logical to do a GetOutboundNetDevice() here to
@@ -627,6 +626,9 @@ static void netvsc_receive_completion(void *context)
627 /* Overloading use of the lock. */ 626 /* Overloading use of the lock. */
628 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags); 627 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
629 628
629 if (packet->status != NVSP_STAT_SUCCESS)
630 packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
631
630 packet->xfer_page_pkt->count--; 632 packet->xfer_page_pkt->count--;
631 633
632 /* 634 /*
@@ -636,6 +638,7 @@ static void netvsc_receive_completion(void *context)
636 if (packet->xfer_page_pkt->count == 0) { 638 if (packet->xfer_page_pkt->count == 0) {
637 fsend_receive_comp = true; 639 fsend_receive_comp = true;
638 transaction_id = packet->completion.recv.recv_completion_tid; 640 transaction_id = packet->completion.recv.recv_completion_tid;
641 status = packet->xfer_page_pkt->status;
639 list_add_tail(&packet->xfer_page_pkt->list_ent, 642 list_add_tail(&packet->xfer_page_pkt->list_ent,
640 &net_device->recv_pkt_list); 643 &net_device->recv_pkt_list);
641 644
@@ -647,7 +650,7 @@ static void netvsc_receive_completion(void *context)
647 650
648 /* Send a receive completion for the xfer page packet */ 651 /* Send a receive completion for the xfer page packet */
649 if (fsend_receive_comp) 652 if (fsend_receive_comp)
650 netvsc_send_recv_completion(device, transaction_id); 653 netvsc_send_recv_completion(device, transaction_id, status);
651 654
652} 655}
653 656
@@ -736,7 +739,8 @@ static void netvsc_receive(struct hv_device *device,
736 flags); 739 flags);
737 740
738 netvsc_send_recv_completion(device, 741 netvsc_send_recv_completion(device,
739 vmxferpage_packet->d.trans_id); 742 vmxferpage_packet->d.trans_id,
743 NVSP_STAT_FAIL);
740 744
741 return; 745 return;
742 } 746 }
@@ -744,6 +748,7 @@ static void netvsc_receive(struct hv_device *device,
744 /* Remove the 1st packet to represent the xfer page packet itself */ 748 /* Remove the 1st packet to represent the xfer page packet itself */
745 xferpage_packet = (struct xferpage_packet *)listHead.next; 749 xferpage_packet = (struct xferpage_packet *)listHead.next;
746 list_del(&xferpage_packet->list_ent); 750 list_del(&xferpage_packet->list_ent);
751 xferpage_packet->status = NVSP_STAT_SUCCESS;
747 752
748 /* This is how much we can satisfy */ 753 /* This is how much we can satisfy */
749 xferpage_packet->count = count - 1; 754 xferpage_packet->count = count - 1;
@@ -760,6 +765,7 @@ static void netvsc_receive(struct hv_device *device,
760 list_del(&netvsc_packet->list_ent); 765 list_del(&netvsc_packet->list_ent);
761 766
762 /* Initialize the netvsc packet */ 767 /* Initialize the netvsc packet */
768 netvsc_packet->status = NVSP_STAT_SUCCESS;
763 netvsc_packet->xfer_page_pkt = xferpage_packet; 769 netvsc_packet->xfer_page_pkt = xferpage_packet;
764 netvsc_packet->completion.recv.recv_completion = 770 netvsc_packet->completion.recv.recv_completion =
765 netvsc_receive_completion; 771 netvsc_receive_completion;
@@ -904,9 +910,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
904 INIT_LIST_HEAD(&net_device->recv_pkt_list); 910 INIT_LIST_HEAD(&net_device->recv_pkt_list);
905 911
906 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) { 912 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
907 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 913 packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
908 (NETVSC_RECEIVE_SG_COUNT *
909 sizeof(struct hv_page_buffer)), GFP_KERNEL);
910 if (!packet) 914 if (!packet)
911 break; 915 break;
912 916
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8c5a1c43c81d..f825a629a699 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -265,6 +265,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
265 if (!net) { 265 if (!net) {
266 netdev_err(net, "got receive callback but net device" 266 netdev_err(net, "got receive callback but net device"
267 " not initialized yet\n"); 267 " not initialized yet\n");
268 packet->status = NVSP_STAT_FAIL;
268 return 0; 269 return 0;
269 } 270 }
270 271
@@ -272,6 +273,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
272 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen); 273 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
273 if (unlikely(!skb)) { 274 if (unlikely(!skb)) {
274 ++net->stats.rx_dropped; 275 ++net->stats.rx_dropped;
276 packet->status = NVSP_STAT_FAIL;
275 return 0; 277 return 0;
276 } 278 }
277 279
@@ -400,7 +402,7 @@ static void netvsc_send_garp(struct work_struct *w)
400 ndev_ctx = container_of(w, struct net_device_context, dwork.work); 402 ndev_ctx = container_of(w, struct net_device_context, dwork.work);
401 net_device = hv_get_drvdata(ndev_ctx->device_ctx); 403 net_device = hv_get_drvdata(ndev_ctx->device_ctx);
402 net = net_device->ndev; 404 net = net_device->ndev;
403 netif_notify_peers(net); 405 netdev_notify_peers(net);
404} 406}
405 407
406 408
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1e88a1095934..928148cc3220 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -32,23 +32,31 @@
32#include "hyperv_net.h" 32#include "hyperv_net.h"
33 33
34 34
35#define RNDIS_EXT_LEN 100
35struct rndis_request { 36struct rndis_request {
36 struct list_head list_ent; 37 struct list_head list_ent;
37 struct completion wait_event; 38 struct completion wait_event;
38 39
40 struct rndis_message response_msg;
39 /* 41 /*
40 * FIXME: We assumed a fixed size response here. If we do ever need to 42 * The buffer for extended info after the RNDIS response message. It's
41 * handle a bigger response, we can either define a max response 43 * referenced based on the data offset in the RNDIS message. Its size
42 * message or add a response buffer variable above this field 44 * is enough for current needs, and should be sufficient for the near
45 * future.
43 */ 46 */
44 struct rndis_message response_msg; 47 u8 response_ext[RNDIS_EXT_LEN];
45 48
46 /* Simplify allocation by having a netvsc packet inline */ 49 /* Simplify allocation by having a netvsc packet inline */
47 struct hv_netvsc_packet pkt; 50 struct hv_netvsc_packet pkt;
48 struct hv_page_buffer buf; 51 /* Set 2 pages for rndis requests crossing page boundary */
49 /* FIXME: We assumed a fixed size request here. */ 52 struct hv_page_buffer buf[2];
53
50 struct rndis_message request_msg; 54 struct rndis_message request_msg;
51 u8 ext[100]; 55 /*
56 * The buffer for the extended info after the RNDIS request message.
57 * It is referenced and sized in a similar way as response_ext.
58 */
59 u8 request_ext[RNDIS_EXT_LEN];
52}; 60};
53 61
54static void rndis_filter_send_completion(void *ctx); 62static void rndis_filter_send_completion(void *ctx);
@@ -221,6 +229,18 @@ static int rndis_filter_send_request(struct rndis_device *dev,
221 packet->page_buf[0].offset = 229 packet->page_buf[0].offset =
222 (unsigned long)&req->request_msg & (PAGE_SIZE - 1); 230 (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
223 231
232 /* Add one page_buf when request_msg crossing page boundary */
233 if (packet->page_buf[0].offset + packet->page_buf[0].len > PAGE_SIZE) {
234 packet->page_buf_cnt++;
235 packet->page_buf[0].len = PAGE_SIZE -
236 packet->page_buf[0].offset;
237 packet->page_buf[1].pfn = virt_to_phys((void *)&req->request_msg
238 + packet->page_buf[0].len) >> PAGE_SHIFT;
239 packet->page_buf[1].offset = 0;
240 packet->page_buf[1].len = req->request_msg.msg_len -
241 packet->page_buf[0].len;
242 }
243
224 packet->completion.send.send_completion_ctx = req;/* packet; */ 244 packet->completion.send.send_completion_ctx = req;/* packet; */
225 packet->completion.send.send_completion = 245 packet->completion.send.send_completion =
226 rndis_filter_send_request_completion; 246 rndis_filter_send_request_completion;
@@ -255,7 +275,8 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
255 spin_unlock_irqrestore(&dev->request_lock, flags); 275 spin_unlock_irqrestore(&dev->request_lock, flags);
256 276
257 if (found) { 277 if (found) {
258 if (resp->msg_len <= sizeof(struct rndis_message)) { 278 if (resp->msg_len <=
279 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
259 memcpy(&request->response_msg, resp, 280 memcpy(&request->response_msg, resp,
260 resp->msg_len); 281 resp->msg_len);
261 } else { 282 } else {
@@ -392,9 +413,12 @@ int rndis_filter_receive(struct hv_device *dev,
392 struct rndis_device *rndis_dev; 413 struct rndis_device *rndis_dev;
393 struct rndis_message *rndis_msg; 414 struct rndis_message *rndis_msg;
394 struct net_device *ndev; 415 struct net_device *ndev;
416 int ret = 0;
395 417
396 if (!net_dev) 418 if (!net_dev) {
397 return -EINVAL; 419 ret = -EINVAL;
420 goto exit;
421 }
398 422
399 ndev = net_dev->ndev; 423 ndev = net_dev->ndev;
400 424
@@ -402,14 +426,16 @@ int rndis_filter_receive(struct hv_device *dev,
402 if (!net_dev->extension) { 426 if (!net_dev->extension) {
403 netdev_err(ndev, "got rndis message but no rndis device - " 427 netdev_err(ndev, "got rndis message but no rndis device - "
404 "dropping this message!\n"); 428 "dropping this message!\n");
405 return -ENODEV; 429 ret = -ENODEV;
430 goto exit;
406 } 431 }
407 432
408 rndis_dev = (struct rndis_device *)net_dev->extension; 433 rndis_dev = (struct rndis_device *)net_dev->extension;
409 if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) { 434 if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
410 netdev_err(ndev, "got rndis message but rndis device " 435 netdev_err(ndev, "got rndis message but rndis device "
411 "uninitialized...dropping this message!\n"); 436 "uninitialized...dropping this message!\n");
412 return -ENODEV; 437 ret = -ENODEV;
438 goto exit;
413 } 439 }
414 440
415 rndis_msg = pkt->data; 441 rndis_msg = pkt->data;
@@ -441,7 +467,11 @@ int rndis_filter_receive(struct hv_device *dev,
441 break; 467 break;
442 } 468 }
443 469
444 return 0; 470exit:
471 if (ret != 0)
472 pkt->status = NVSP_STAT_FAIL;
473
474 return ret;
445} 475}
446 476
447static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, 477static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -641,6 +671,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
641 if (t == 0) { 671 if (t == 0) {
642 netdev_err(ndev, 672 netdev_err(ndev,
643 "timeout before we got a set response...\n"); 673 "timeout before we got a set response...\n");
674 ret = -ETIMEDOUT;
644 /* 675 /*
645 * We can't deallocate the request since we may still receive a 676 * We can't deallocate the request since we may still receive a
646 * send completion for it. 677 * send completion for it.
@@ -678,8 +709,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
678 init = &request->request_msg.msg.init_req; 709 init = &request->request_msg.msg.init_req;
679 init->major_ver = RNDIS_MAJOR_VERSION; 710 init->major_ver = RNDIS_MAJOR_VERSION;
680 init->minor_ver = RNDIS_MINOR_VERSION; 711 init->minor_ver = RNDIS_MINOR_VERSION;
681 /* FIXME: Use 1536 - rounded ethernet frame size */ 712 init->max_xfer_size = 0x4000;
682 init->max_xfer_size = 2048;
683 713
684 dev->state = RNDIS_DEV_INITIALIZING; 714 dev->state = RNDIS_DEV_INITIALIZING;
685 715
diff --git a/drivers/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1fc4eefc20ed..08ae4655423a 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -34,3 +34,14 @@ config IEEE802154_AT86RF230
34 depends on IEEE802154_DRIVERS && MAC802154 34 depends on IEEE802154_DRIVERS && MAC802154
35 tristate "AT86RF230/231 transceiver driver" 35 tristate "AT86RF230/231 transceiver driver"
36 depends on SPI 36 depends on SPI
37
38config IEEE802154_MRF24J40
39 tristate "Microchip MRF24J40 transceiver driver"
40 depends on IEEE802154_DRIVERS && MAC802154
41 depends on SPI
42 ---help---
43 Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
44 controller.
45
46 This driver can also be built as a module. To do so, say M here.
47 the module will be called 'mrf24j40'.
diff --git a/drivers/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 4f4371d3aa7d..abb0c08decb0 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -1,3 +1,4 @@
1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o 1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o 2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
3obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o 3obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
4obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 5d309408395d..ba753d87a32f 100644
--- a/drivers/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -952,17 +952,7 @@ static struct spi_driver at86rf230_driver = {
952 .resume = at86rf230_resume, 952 .resume = at86rf230_resume,
953}; 953};
954 954
955static int __init at86rf230_init(void) 955module_spi_driver(at86rf230_driver);
956{
957 return spi_register_driver(&at86rf230_driver);
958}
959module_init(at86rf230_init);
960
961static void __exit at86rf230_exit(void)
962{
963 spi_unregister_driver(&at86rf230_driver);
964}
965module_exit(at86rf230_exit);
966 956
967MODULE_DESCRIPTION("AT86RF230 Transceiver Driver"); 957MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
968MODULE_LICENSE("GPL v2"); 958MODULE_LICENSE("GPL v2");
diff --git a/drivers/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 73d453159408..7d39add7d467 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -446,4 +446,3 @@ static __exit void fake_exit(void)
446module_init(fake_init); 446module_init(fake_init);
447module_exit(fake_exit); 447module_exit(fake_exit);
448MODULE_LICENSE("GPL"); 448MODULE_LICENSE("GPL");
449
diff --git a/drivers/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index e7456fcd0913..e7456fcd0913 100644
--- a/drivers/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
new file mode 100644
index 000000000000..0e53d4f431d2
--- /dev/null
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -0,0 +1,767 @@
1/*
2 * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller
3 *
4 * Copyright (C) 2012 Alan Ott <alan@signal11.us>
5 * Signal 11 Software
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/spi/spi.h>
23#include <linux/interrupt.h>
24#include <linux/module.h>
25#include <net/wpan-phy.h>
26#include <net/mac802154.h>
27
28/* MRF24J40 Short Address Registers */
29#define REG_RXMCR 0x00 /* Receive MAC control */
30#define REG_PANIDL 0x01 /* PAN ID (low) */
31#define REG_PANIDH 0x02 /* PAN ID (high) */
32#define REG_SADRL 0x03 /* Short address (low) */
33#define REG_SADRH 0x04 /* Short address (high) */
34#define REG_EADR0 0x05 /* Long address (low) (high is EADR7) */
35#define REG_TXMCR 0x11 /* Transmit MAC control */
36#define REG_PACON0 0x16 /* Power Amplifier Control */
37#define REG_PACON1 0x17 /* Power Amplifier Control */
38#define REG_PACON2 0x18 /* Power Amplifier Control */
39#define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */
40#define REG_TXSTAT 0x24 /* TX MAC Status Register */
41#define REG_SOFTRST 0x2A /* Soft Reset */
42#define REG_TXSTBL 0x2E /* TX Stabilization */
43#define REG_INTSTAT 0x31 /* Interrupt Status */
44#define REG_INTCON 0x32 /* Interrupt Control */
45#define REG_RFCTL 0x36 /* RF Control Mode Register */
46#define REG_BBREG1 0x39 /* Baseband Registers */
47#define REG_BBREG2 0x3A /* */
48#define REG_BBREG6 0x3E /* */
49#define REG_CCAEDTH 0x3F /* Energy Detection Threshold */
50
51/* MRF24J40 Long Address Registers */
52#define REG_RFCON0 0x200 /* RF Control Registers */
53#define REG_RFCON1 0x201
54#define REG_RFCON2 0x202
55#define REG_RFCON3 0x203
56#define REG_RFCON5 0x205
57#define REG_RFCON6 0x206
58#define REG_RFCON7 0x207
59#define REG_RFCON8 0x208
60#define REG_RSSI 0x210
61#define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */
62#define REG_SLPCON1 0x220
63#define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */
64#define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */
65#define REG_RX_FIFO 0x300 /* Receive FIFO */
66
67/* Device configuration: Only channels 11-26 on page 0 are supported. */
68#define MRF24J40_CHAN_MIN 11
69#define MRF24J40_CHAN_MAX 26
70#define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \
71 - ((u32)1 << MRF24J40_CHAN_MIN))
72
73#define TX_FIFO_SIZE 128 /* From datasheet */
74#define RX_FIFO_SIZE 144 /* From datasheet */
75#define SET_CHANNEL_DELAY_US 192 /* From datasheet */
76
77/* Device Private Data */
78struct mrf24j40 {
79 struct spi_device *spi;
80 struct ieee802154_dev *dev;
81
82 struct mutex buffer_mutex; /* only used to protect buf */
83 struct completion tx_complete;
84 struct work_struct irqwork;
85 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
86};
87
88/* Read/Write SPI Commands for Short and Long Address registers. */
89#define MRF24J40_READSHORT(reg) ((reg) << 1)
90#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
91#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
92#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
93
94/* Maximum speed to run the device at. TODO: Get the real max value from
95 * someone at Microchip since it isn't in the datasheet. */
96#define MAX_SPI_SPEED_HZ 1000000
97
98#define printdev(X) (&X->spi->dev)
99
100static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value)
101{
102 int ret;
103 struct spi_message msg;
104 struct spi_transfer xfer = {
105 .len = 2,
106 .tx_buf = devrec->buf,
107 .rx_buf = devrec->buf,
108 };
109
110 spi_message_init(&msg);
111 spi_message_add_tail(&xfer, &msg);
112
113 mutex_lock(&devrec->buffer_mutex);
114 devrec->buf[0] = MRF24J40_WRITESHORT(reg);
115 devrec->buf[1] = value;
116
117 ret = spi_sync(devrec->spi, &msg);
118 if (ret)
119 dev_err(printdev(devrec),
120 "SPI write Failed for short register 0x%hhx\n", reg);
121
122 mutex_unlock(&devrec->buffer_mutex);
123 return ret;
124}
125
126static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val)
127{
128 int ret = -1;
129 struct spi_message msg;
130 struct spi_transfer xfer = {
131 .len = 2,
132 .tx_buf = devrec->buf,
133 .rx_buf = devrec->buf,
134 };
135
136 spi_message_init(&msg);
137 spi_message_add_tail(&xfer, &msg);
138
139 mutex_lock(&devrec->buffer_mutex);
140 devrec->buf[0] = MRF24J40_READSHORT(reg);
141 devrec->buf[1] = 0;
142
143 ret = spi_sync(devrec->spi, &msg);
144 if (ret)
145 dev_err(printdev(devrec),
146 "SPI read Failed for short register 0x%hhx\n", reg);
147 else
148 *val = devrec->buf[1];
149
150 mutex_unlock(&devrec->buffer_mutex);
151 return ret;
152}
153
154static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value)
155{
156 int ret;
157 u16 cmd;
158 struct spi_message msg;
159 struct spi_transfer xfer = {
160 .len = 3,
161 .tx_buf = devrec->buf,
162 .rx_buf = devrec->buf,
163 };
164
165 spi_message_init(&msg);
166 spi_message_add_tail(&xfer, &msg);
167
168 cmd = MRF24J40_READLONG(reg);
169 mutex_lock(&devrec->buffer_mutex);
170 devrec->buf[0] = cmd >> 8 & 0xff;
171 devrec->buf[1] = cmd & 0xff;
172 devrec->buf[2] = 0;
173
174 ret = spi_sync(devrec->spi, &msg);
175 if (ret)
176 dev_err(printdev(devrec),
177 "SPI read Failed for long register 0x%hx\n", reg);
178 else
179 *value = devrec->buf[2];
180
181 mutex_unlock(&devrec->buffer_mutex);
182 return ret;
183}
184
185static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val)
186{
187 int ret;
188 u16 cmd;
189 struct spi_message msg;
190 struct spi_transfer xfer = {
191 .len = 3,
192 .tx_buf = devrec->buf,
193 .rx_buf = devrec->buf,
194 };
195
196 spi_message_init(&msg);
197 spi_message_add_tail(&xfer, &msg);
198
199 cmd = MRF24J40_WRITELONG(reg);
200 mutex_lock(&devrec->buffer_mutex);
201 devrec->buf[0] = cmd >> 8 & 0xff;
202 devrec->buf[1] = cmd & 0xff;
203 devrec->buf[2] = val;
204
205 ret = spi_sync(devrec->spi, &msg);
206 if (ret)
207 dev_err(printdev(devrec),
208 "SPI write Failed for long register 0x%hx\n", reg);
209
210 mutex_unlock(&devrec->buffer_mutex);
211 return ret;
212}
213
214/* This function relies on an undocumented write method. Once a write command
215 and address is set, as many bytes of data as desired can be clocked into
216 the device. The datasheet only shows setting one byte at a time. */
217static int write_tx_buf(struct mrf24j40 *devrec, u16 reg,
218 const u8 *data, size_t length)
219{
220 int ret;
221 u16 cmd;
222 u8 lengths[2];
223 struct spi_message msg;
224 struct spi_transfer addr_xfer = {
225 .len = 2,
226 .tx_buf = devrec->buf,
227 };
228 struct spi_transfer lengths_xfer = {
229 .len = 2,
230 .tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */
231 };
232 struct spi_transfer data_xfer = {
233 .len = length,
234 .tx_buf = data,
235 };
236
237 /* Range check the length. 2 bytes are used for the length fields.*/
238 if (length > TX_FIFO_SIZE-2) {
239 dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n");
240 length = TX_FIFO_SIZE-2;
241 }
242
243 spi_message_init(&msg);
244 spi_message_add_tail(&addr_xfer, &msg);
245 spi_message_add_tail(&lengths_xfer, &msg);
246 spi_message_add_tail(&data_xfer, &msg);
247
248 cmd = MRF24J40_WRITELONG(reg);
249 mutex_lock(&devrec->buffer_mutex);
250 devrec->buf[0] = cmd >> 8 & 0xff;
251 devrec->buf[1] = cmd & 0xff;
252 lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
253 lengths[1] = length; /* Total length */
254
255 ret = spi_sync(devrec->spi, &msg);
256 if (ret)
257 dev_err(printdev(devrec), "SPI write Failed for TX buf\n");
258
259 mutex_unlock(&devrec->buffer_mutex);
260 return ret;
261}
262
263static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
264 u8 *data, u8 *len, u8 *lqi)
265{
266 u8 rx_len;
267 u8 addr[2];
268 u8 lqi_rssi[2];
269 u16 cmd;
270 int ret;
271 struct spi_message msg;
272 struct spi_transfer addr_xfer = {
273 .len = 2,
274 .tx_buf = &addr,
275 };
276 struct spi_transfer data_xfer = {
277 .len = 0x0, /* set below */
278 .rx_buf = data,
279 };
280 struct spi_transfer status_xfer = {
281 .len = 2,
282 .rx_buf = &lqi_rssi,
283 };
284
285 /* Get the length of the data in the RX FIFO. The length in this
286 * register exclues the 1-byte length field at the beginning. */
287 ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len);
288 if (ret)
289 goto out;
290
291 /* Range check the RX FIFO length, accounting for the one-byte
292 * length field at the begining. */
293 if (rx_len > RX_FIFO_SIZE-1) {
294 dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
295 rx_len = RX_FIFO_SIZE-1;
296 }
297
298 if (rx_len > *len) {
299 /* Passed in buffer wasn't big enough. Should never happen. */
300 dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n");
301 rx_len = *len;
302 }
303
304 /* Set up the commands to read the data. */
305 cmd = MRF24J40_READLONG(REG_RX_FIFO+1);
306 addr[0] = cmd >> 8 & 0xff;
307 addr[1] = cmd & 0xff;
308 data_xfer.len = rx_len;
309
310 spi_message_init(&msg);
311 spi_message_add_tail(&addr_xfer, &msg);
312 spi_message_add_tail(&data_xfer, &msg);
313 spi_message_add_tail(&status_xfer, &msg);
314
315 ret = spi_sync(devrec->spi, &msg);
316 if (ret) {
317 dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n");
318 goto out;
319 }
320
321 *lqi = lqi_rssi[0];
322 *len = rx_len;
323
324#ifdef DEBUG
325 print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
326 DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
327 printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
328 lqi_rssi[0], lqi_rssi[1]);
329#endif
330
331out:
332 return ret;
333}
334
335static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
336{
337 struct mrf24j40 *devrec = dev->priv;
338 u8 val;
339 int ret = 0;
340
341 dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
342
343 ret = write_tx_buf(devrec, 0x000, skb->data, skb->len);
344 if (ret)
345 goto err;
346
347 /* Set TXNTRIG bit of TXNCON to send packet */
348 ret = read_short_reg(devrec, REG_TXNCON, &val);
349 if (ret)
350 goto err;
351 val |= 0x1;
352 val &= ~0x4;
353 write_short_reg(devrec, REG_TXNCON, val);
354
355 INIT_COMPLETION(devrec->tx_complete);
356
357 /* Wait for the device to send the TX complete interrupt. */
358 ret = wait_for_completion_interruptible_timeout(
359 &devrec->tx_complete,
360 5 * HZ);
361 if (ret == -ERESTARTSYS)
362 goto err;
363 if (ret == 0) {
364 ret = -ETIMEDOUT;
365 goto err;
366 }
367
368 /* Check for send error from the device. */
369 ret = read_short_reg(devrec, REG_TXSTAT, &val);
370 if (ret)
371 goto err;
372 if (val & 0x1) {
373 dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n");
374 ret = -ECOMM; /* TODO: Better error code ? */
375 } else
376 dev_dbg(printdev(devrec), "Packet Sent\n");
377
378err:
379
380 return ret;
381}
382
383static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
384{
385 /* TODO: */
386 printk(KERN_WARNING "mrf24j40: ed not implemented\n");
387 *level = 0;
388 return 0;
389}
390
391static int mrf24j40_start(struct ieee802154_dev *dev)
392{
393 struct mrf24j40 *devrec = dev->priv;
394 u8 val;
395 int ret;
396
397 dev_dbg(printdev(devrec), "start\n");
398
399 ret = read_short_reg(devrec, REG_INTCON, &val);
400 if (ret)
401 return ret;
402 val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */
403 write_short_reg(devrec, REG_INTCON, val);
404
405 return 0;
406}
407
408static void mrf24j40_stop(struct ieee802154_dev *dev)
409{
410 struct mrf24j40 *devrec = dev->priv;
411 u8 val;
412 int ret;
413 dev_dbg(printdev(devrec), "stop\n");
414
415 ret = read_short_reg(devrec, REG_INTCON, &val);
416 if (ret)
417 return;
418 val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */
419 write_short_reg(devrec, REG_INTCON, val);
420
421 return;
422}
423
424static int mrf24j40_set_channel(struct ieee802154_dev *dev,
425 int page, int channel)
426{
427 struct mrf24j40 *devrec = dev->priv;
428 u8 val;
429 int ret;
430
431 dev_dbg(printdev(devrec), "Set Channel %d\n", channel);
432
433 WARN_ON(page != 0);
434 WARN_ON(channel < MRF24J40_CHAN_MIN);
435 WARN_ON(channel > MRF24J40_CHAN_MAX);
436
437 /* Set Channel TODO */
438 val = (channel-11) << 4 | 0x03;
439 write_long_reg(devrec, REG_RFCON0, val);
440
441 /* RF Reset */
442 ret = read_short_reg(devrec, REG_RFCTL, &val);
443 if (ret)
444 return ret;
445 val |= 0x04;
446 write_short_reg(devrec, REG_RFCTL, val);
447 val &= ~0x04;
448 write_short_reg(devrec, REG_RFCTL, val);
449
450 udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
451
452 return 0;
453}
454
455static int mrf24j40_filter(struct ieee802154_dev *dev,
456 struct ieee802154_hw_addr_filt *filt,
457 unsigned long changed)
458{
459 struct mrf24j40 *devrec = dev->priv;
460
461 dev_dbg(printdev(devrec), "filter\n");
462
463 if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
464 /* Short Addr */
465 u8 addrh, addrl;
466 addrh = filt->short_addr >> 8 & 0xff;
467 addrl = filt->short_addr & 0xff;
468
469 write_short_reg(devrec, REG_SADRH, addrh);
470 write_short_reg(devrec, REG_SADRL, addrl);
471 dev_dbg(printdev(devrec),
472 "Set short addr to %04hx\n", filt->short_addr);
473 }
474
475 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
476 /* Device Address */
477 int i;
478 for (i = 0; i < 8; i++)
479 write_short_reg(devrec, REG_EADR0+i,
480 filt->ieee_addr[i]);
481
482#ifdef DEBUG
483 printk(KERN_DEBUG "Set long addr to: ");
484 for (i = 0; i < 8; i++)
485 printk("%02hhx ", filt->ieee_addr[i]);
486 printk(KERN_DEBUG "\n");
487#endif
488 }
489
490 if (changed & IEEE802515_AFILT_PANID_CHANGED) {
491 /* PAN ID */
492 u8 panidl, panidh;
493 panidh = filt->pan_id >> 8 & 0xff;
494 panidl = filt->pan_id & 0xff;
495 write_short_reg(devrec, REG_PANIDH, panidh);
496 write_short_reg(devrec, REG_PANIDL, panidl);
497
498 dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
499 }
500
501 if (changed & IEEE802515_AFILT_PANC_CHANGED) {
502 /* Pan Coordinator */
503 u8 val;
504 int ret;
505
506 ret = read_short_reg(devrec, REG_RXMCR, &val);
507 if (ret)
508 return ret;
509 if (filt->pan_coord)
510 val |= 0x8;
511 else
512 val &= ~0x8;
513 write_short_reg(devrec, REG_RXMCR, val);
514
515 /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA).
516 * REG_ORDER is maintained as default (no beacon/superframe).
517 */
518
519 dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
520 filt->pan_coord ? "on" : "off");
521 }
522
523 return 0;
524}
525
526static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
527{
528 u8 len = RX_FIFO_SIZE;
529 u8 lqi = 0;
530 u8 val;
531 int ret = 0;
532 struct sk_buff *skb;
533
534 /* Turn off reception of packets off the air. This prevents the
535 * device from overwriting the buffer while we're reading it. */
536 ret = read_short_reg(devrec, REG_BBREG1, &val);
537 if (ret)
538 goto out;
539 val |= 4; /* SET RXDECINV */
540 write_short_reg(devrec, REG_BBREG1, val);
541
542 skb = alloc_skb(len, GFP_KERNEL);
543 if (!skb) {
544 ret = -ENOMEM;
545 goto out;
546 }
547
548 ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi);
549 if (ret < 0) {
550 dev_err(printdev(devrec), "Failure reading RX FIFO\n");
551 kfree_skb(skb);
552 ret = -EINVAL;
553 goto out;
554 }
555
556 /* Cut off the checksum */
557 skb_trim(skb, len-2);
558
559 /* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040,
560 * also from a workqueue). I think irqsafe is not necessary here.
561 * Can someone confirm? */
562 ieee802154_rx_irqsafe(devrec->dev, skb, lqi);
563
564 dev_dbg(printdev(devrec), "RX Handled\n");
565
566out:
567 /* Turn back on reception of packets off the air. */
568 ret = read_short_reg(devrec, REG_BBREG1, &val);
569 if (ret)
570 return ret;
571 val &= ~0x4; /* Clear RXDECINV */
572 write_short_reg(devrec, REG_BBREG1, val);
573
574 return ret;
575}
576
577static struct ieee802154_ops mrf24j40_ops = {
578 .owner = THIS_MODULE,
579 .xmit = mrf24j40_tx,
580 .ed = mrf24j40_ed,
581 .start = mrf24j40_start,
582 .stop = mrf24j40_stop,
583 .set_channel = mrf24j40_set_channel,
584 .set_hw_addr_filt = mrf24j40_filter,
585};
586
587static irqreturn_t mrf24j40_isr(int irq, void *data)
588{
589 struct mrf24j40 *devrec = data;
590
591 disable_irq_nosync(irq);
592
593 schedule_work(&devrec->irqwork);
594
595 return IRQ_HANDLED;
596}
597
598static void mrf24j40_isrwork(struct work_struct *work)
599{
600 struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
601 u8 intstat;
602 int ret;
603
604 /* Read the interrupt status */
605 ret = read_short_reg(devrec, REG_INTSTAT, &intstat);
606 if (ret)
607 goto out;
608
609 /* Check for TX complete */
610 if (intstat & 0x1)
611 complete(&devrec->tx_complete);
612
613 /* Check for Rx */
614 if (intstat & 0x8)
615 mrf24j40_handle_rx(devrec);
616
617out:
618 enable_irq(devrec->spi->irq);
619}
620
621static int __devinit mrf24j40_probe(struct spi_device *spi)
622{
623 int ret = -ENOMEM;
624 u8 val;
625 struct mrf24j40 *devrec;
626
627 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
628
629 devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL);
630 if (!devrec)
631 goto err_devrec;
632 devrec->buf = kzalloc(3, GFP_KERNEL);
633 if (!devrec->buf)
634 goto err_buf;
635
636 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
637 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
638 spi->max_speed_hz = MAX_SPI_SPEED_HZ;
639
640 mutex_init(&devrec->buffer_mutex);
641 init_completion(&devrec->tx_complete);
642 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
643 devrec->spi = spi;
644 dev_set_drvdata(&spi->dev, devrec);
645
646 /* Register with the 802154 subsystem */
647
648 devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
649 if (!devrec->dev)
650 goto err_alloc_dev;
651
652 devrec->dev->priv = devrec;
653 devrec->dev->parent = &devrec->spi->dev;
654 devrec->dev->phy->channels_supported[0] = CHANNEL_MASK;
655 devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK;
656
657 dev_dbg(printdev(devrec), "registered mrf24j40\n");
658 ret = ieee802154_register_device(devrec->dev);
659 if (ret)
660 goto err_register_device;
661
662 /* Initialize the device.
663 From datasheet section 3.2: Initialization. */
664 write_short_reg(devrec, REG_SOFTRST, 0x07);
665 write_short_reg(devrec, REG_PACON2, 0x98);
666 write_short_reg(devrec, REG_TXSTBL, 0x95);
667 write_long_reg(devrec, REG_RFCON0, 0x03);
668 write_long_reg(devrec, REG_RFCON1, 0x01);
669 write_long_reg(devrec, REG_RFCON2, 0x80);
670 write_long_reg(devrec, REG_RFCON6, 0x90);
671 write_long_reg(devrec, REG_RFCON7, 0x80);
672 write_long_reg(devrec, REG_RFCON8, 0x10);
673 write_long_reg(devrec, REG_SLPCON1, 0x21);
674 write_short_reg(devrec, REG_BBREG2, 0x80);
675 write_short_reg(devrec, REG_CCAEDTH, 0x60);
676 write_short_reg(devrec, REG_BBREG6, 0x40);
677 write_short_reg(devrec, REG_RFCTL, 0x04);
678 write_short_reg(devrec, REG_RFCTL, 0x0);
679 udelay(192);
680
681 /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
682 ret = read_short_reg(devrec, REG_RXMCR, &val);
683 if (ret)
684 goto err_read_reg;
685 val &= ~0x3; /* Clear RX mode (normal) */
686 write_short_reg(devrec, REG_RXMCR, val);
687
688 ret = request_irq(spi->irq,
689 mrf24j40_isr,
690 IRQF_TRIGGER_FALLING,
691 dev_name(&spi->dev),
692 devrec);
693
694 if (ret) {
695 dev_err(printdev(devrec), "Unable to get IRQ");
696 goto err_irq;
697 }
698
699 return 0;
700
701err_irq:
702err_read_reg:
703 ieee802154_unregister_device(devrec->dev);
704err_register_device:
705 ieee802154_free_device(devrec->dev);
706err_alloc_dev:
707 kfree(devrec->buf);
708err_buf:
709 kfree(devrec);
710err_devrec:
711 return ret;
712}
713
714static int __devexit mrf24j40_remove(struct spi_device *spi)
715{
716 struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev);
717
718 dev_dbg(printdev(devrec), "remove\n");
719
720 free_irq(spi->irq, devrec);
721 flush_work_sync(&devrec->irqwork); /* TODO: Is this the right call? */
722 ieee802154_unregister_device(devrec->dev);
723 ieee802154_free_device(devrec->dev);
724 /* TODO: Will ieee802154_free_device() wait until ->xmit() is
725 * complete? */
726
727 /* Clean up the SPI stuff. */
728 dev_set_drvdata(&spi->dev, NULL);
729 kfree(devrec->buf);
730 kfree(devrec);
731 return 0;
732}
733
734static const struct spi_device_id mrf24j40_ids[] = {
735 { "mrf24j40", 0 },
736 { "mrf24j40ma", 0 },
737 { },
738};
739MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
740
741static struct spi_driver mrf24j40_driver = {
742 .driver = {
743 .name = "mrf24j40",
744 .bus = &spi_bus_type,
745 .owner = THIS_MODULE,
746 },
747 .id_table = mrf24j40_ids,
748 .probe = mrf24j40_probe,
749 .remove = __devexit_p(mrf24j40_remove),
750};
751
/* module_spi_driver() expands to exactly the hand-written
 * mrf24j40_init()/mrf24j40_exit() boilerplate this replaces: a
 * module_init that calls spi_register_driver() and a module_exit
 * that calls spi_unregister_driver(). */
module_spi_driver(mrf24j40_driver);
764
765MODULE_LICENSE("GPL");
766MODULE_AUTHOR("Alan Ott");
767MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver");
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index e2a06fd996d5..81f8f9e31db5 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -157,7 +157,7 @@ static const struct net_device_ops loopback_ops = {
157 */ 157 */
158static void loopback_setup(struct net_device *dev) 158static void loopback_setup(struct net_device *dev)
159{ 159{
160 dev->mtu = (16 * 1024) + 20 + 20 + 12; 160 dev->mtu = 64 * 1024;
161 dev->hard_header_len = ETH_HLEN; /* 14 */ 161 dev->hard_header_len = ETH_HLEN; /* 14 */
162 dev->addr_len = ETH_ALEN; /* 6 */ 162 dev->addr_len = ETH_ALEN; /* 6 */
163 dev->tx_queue_len = 0; 163 dev->tx_queue_len = 0;
@@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net)
197 if (err) 197 if (err)
198 goto out_free_netdev; 198 goto out_free_netdev;
199 199
200 BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
200 net->loopback_dev = dev; 201 net->loopback_dev = dev;
201 return 0; 202 return 0;
202 203
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 66a9bfe7b1c8..68a43fe602e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -546,9 +546,9 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
546 return 0; 546 return 0;
547} 547}
548 548
549static int macvlan_fdb_add(struct ndmsg *ndm, 549static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
550 struct net_device *dev, 550 struct net_device *dev,
551 unsigned char *addr, 551 const unsigned char *addr,
552 u16 flags) 552 u16 flags)
553{ 553{
554 struct macvlan_dev *vlan = netdev_priv(dev); 554 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,7 +567,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm,
567 567
568static int macvlan_fdb_del(struct ndmsg *ndm, 568static int macvlan_fdb_del(struct ndmsg *ndm,
569 struct net_device *dev, 569 struct net_device *dev,
570 unsigned char *addr) 570 const unsigned char *addr)
571{ 571{
572 struct macvlan_dev *vlan = netdev_priv(dev); 572 struct macvlan_dev *vlan = netdev_priv(dev);
573 int err = -EINVAL; 573 int err = -EINVAL;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3090dc65a6f1..983bbf4d5ef6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
159 several child MDIO busses to a parent bus. Child bus 159 several child MDIO busses to a parent bus. Child bus
160 selection is under the control of GPIO lines. 160 selection is under the control of GPIO lines.
161 161
162config MDIO_BUS_MUX_MMIOREG
163 tristate "Support for MMIO device-controlled MDIO bus multiplexers"
164 depends on OF_MDIO
165 select MDIO_BUS_MUX
166 help
167 This module provides a driver for MDIO bus multiplexers that
168 are controlled via a simple memory-mapped device, like an FPGA.
169 The multiplexer connects one of several child MDIO busses to a
170 parent bus. Child bus selection is under the control of one of
171 the FPGA's registers.
172
173 Currently, only 8-bit registers are supported.
174
162endif # PHYLIB 175endif # PHYLIB
163 176
164config MICREL_KS8995MA 177config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6d2dc6c94f2e..426674debae4 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
28obj-$(CONFIG_AMD_PHY) += amd.o 28obj-$(CONFIG_AMD_PHY) += amd.o
29obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o 29obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
30obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o 30obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
31obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0da0226661f..24e05c43bff8 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -980,7 +980,7 @@ static int dp83640_probe(struct phy_device *phydev)
980 980
981 if (choose_this_phy(clock, phydev)) { 981 if (choose_this_phy(clock, phydev)) {
982 clock->chosen = dp83640; 982 clock->chosen = dp83640;
983 clock->ptp_clock = ptp_clock_register(&clock->caps); 983 clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev);
984 if (IS_ERR(clock->ptp_clock)) { 984 if (IS_ERR(clock->ptp_clock)) {
985 err = PTR_ERR(clock->ptp_clock); 985 err = PTR_ERR(clock->ptp_clock);
986 goto no_register; 986 goto no_register;
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6d1e3fcc43e2..ec40ba882f61 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -122,6 +122,123 @@ static int lxt971_config_intr(struct phy_device *phydev)
122 return err; 122 return err;
123} 123}
124 124
125/*
126 * The A2 version of the LXT973 chip has an erratum: it randomly returns the
127 * contents of the previous even register when you read an odd register
128 */
129
130static int lxt973a2_update_link(struct phy_device *phydev)
131{
132 int status;
133 int control;
134 int retry = 8; /* we try 8 times */
135
136 /* Do a fake read */
137 status = phy_read(phydev, MII_BMSR);
138
139 if (status < 0)
140 return status;
141
142 control = phy_read(phydev, MII_BMCR);
143 if (control < 0)
144 return control;
145
146 do {
147 /* Read link and autonegotiation status */
148 status = phy_read(phydev, MII_BMSR);
149 } while (status >= 0 && retry-- && status == control);
150
151 if (status < 0)
152 return status;
153
154 if ((status & BMSR_LSTATUS) == 0)
155 phydev->link = 0;
156 else
157 phydev->link = 1;
158
159 return 0;
160}
161
162int lxt973a2_read_status(struct phy_device *phydev)
163{
164 int adv;
165 int err;
166 int lpa;
167 int lpagb = 0;
168
169 /* Update the link, but return if there was an error */
170 err = lxt973a2_update_link(phydev);
171 if (err)
172 return err;
173
174 if (AUTONEG_ENABLE == phydev->autoneg) {
175 int retry = 1;
176
177 adv = phy_read(phydev, MII_ADVERTISE);
178
179 if (adv < 0)
180 return adv;
181
182 do {
183 lpa = phy_read(phydev, MII_LPA);
184
185 if (lpa < 0)
186 return lpa;
187
188 /* If both registers are equal, it is suspect but not
189 * impossible, hence a new try
190 */
191 } while (lpa == adv && retry--);
192
193 lpa &= adv;
194
195 phydev->speed = SPEED_10;
196 phydev->duplex = DUPLEX_HALF;
197 phydev->pause = phydev->asym_pause = 0;
198
199 if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
200 phydev->speed = SPEED_1000;
201
202 if (lpagb & LPA_1000FULL)
203 phydev->duplex = DUPLEX_FULL;
204 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
205 phydev->speed = SPEED_100;
206
207 if (lpa & LPA_100FULL)
208 phydev->duplex = DUPLEX_FULL;
209 } else {
210 if (lpa & LPA_10FULL)
211 phydev->duplex = DUPLEX_FULL;
212 }
213
214 if (phydev->duplex == DUPLEX_FULL) {
215 phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
216 phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
217 }
218 } else {
219 int bmcr = phy_read(phydev, MII_BMCR);
220
221 if (bmcr < 0)
222 return bmcr;
223
224 if (bmcr & BMCR_FULLDPLX)
225 phydev->duplex = DUPLEX_FULL;
226 else
227 phydev->duplex = DUPLEX_HALF;
228
229 if (bmcr & BMCR_SPEED1000)
230 phydev->speed = SPEED_1000;
231 else if (bmcr & BMCR_SPEED100)
232 phydev->speed = SPEED_100;
233 else
234 phydev->speed = SPEED_10;
235
236 phydev->pause = phydev->asym_pause = 0;
237 }
238
239 return 0;
240}
241
125static int lxt973_probe(struct phy_device *phydev) 242static int lxt973_probe(struct phy_device *phydev)
126{ 243{
127 int val = phy_read(phydev, MII_LXT973_PCR); 244 int val = phy_read(phydev, MII_LXT973_PCR);
@@ -175,6 +292,16 @@ static struct phy_driver lxt97x_driver[] = {
175 .driver = { .owner = THIS_MODULE,}, 292 .driver = { .owner = THIS_MODULE,},
176}, { 293}, {
177 .phy_id = 0x00137a10, 294 .phy_id = 0x00137a10,
295 .name = "LXT973-A2",
296 .phy_id_mask = 0xffffffff,
297 .features = PHY_BASIC_FEATURES,
298 .flags = 0,
299 .probe = lxt973_probe,
300 .config_aneg = lxt973_config_aneg,
301 .read_status = lxt973a2_read_status,
302 .driver = { .owner = THIS_MODULE,},
303}, {
304 .phy_id = 0x00137a10,
178 .name = "LXT973", 305 .name = "LXT973",
179 .phy_id_mask = 0xfffffff0, 306 .phy_id_mask = 0xfffffff0,
180 .features = PHY_BASIC_FEATURES, 307 .features = PHY_BASIC_FEATURES,
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7189adf54bd1..899274f2f9b1 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -28,17 +28,38 @@
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/mdio-gpio.h> 29#include <linux/mdio-gpio.h>
30 30
31#ifdef CONFIG_OF_GPIO
32#include <linux/of_gpio.h> 31#include <linux/of_gpio.h>
33#include <linux/of_mdio.h> 32#include <linux/of_mdio.h>
34#include <linux/of_platform.h>
35#endif
36 33
37struct mdio_gpio_info { 34struct mdio_gpio_info {
38 struct mdiobb_ctrl ctrl; 35 struct mdiobb_ctrl ctrl;
39 int mdc, mdio; 36 int mdc, mdio;
40}; 37};
41 38
39static void *mdio_gpio_of_get_data(struct platform_device *pdev)
40{
41 struct device_node *np = pdev->dev.of_node;
42 struct mdio_gpio_platform_data *pdata;
43 int ret;
44
45 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
46 if (!pdata)
47 return NULL;
48
49 ret = of_get_gpio(np, 0);
50 if (ret < 0)
51 return NULL;
52
53 pdata->mdc = ret;
54
55 ret = of_get_gpio(np, 1);
56 if (ret < 0)
57 return NULL;
58 pdata->mdio = ret;
59
60 return pdata;
61}
62
42static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) 63static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
43{ 64{
44 struct mdio_gpio_info *bitbang = 65 struct mdio_gpio_info *bitbang =
@@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
162 183
163static int __devinit mdio_gpio_probe(struct platform_device *pdev) 184static int __devinit mdio_gpio_probe(struct platform_device *pdev)
164{ 185{
165 struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; 186 struct mdio_gpio_platform_data *pdata;
166 struct mii_bus *new_bus; 187 struct mii_bus *new_bus;
167 int ret; 188 int ret;
168 189
190 if (pdev->dev.of_node)
191 pdata = mdio_gpio_of_get_data(pdev);
192 else
193 pdata = pdev->dev.platform_data;
194
169 if (!pdata) 195 if (!pdata)
170 return -ENODEV; 196 return -ENODEV;
171 197
@@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
173 if (!new_bus) 199 if (!new_bus)
174 return -ENODEV; 200 return -ENODEV;
175 201
176 ret = mdiobus_register(new_bus); 202 if (pdev->dev.of_node)
203 ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
204 else
205 ret = mdiobus_register(new_bus);
206
177 if (ret) 207 if (ret)
178 mdio_gpio_bus_deinit(&pdev->dev); 208 mdio_gpio_bus_deinit(&pdev->dev);
179 209
@@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
187 return 0; 217 return 0;
188} 218}
189 219
190#ifdef CONFIG_OF_GPIO 220static struct of_device_id mdio_gpio_of_match[] = {
191 221 { .compatible = "virtual,mdio-gpio", },
192static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev) 222 { /* sentinel */ }
193{
194 struct mdio_gpio_platform_data *pdata;
195 struct mii_bus *new_bus;
196 int ret;
197
198 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
199 if (!pdata)
200 return -ENOMEM;
201
202 ret = of_get_gpio(ofdev->dev.of_node, 0);
203 if (ret < 0)
204 goto out_free;
205 pdata->mdc = ret;
206
207 ret = of_get_gpio(ofdev->dev.of_node, 1);
208 if (ret < 0)
209 goto out_free;
210 pdata->mdio = ret;
211
212 new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
213 if (!new_bus)
214 goto out_free;
215
216 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
217 if (ret)
218 mdio_gpio_bus_deinit(&ofdev->dev);
219
220 return ret;
221
222out_free:
223 kfree(pdata);
224 return -ENODEV;
225}
226
227static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
228{
229 mdio_gpio_bus_destroy(&ofdev->dev);
230 kfree(ofdev->dev.platform_data);
231
232 return 0;
233}
234
235static struct of_device_id mdio_ofgpio_match[] = {
236 {
237 .compatible = "virtual,mdio-gpio",
238 },
239 {},
240};
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
242
243static struct platform_driver mdio_ofgpio_driver = {
244 .driver = {
245 .name = "mdio-ofgpio",
246 .owner = THIS_MODULE,
247 .of_match_table = mdio_ofgpio_match,
248 },
249 .probe = mdio_ofgpio_probe,
250 .remove = __devexit_p(mdio_ofgpio_remove),
251}; 223};
252 224
253static inline int __init mdio_ofgpio_init(void)
254{
255 return platform_driver_register(&mdio_ofgpio_driver);
256}
257
258static inline void mdio_ofgpio_exit(void)
259{
260 platform_driver_unregister(&mdio_ofgpio_driver);
261}
262#else
263static inline int __init mdio_ofgpio_init(void) { return 0; }
264static inline void mdio_ofgpio_exit(void) { }
265#endif /* CONFIG_OF_GPIO */
266
267static struct platform_driver mdio_gpio_driver = { 225static struct platform_driver mdio_gpio_driver = {
268 .probe = mdio_gpio_probe, 226 .probe = mdio_gpio_probe,
269 .remove = __devexit_p(mdio_gpio_remove), 227 .remove = __devexit_p(mdio_gpio_remove),
270 .driver = { 228 .driver = {
271 .name = "mdio-gpio", 229 .name = "mdio-gpio",
272 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
231 .of_match_table = mdio_gpio_of_match,
273 }, 232 },
274}; 233};
275 234
276static int __init mdio_gpio_init(void) 235static int __init mdio_gpio_init(void)
277{ 236{
278 int ret; 237 return platform_driver_register(&mdio_gpio_driver);
279
280 ret = mdio_ofgpio_init();
281 if (ret)
282 return ret;
283
284 ret = platform_driver_register(&mdio_gpio_driver);
285 if (ret)
286 mdio_ofgpio_exit();
287
288 return ret;
289} 238}
290module_init(mdio_gpio_init); 239module_init(mdio_gpio_init);
291 240
292static void __exit mdio_gpio_exit(void) 241static void __exit mdio_gpio_exit(void)
293{ 242{
294 platform_driver_unregister(&mdio_gpio_driver); 243 platform_driver_unregister(&mdio_gpio_driver);
295 mdio_ofgpio_exit();
296} 244}
297module_exit(mdio_gpio_exit); 245module_exit(mdio_gpio_exit);
298 246
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
new file mode 100644
index 000000000000..9061ba622ac4
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -0,0 +1,171 @@
1/*
2 * Simple memory-mapped device MDIO MUX driver
3 *
4 * Author: Timur Tabi <timur@freescale.com>
5 *
6 * Copyright 2012 Freescale Semiconductor, Inc.
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#include <linux/platform_device.h>
14#include <linux/device.h>
15#include <linux/of_address.h>
16#include <linux/of_mdio.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/phy.h>
20#include <linux/mdio-mux.h>
21
22struct mdio_mux_mmioreg_state {
23 void *mux_handle;
24 phys_addr_t phys;
25 uint8_t mask;
26};
27
28/*
29 * MDIO multiplexing switch function
30 *
31 * This function is called by the mdio-mux layer when it thinks the mdio bus
32 * multiplexer needs to switch.
33 *
34 * 'current_child' is the current value of the mux register (masked via
35 * s->mask).
36 *
37 * 'desired_child' is the value of the 'reg' property of the target child MDIO
38 * node.
39 *
40 * The first time this function is called, current_child == -1.
41 *
42 * If current_child == desired_child, then the mux is already set to the
43 * correct bus.
44 */
45static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
46 void *data)
47{
48 struct mdio_mux_mmioreg_state *s = data;
49
50 if (current_child ^ desired_child) {
51 void *p = ioremap(s->phys, 1);
52 uint8_t x, y;
53
54 if (!p)
55 return -ENOMEM;
56
57 x = ioread8(p);
58 y = (x & ~s->mask) | desired_child;
59 if (x != y) {
60 iowrite8((x & ~s->mask) | desired_child, p);
61 pr_debug("%s: %02x -> %02x\n", __func__, x, y);
62 }
63
64 iounmap(p);
65 }
66
67 return 0;
68}
69
70static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
71{
72 struct device_node *np2, *np = pdev->dev.of_node;
73 struct mdio_mux_mmioreg_state *s;
74 struct resource res;
75 const __be32 *iprop;
76 int len, ret;
77
78 dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);
79
80 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
81 if (!s)
82 return -ENOMEM;
83
84 ret = of_address_to_resource(np, 0, &res);
85 if (ret) {
86 dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
87 np->full_name);
88 return ret;
89 }
90 s->phys = res.start;
91
92 if (resource_size(&res) != sizeof(uint8_t)) {
93 dev_err(&pdev->dev, "only 8-bit registers are supported\n");
94 return -EINVAL;
95 }
96
97 iprop = of_get_property(np, "mux-mask", &len);
98 if (!iprop || len != sizeof(uint32_t)) {
99 dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
100 return -ENODEV;
101 }
102 if (be32_to_cpup(iprop) > 255) {
103 dev_err(&pdev->dev, "only 8-bit registers are supported\n");
104 return -EINVAL;
105 }
106 s->mask = be32_to_cpup(iprop);
107
108 /*
109 * Verify that the 'reg' property of each child MDIO bus does not
110 * set any bits outside of the 'mask'.
111 */
112 for_each_available_child_of_node(np, np2) {
113 iprop = of_get_property(np2, "reg", &len);
114 if (!iprop || len != sizeof(uint32_t)) {
115 dev_err(&pdev->dev, "mdio-mux child node %s is "
116 "missing a 'reg' property\n", np2->full_name);
117 return -ENODEV;
118 }
119 if (be32_to_cpup(iprop) & ~s->mask) {
120 dev_err(&pdev->dev, "mdio-mux child node %s has "
121 "a 'reg' value with unmasked bits\n",
122 np2->full_name);
123 return -ENODEV;
124 }
125 }
126
127 ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
128 &s->mux_handle, s);
129 if (ret) {
130 dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
131 np->full_name);
132 return ret;
133 }
134
135 pdev->dev.platform_data = s;
136
137 return 0;
138}
139
140static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
141{
142 struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
143
144 mdio_mux_uninit(s->mux_handle);
145
146 return 0;
147}
148
149static struct of_device_id mdio_mux_mmioreg_match[] = {
150 {
151 .compatible = "mdio-mux-mmioreg",
152 },
153 {},
154};
155MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
156
157static struct platform_driver mdio_mux_mmioreg_driver = {
158 .driver = {
159 .name = "mdio-mux-mmioreg",
160 .owner = THIS_MODULE,
161 .of_match_table = mdio_mux_mmioreg_match,
162 },
163 .probe = mdio_mux_mmioreg_probe,
164 .remove = __devexit_p(mdio_mux_mmioreg_remove),
165};
166
167module_platform_driver(mdio_mux_mmioreg_driver);
168
169MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
170MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
171MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7ca2ff97c368..ef9ea9248223 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1035 bus->write(bus, addr, MII_MMD_DATA, data); 1035 bus->write(bus, addr, MII_MMD_DATA, data);
1036} 1036}
1037 1037
1038static u32 phy_eee_to_adv(u16 eee_adv)
1039{
1040 u32 adv = 0;
1041
1042 if (eee_adv & MDIO_EEE_100TX)
1043 adv |= ADVERTISED_100baseT_Full;
1044 if (eee_adv & MDIO_EEE_1000T)
1045 adv |= ADVERTISED_1000baseT_Full;
1046 if (eee_adv & MDIO_EEE_10GT)
1047 adv |= ADVERTISED_10000baseT_Full;
1048 if (eee_adv & MDIO_EEE_1000KX)
1049 adv |= ADVERTISED_1000baseKX_Full;
1050 if (eee_adv & MDIO_EEE_10GKX4)
1051 adv |= ADVERTISED_10000baseKX4_Full;
1052 if (eee_adv & MDIO_EEE_10GKR)
1053 adv |= ADVERTISED_10000baseKR_Full;
1054
1055 return adv;
1056}
1057
1058static u32 phy_eee_to_supported(u16 eee_caported)
1059{
1060 u32 supported = 0;
1061
1062 if (eee_caported & MDIO_EEE_100TX)
1063 supported |= SUPPORTED_100baseT_Full;
1064 if (eee_caported & MDIO_EEE_1000T)
1065 supported |= SUPPORTED_1000baseT_Full;
1066 if (eee_caported & MDIO_EEE_10GT)
1067 supported |= SUPPORTED_10000baseT_Full;
1068 if (eee_caported & MDIO_EEE_1000KX)
1069 supported |= SUPPORTED_1000baseKX_Full;
1070 if (eee_caported & MDIO_EEE_10GKX4)
1071 supported |= SUPPORTED_10000baseKX4_Full;
1072 if (eee_caported & MDIO_EEE_10GKR)
1073 supported |= SUPPORTED_10000baseKR_Full;
1074
1075 return supported;
1076}
1077
1078static u16 phy_adv_to_eee(u32 adv)
1079{
1080 u16 reg = 0;
1081
1082 if (adv & ADVERTISED_100baseT_Full)
1083 reg |= MDIO_EEE_100TX;
1084 if (adv & ADVERTISED_1000baseT_Full)
1085 reg |= MDIO_EEE_1000T;
1086 if (adv & ADVERTISED_10000baseT_Full)
1087 reg |= MDIO_EEE_10GT;
1088 if (adv & ADVERTISED_1000baseKX_Full)
1089 reg |= MDIO_EEE_1000KX;
1090 if (adv & ADVERTISED_10000baseKX4_Full)
1091 reg |= MDIO_EEE_10GKX4;
1092 if (adv & ADVERTISED_10000baseKR_Full)
1093 reg |= MDIO_EEE_10GKR;
1094
1095 return reg;
1096}
1097
1098/** 1038/**
1099 * phy_init_eee - init and check the EEE feature 1039 * phy_init_eee - init and check the EEE feature
1100 * @phydev: target phy_device struct 1040 * @phydev: target phy_device struct
@@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1132 if (eee_cap < 0) 1072 if (eee_cap < 0)
1133 return eee_cap; 1073 return eee_cap;
1134 1074
1135 cap = phy_eee_to_supported(eee_cap); 1075 cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1136 if (!cap) 1076 if (!cap)
1137 goto eee_exit; 1077 goto eee_exit;
1138 1078
@@ -1149,8 +1089,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1149 if (eee_adv < 0) 1089 if (eee_adv < 0)
1150 return eee_adv; 1090 return eee_adv;
1151 1091
1152 adv = phy_eee_to_adv(eee_adv); 1092 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1153 lp = phy_eee_to_adv(eee_lp); 1093 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1154 idx = phy_find_setting(phydev->speed, phydev->duplex); 1094 idx = phy_find_setting(phydev->speed, phydev->duplex);
1155 if ((lp & adv & settings[idx].setting)) 1095 if ((lp & adv & settings[idx].setting))
1156 goto eee_exit; 1096 goto eee_exit;
@@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1210 MDIO_MMD_PCS, phydev->addr); 1150 MDIO_MMD_PCS, phydev->addr);
1211 if (val < 0) 1151 if (val < 0)
1212 return val; 1152 return val;
1213 data->supported = phy_eee_to_supported(val); 1153 data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
1214 1154
1215 /* Get advertisement EEE */ 1155 /* Get advertisement EEE */
1216 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, 1156 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1217 MDIO_MMD_AN, phydev->addr); 1157 MDIO_MMD_AN, phydev->addr);
1218 if (val < 0) 1158 if (val < 0)
1219 return val; 1159 return val;
1220 data->advertised = phy_eee_to_adv(val); 1160 data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1221 1161
1222 /* Get LP advertisement EEE */ 1162 /* Get LP advertisement EEE */
1223 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE, 1163 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1224 MDIO_MMD_AN, phydev->addr); 1164 MDIO_MMD_AN, phydev->addr);
1225 if (val < 0) 1165 if (val < 0)
1226 return val; 1166 return val;
1227 data->lp_advertised = phy_eee_to_adv(val); 1167 data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1228 1168
1229 return 0; 1169 return 0;
1230} 1170}
@@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1241{ 1181{
1242 int val; 1182 int val;
1243 1183
1244 val = phy_adv_to_eee(data->advertised); 1184 val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1245 phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN, 1185 phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
1246 phydev->addr, val); 1186 phydev->addr, val);
1247 1187
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5c0557222f20..eb3f5cefeba3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -94,6 +94,18 @@ struct ppp_file {
94#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) 94#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
95 95
96/* 96/*
97 * Data structure to hold primary network stats for which
98 * we want to use 64 bit storage. Other network stats
99 * are stored in dev->stats of the ppp strucute.
100 */
101struct ppp_link_stats {
102 u64 rx_packets;
103 u64 tx_packets;
104 u64 rx_bytes;
105 u64 tx_bytes;
106};
107
108/*
97 * Data structure describing one ppp unit. 109 * Data structure describing one ppp unit.
98 * A ppp unit corresponds to a ppp network interface device 110 * A ppp unit corresponds to a ppp network interface device
99 * and represents a multilink bundle. 111 * and represents a multilink bundle.
@@ -136,6 +148,7 @@ struct ppp {
136 unsigned pass_len, active_len; 148 unsigned pass_len, active_len;
137#endif /* CONFIG_PPP_FILTER */ 149#endif /* CONFIG_PPP_FILTER */
138 struct net *ppp_net; /* the net we belong to */ 150 struct net *ppp_net; /* the net we belong to */
151 struct ppp_link_stats stats64; /* 64 bit network stats */
139}; 152};
140 153
141/* 154/*
@@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1021 return err; 1034 return err;
1022} 1035}
1023 1036
1037struct rtnl_link_stats64*
1038ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1039{
1040 struct ppp *ppp = netdev_priv(dev);
1041
1042 ppp_recv_lock(ppp);
1043 stats64->rx_packets = ppp->stats64.rx_packets;
1044 stats64->rx_bytes = ppp->stats64.rx_bytes;
1045 ppp_recv_unlock(ppp);
1046
1047 ppp_xmit_lock(ppp);
1048 stats64->tx_packets = ppp->stats64.tx_packets;
1049 stats64->tx_bytes = ppp->stats64.tx_bytes;
1050 ppp_xmit_unlock(ppp);
1051
1052 stats64->rx_errors = dev->stats.rx_errors;
1053 stats64->tx_errors = dev->stats.tx_errors;
1054 stats64->rx_dropped = dev->stats.rx_dropped;
1055 stats64->tx_dropped = dev->stats.tx_dropped;
1056 stats64->rx_length_errors = dev->stats.rx_length_errors;
1057
1058 return stats64;
1059}
1060
1024static const struct net_device_ops ppp_netdev_ops = { 1061static const struct net_device_ops ppp_netdev_ops = {
1025 .ndo_start_xmit = ppp_start_xmit, 1062 .ndo_start_xmit = ppp_start_xmit,
1026 .ndo_do_ioctl = ppp_net_ioctl, 1063 .ndo_do_ioctl = ppp_net_ioctl,
1064 .ndo_get_stats64 = ppp_get_stats64,
1027}; 1065};
1028 1066
1029static void ppp_setup(struct net_device *dev) 1067static void ppp_setup(struct net_device *dev)
@@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1157#endif /* CONFIG_PPP_FILTER */ 1195#endif /* CONFIG_PPP_FILTER */
1158 } 1196 }
1159 1197
1160 ++ppp->dev->stats.tx_packets; 1198 ++ppp->stats64.tx_packets;
1161 ppp->dev->stats.tx_bytes += skb->len - 2; 1199 ppp->stats64.tx_bytes += skb->len - 2;
1162 1200
1163 switch (proto) { 1201 switch (proto) {
1164 case PPP_IP: 1202 case PPP_IP:
@@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1745 break; 1783 break;
1746 } 1784 }
1747 1785
1748 ++ppp->dev->stats.rx_packets; 1786 ++ppp->stats64.rx_packets;
1749 ppp->dev->stats.rx_bytes += skb->len - 2; 1787 ppp->stats64.rx_bytes += skb->len - 2;
1750 1788
1751 npi = proto_to_npindex(proto); 1789 npi = proto_to_npindex(proto);
1752 if (npi < 0) { 1790 if (npi < 0) {
@@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2570 struct slcompress *vj = ppp->vj; 2608 struct slcompress *vj = ppp->vj;
2571 2609
2572 memset(st, 0, sizeof(*st)); 2610 memset(st, 0, sizeof(*st));
2573 st->p.ppp_ipackets = ppp->dev->stats.rx_packets; 2611 st->p.ppp_ipackets = ppp->stats64.rx_packets;
2574 st->p.ppp_ierrors = ppp->dev->stats.rx_errors; 2612 st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
2575 st->p.ppp_ibytes = ppp->dev->stats.rx_bytes; 2613 st->p.ppp_ibytes = ppp->stats64.rx_bytes;
2576 st->p.ppp_opackets = ppp->dev->stats.tx_packets; 2614 st->p.ppp_opackets = ppp->stats64.tx_packets;
2577 st->p.ppp_oerrors = ppp->dev->stats.tx_errors; 2615 st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
2578 st->p.ppp_obytes = ppp->dev->stats.tx_bytes; 2616 st->p.ppp_obytes = ppp->stats64.tx_bytes;
2579 if (!vj) 2617 if (!vj)
2580 return; 2618 return;
2581 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; 2619 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 6a7260b03a1e..6b08bd419fba 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -21,7 +21,7 @@ config NET_TEAM_MODE_BROADCAST
21 ---help--- 21 ---help---
22 Basic mode where packets are transmitted always by all suitable ports. 22 Basic mode where packets are transmitted always by all suitable ports.
23 23
24 All added ports are setup to have team's mac address. 24 All added ports are setup to have team's device address.
25 25
26 To compile this team mode as a module, choose M here: the module 26 To compile this team mode as a module, choose M here: the module
27 will be called team_mode_broadcast. 27 will be called team_mode_broadcast.
@@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
33 Basic mode where port used for transmitting packets is selected in 33 Basic mode where port used for transmitting packets is selected in
34 round-robin fashion using packet counter. 34 round-robin fashion using packet counter.
35 35
36 All added ports are setup to have team's mac address. 36 All added ports are setup to have team's device address.
37 37
38 To compile this team mode as a module, choose M here: the module 38 To compile this team mode as a module, choose M here: the module
39 will be called team_mode_roundrobin. 39 will be called team_mode_roundrobin.
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f8cd61f449a4..5c7547c4f802 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
54} 54}
55 55
56/* 56/*
57 * Since the ability to change mac address for open port device is tested in 57 * Since the ability to change device address for open port device is tested in
58 * team_port_add, this function can be called without control of return value 58 * team_port_add, this function can be called without control of return value
59 */ 59 */
60static int __set_port_mac(struct net_device *port_dev, 60static int __set_port_dev_addr(struct net_device *port_dev,
61 const unsigned char *dev_addr) 61 const unsigned char *dev_addr)
62{ 62{
63 struct sockaddr addr; 63 struct sockaddr addr;
64 64
65 memcpy(addr.sa_data, dev_addr, ETH_ALEN); 65 memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
66 addr.sa_family = ARPHRD_ETHER; 66 addr.sa_family = port_dev->type;
67 return dev_set_mac_address(port_dev, &addr); 67 return dev_set_mac_address(port_dev, &addr);
68} 68}
69 69
70static int team_port_set_orig_mac(struct team_port *port) 70static int team_port_set_orig_dev_addr(struct team_port *port)
71{ 71{
72 return __set_port_mac(port->dev, port->orig.dev_addr); 72 return __set_port_dev_addr(port->dev, port->orig.dev_addr);
73} 73}
74 74
75int team_port_set_team_mac(struct team_port *port) 75int team_port_set_team_dev_addr(struct team_port *port)
76{ 76{
77 return __set_port_mac(port->dev, port->team->dev->dev_addr); 77 return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
78} 78}
79EXPORT_SYMBOL(team_port_set_team_mac); 79EXPORT_SYMBOL(team_port_set_team_dev_addr);
80 80
81static void team_refresh_port_linkup(struct team_port *port) 81static void team_refresh_port_linkup(struct team_port *port)
82{ 82{
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
658} 658}
659 659
660 660
661/*************************************
662 * Multiqueue Tx port select override
663 *************************************/
664
665static int team_queue_override_init(struct team *team)
666{
667 struct list_head *listarr;
668 unsigned int queue_cnt = team->dev->num_tx_queues - 1;
669 unsigned int i;
670
671 if (!queue_cnt)
672 return 0;
673 listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
674 if (!listarr)
675 return -ENOMEM;
676 team->qom_lists = listarr;
677 for (i = 0; i < queue_cnt; i++)
678 INIT_LIST_HEAD(listarr++);
679 return 0;
680}
681
682static void team_queue_override_fini(struct team *team)
683{
684 kfree(team->qom_lists);
685}
686
687static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
688{
689 return &team->qom_lists[queue_id - 1];
690}
691
692/*
693 * note: already called with rcu_read_lock
694 */
695static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
696{
697 struct list_head *qom_list;
698 struct team_port *port;
699
700 if (!team->queue_override_enabled || !skb->queue_mapping)
701 return false;
702 qom_list = __team_get_qom_list(team, skb->queue_mapping);
703 list_for_each_entry_rcu(port, qom_list, qom_list) {
704 if (!team_dev_queue_xmit(team, port, skb))
705 return true;
706 }
707 return false;
708}
709
710static void __team_queue_override_port_del(struct team *team,
711 struct team_port *port)
712{
713 list_del_rcu(&port->qom_list);
714 synchronize_rcu();
715 INIT_LIST_HEAD(&port->qom_list);
716}
717
718static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
719 struct team_port *cur)
720{
721 if (port->priority < cur->priority)
722 return true;
723 if (port->priority > cur->priority)
724 return false;
725 if (port->index < cur->index)
726 return true;
727 return false;
728}
729
730static void __team_queue_override_port_add(struct team *team,
731 struct team_port *port)
732{
733 struct team_port *cur;
734 struct list_head *qom_list;
735 struct list_head *node;
736
737 if (!port->queue_id || !team_port_enabled(port))
738 return;
739
740 qom_list = __team_get_qom_list(team, port->queue_id);
741 node = qom_list;
742 list_for_each_entry(cur, qom_list, qom_list) {
743 if (team_queue_override_port_has_gt_prio_than(port, cur))
744 break;
745 node = &cur->qom_list;
746 }
747 list_add_tail_rcu(&port->qom_list, node);
748}
749
750static void __team_queue_override_enabled_check(struct team *team)
751{
752 struct team_port *port;
753 bool enabled = false;
754
755 list_for_each_entry(port, &team->port_list, list) {
756 if (!list_empty(&port->qom_list)) {
757 enabled = true;
758 break;
759 }
760 }
761 if (enabled == team->queue_override_enabled)
762 return;
763 netdev_dbg(team->dev, "%s queue override\n",
764 enabled ? "Enabling" : "Disabling");
765 team->queue_override_enabled = enabled;
766}
767
768static void team_queue_override_port_refresh(struct team *team,
769 struct team_port *port)
770{
771 __team_queue_override_port_del(team, port);
772 __team_queue_override_port_add(team, port);
773 __team_queue_override_enabled_check(team);
774}
775
776
661/**************** 777/****************
662 * Port handling 778 * Port handling
663 ****************/ 779 ****************/
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
688 hlist_add_head_rcu(&port->hlist, 804 hlist_add_head_rcu(&port->hlist,
689 team_port_index_hash(team, port->index)); 805 team_port_index_hash(team, port->index));
690 team_adjust_ops(team); 806 team_adjust_ops(team);
807 team_queue_override_port_refresh(team, port);
691 if (team->ops.port_enabled) 808 if (team->ops.port_enabled)
692 team->ops.port_enabled(team, port); 809 team->ops.port_enabled(team, port);
693} 810}
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
716 hlist_del_rcu(&port->hlist); 833 hlist_del_rcu(&port->hlist);
717 __reconstruct_port_hlist(team, port->index); 834 __reconstruct_port_hlist(team, port->index);
718 port->index = -1; 835 port->index = -1;
836 team_queue_override_port_refresh(team, port);
719 __team_adjust_ops(team, team->en_port_count - 1); 837 __team_adjust_ops(team, team->en_port_count - 1);
720 /* 838 /*
721 * Wait until readers see adjusted ops. This ensures that 839 * Wait until readers see adjusted ops. This ensures that
@@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
849#endif 967#endif
850 968
851static void __team_port_change_port_added(struct team_port *port, bool linkup); 969static void __team_port_change_port_added(struct team_port *port, bool linkup);
970static int team_dev_type_check_change(struct net_device *dev,
971 struct net_device *port_dev);
852 972
853static int team_port_add(struct team *team, struct net_device *port_dev) 973static int team_port_add(struct team *team, struct net_device *port_dev)
854{ 974{
@@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
857 char *portname = port_dev->name; 977 char *portname = port_dev->name;
858 int err; 978 int err;
859 979
860 if (port_dev->flags & IFF_LOOPBACK || 980 if (port_dev->flags & IFF_LOOPBACK) {
861 port_dev->type != ARPHRD_ETHER) { 981 netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
862 netdev_err(dev, "Device %s is of an unsupported type\n",
863 portname); 982 portname);
864 return -EINVAL; 983 return -EINVAL;
865 } 984 }
@@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
870 return -EBUSY; 989 return -EBUSY;
871 } 990 }
872 991
992 if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
993 vlan_uses_dev(dev)) {
994 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
995 portname);
996 return -EPERM;
997 }
998
999 err = team_dev_type_check_change(dev, port_dev);
1000 if (err)
1001 return err;
1002
873 if (port_dev->flags & IFF_UP) { 1003 if (port_dev->flags & IFF_UP) {
874 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n", 1004 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
875 portname); 1005 portname);
@@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
883 1013
884 port->dev = port_dev; 1014 port->dev = port_dev;
885 port->team = team; 1015 port->team = team;
1016 INIT_LIST_HEAD(&port->qom_list);
886 1017
887 port->orig.mtu = port_dev->mtu; 1018 port->orig.mtu = port_dev->mtu;
888 err = dev_set_mtu(port_dev, dev->mtu); 1019 err = dev_set_mtu(port_dev, dev->mtu);
@@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
891 goto err_set_mtu; 1022 goto err_set_mtu;
892 } 1023 }
893 1024
894 memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); 1025 memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
895 1026
896 err = team_port_enter(team, port); 1027 err = team_port_enter(team, port);
897 if (err) { 1028 if (err) {
@@ -972,7 +1103,7 @@ err_vids_add:
972 1103
973err_dev_open: 1104err_dev_open:
974 team_port_leave(team, port); 1105 team_port_leave(team, port);
975 team_port_set_orig_mac(port); 1106 team_port_set_orig_dev_addr(port);
976 1107
977err_port_enter: 1108err_port_enter:
978 dev_set_mtu(port_dev, port->orig.mtu); 1109 dev_set_mtu(port_dev, port->orig.mtu);
@@ -1010,7 +1141,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1010 vlan_vids_del_by_dev(port_dev, dev); 1141 vlan_vids_del_by_dev(port_dev, dev);
1011 dev_close(port_dev); 1142 dev_close(port_dev);
1012 team_port_leave(team, port); 1143 team_port_leave(team, port);
1013 team_port_set_orig_mac(port); 1144 team_port_set_orig_dev_addr(port);
1014 dev_set_mtu(port_dev, port->orig.mtu); 1145 dev_set_mtu(port_dev, port->orig.mtu);
1015 synchronize_rcu(); 1146 synchronize_rcu();
1016 kfree(port); 1147 kfree(port);
@@ -1095,6 +1226,49 @@ static int team_user_linkup_en_option_set(struct team *team,
1095 return 0; 1226 return 0;
1096} 1227}
1097 1228
1229static int team_priority_option_get(struct team *team,
1230 struct team_gsetter_ctx *ctx)
1231{
1232 struct team_port *port = ctx->info->port;
1233
1234 ctx->data.s32_val = port->priority;
1235 return 0;
1236}
1237
1238static int team_priority_option_set(struct team *team,
1239 struct team_gsetter_ctx *ctx)
1240{
1241 struct team_port *port = ctx->info->port;
1242
1243 port->priority = ctx->data.s32_val;
1244 team_queue_override_port_refresh(team, port);
1245 return 0;
1246}
1247
1248static int team_queue_id_option_get(struct team *team,
1249 struct team_gsetter_ctx *ctx)
1250{
1251 struct team_port *port = ctx->info->port;
1252
1253 ctx->data.u32_val = port->queue_id;
1254 return 0;
1255}
1256
1257static int team_queue_id_option_set(struct team *team,
1258 struct team_gsetter_ctx *ctx)
1259{
1260 struct team_port *port = ctx->info->port;
1261
1262 if (port->queue_id == ctx->data.u32_val)
1263 return 0;
1264 if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
1265 return -EINVAL;
1266 port->queue_id = ctx->data.u32_val;
1267 team_queue_override_port_refresh(team, port);
1268 return 0;
1269}
1270
1271
1098static const struct team_option team_options[] = { 1272static const struct team_option team_options[] = {
1099 { 1273 {
1100 .name = "mode", 1274 .name = "mode",
@@ -1123,6 +1297,20 @@ static const struct team_option team_options[] = {
1123 .getter = team_user_linkup_en_option_get, 1297 .getter = team_user_linkup_en_option_get,
1124 .setter = team_user_linkup_en_option_set, 1298 .setter = team_user_linkup_en_option_set,
1125 }, 1299 },
1300 {
1301 .name = "priority",
1302 .type = TEAM_OPTION_TYPE_S32,
1303 .per_port = true,
1304 .getter = team_priority_option_get,
1305 .setter = team_priority_option_set,
1306 },
1307 {
1308 .name = "queue_id",
1309 .type = TEAM_OPTION_TYPE_U32,
1310 .per_port = true,
1311 .getter = team_queue_id_option_get,
1312 .setter = team_queue_id_option_set,
1313 },
1126}; 1314};
1127 1315
1128static struct lock_class_key team_netdev_xmit_lock_key; 1316static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1158,6 +1346,9 @@ static int team_init(struct net_device *dev)
1158 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1346 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1159 INIT_HLIST_HEAD(&team->en_port_hlist[i]); 1347 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1160 INIT_LIST_HEAD(&team->port_list); 1348 INIT_LIST_HEAD(&team->port_list);
1349 err = team_queue_override_init(team);
1350 if (err)
1351 goto err_team_queue_override_init;
1161 1352
1162 team_adjust_ops(team); 1353 team_adjust_ops(team);
1163 1354
@@ -1173,6 +1364,8 @@ static int team_init(struct net_device *dev)
1173 return 0; 1364 return 0;
1174 1365
1175err_options_register: 1366err_options_register:
1367 team_queue_override_fini(team);
1368err_team_queue_override_init:
1176 free_percpu(team->pcpu_stats); 1369 free_percpu(team->pcpu_stats);
1177 1370
1178 return err; 1371 return err;
@@ -1190,6 +1383,7 @@ static void team_uninit(struct net_device *dev)
1190 1383
1191 __team_change_mode(team, NULL); /* cleanup */ 1384 __team_change_mode(team, NULL); /* cleanup */
1192 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); 1385 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1386 team_queue_override_fini(team);
1193 mutex_unlock(&team->lock); 1387 mutex_unlock(&team->lock);
1194} 1388}
1195 1389
@@ -1219,10 +1413,12 @@ static int team_close(struct net_device *dev)
1219static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) 1413static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1220{ 1414{
1221 struct team *team = netdev_priv(dev); 1415 struct team *team = netdev_priv(dev);
1222 bool tx_success = false; 1416 bool tx_success;
1223 unsigned int len = skb->len; 1417 unsigned int len = skb->len;
1224 1418
1225 tx_success = team->ops.transmit(team, skb); 1419 tx_success = team_queue_override_transmit(team, skb);
1420 if (!tx_success)
1421 tx_success = team->ops.transmit(team, skb);
1226 if (tx_success) { 1422 if (tx_success) {
1227 struct team_pcpu_stats *pcpu_stats; 1423 struct team_pcpu_stats *pcpu_stats;
1228 1424
@@ -1296,17 +1492,18 @@ static void team_set_rx_mode(struct net_device *dev)
1296 1492
1297static int team_set_mac_address(struct net_device *dev, void *p) 1493static int team_set_mac_address(struct net_device *dev, void *p)
1298{ 1494{
1495 struct sockaddr *addr = p;
1299 struct team *team = netdev_priv(dev); 1496 struct team *team = netdev_priv(dev);
1300 struct team_port *port; 1497 struct team_port *port;
1301 int err;
1302 1498
1303 err = eth_mac_addr(dev, p); 1499 if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1304 if (err) 1500 return -EADDRNOTAVAIL;
1305 return err; 1501 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1502 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1306 rcu_read_lock(); 1503 rcu_read_lock();
1307 list_for_each_entry_rcu(port, &team->port_list, list) 1504 list_for_each_entry_rcu(port, &team->port_list, list)
1308 if (team->ops.port_change_mac) 1505 if (team->ops.port_change_dev_addr)
1309 team->ops.port_change_mac(team, port); 1506 team->ops.port_change_dev_addr(team, port);
1310 rcu_read_unlock(); 1507 rcu_read_unlock();
1311 return 0; 1508 return 0;
1312} 1509}
@@ -1537,6 +1734,45 @@ static const struct net_device_ops team_netdev_ops = {
1537 * rt netlink interface 1734 * rt netlink interface
1538 ***********************/ 1735 ***********************/
1539 1736
1737static void team_setup_by_port(struct net_device *dev,
1738 struct net_device *port_dev)
1739{
1740 dev->header_ops = port_dev->header_ops;
1741 dev->type = port_dev->type;
1742 dev->hard_header_len = port_dev->hard_header_len;
1743 dev->addr_len = port_dev->addr_len;
1744 dev->mtu = port_dev->mtu;
1745 memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
1746 memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
1747 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1748}
1749
1750static int team_dev_type_check_change(struct net_device *dev,
1751 struct net_device *port_dev)
1752{
1753 struct team *team = netdev_priv(dev);
1754 char *portname = port_dev->name;
1755 int err;
1756
1757 if (dev->type == port_dev->type)
1758 return 0;
1759 if (!list_empty(&team->port_list)) {
1760 netdev_err(dev, "Device %s is of different type\n", portname);
1761 return -EBUSY;
1762 }
1763 err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
1764 err = notifier_to_errno(err);
1765 if (err) {
1766 netdev_err(dev, "Refused to change device type\n");
1767 return err;
1768 }
1769 dev_uc_flush(dev);
1770 dev_mc_flush(dev);
1771 team_setup_by_port(dev, port_dev);
1772 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
1773 return 0;
1774}
1775
1540static void team_setup(struct net_device *dev) 1776static void team_setup(struct net_device *dev)
1541{ 1777{
1542 ether_setup(dev); 1778 ether_setup(dev);
@@ -1651,7 +1887,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1651 if (!msg) 1887 if (!msg)
1652 return -ENOMEM; 1888 return -ENOMEM;
1653 1889
1654 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, 1890 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
1655 &team_nl_family, 0, TEAM_CMD_NOOP); 1891 &team_nl_family, 0, TEAM_CMD_NOOP);
1656 if (!hdr) { 1892 if (!hdr) {
1657 err = -EMSGSIZE; 1893 err = -EMSGSIZE;
@@ -1660,7 +1896,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1660 1896
1661 genlmsg_end(msg, hdr); 1897 genlmsg_end(msg, hdr);
1662 1898
1663 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); 1899 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
1664 1900
1665err_msg_put: 1901err_msg_put:
1666 nlmsg_free(msg); 1902 nlmsg_free(msg);
@@ -1717,7 +1953,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
1717 if (err < 0) 1953 if (err < 0)
1718 goto err_fill; 1954 goto err_fill;
1719 1955
1720 err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); 1956 err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
1721 return err; 1957 return err;
1722 1958
1723err_fill: 1959err_fill:
@@ -1726,11 +1962,11 @@ err_fill:
1726} 1962}
1727 1963
1728typedef int team_nl_send_func_t(struct sk_buff *skb, 1964typedef int team_nl_send_func_t(struct sk_buff *skb,
1729 struct team *team, u32 pid); 1965 struct team *team, u32 portid);
1730 1966
1731static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid) 1967static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
1732{ 1968{
1733 return genlmsg_unicast(dev_net(team->dev), skb, pid); 1969 return genlmsg_unicast(dev_net(team->dev), skb, portid);
1734} 1970}
1735 1971
1736static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, 1972static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
@@ -1790,6 +2026,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1790 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) 2026 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1791 goto nest_cancel; 2027 goto nest_cancel;
1792 break; 2028 break;
2029 case TEAM_OPTION_TYPE_S32:
2030 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2031 goto nest_cancel;
2032 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2033 goto nest_cancel;
2034 break;
1793 default: 2035 default:
1794 BUG(); 2036 BUG();
1795 } 2037 }
@@ -1809,13 +2051,13 @@ nest_cancel:
1809} 2051}
1810 2052
1811static int __send_and_alloc_skb(struct sk_buff **pskb, 2053static int __send_and_alloc_skb(struct sk_buff **pskb,
1812 struct team *team, u32 pid, 2054 struct team *team, u32 portid,
1813 team_nl_send_func_t *send_func) 2055 team_nl_send_func_t *send_func)
1814{ 2056{
1815 int err; 2057 int err;
1816 2058
1817 if (*pskb) { 2059 if (*pskb) {
1818 err = send_func(*pskb, team, pid); 2060 err = send_func(*pskb, team, portid);
1819 if (err) 2061 if (err)
1820 return err; 2062 return err;
1821 } 2063 }
@@ -1825,7 +2067,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb,
1825 return 0; 2067 return 0;
1826} 2068}
1827 2069
1828static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, 2070static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
1829 int flags, team_nl_send_func_t *send_func, 2071 int flags, team_nl_send_func_t *send_func,
1830 struct list_head *sel_opt_inst_list) 2072 struct list_head *sel_opt_inst_list)
1831{ 2073{
@@ -1842,11 +2084,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
1842 struct team_option_inst, tmp_list); 2084 struct team_option_inst, tmp_list);
1843 2085
1844start_again: 2086start_again:
1845 err = __send_and_alloc_skb(&skb, team, pid, send_func); 2087 err = __send_and_alloc_skb(&skb, team, portid, send_func);
1846 if (err) 2088 if (err)
1847 return err; 2089 return err;
1848 2090
1849 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, 2091 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
1850 TEAM_CMD_OPTIONS_GET); 2092 TEAM_CMD_OPTIONS_GET);
1851 if (!hdr) 2093 if (!hdr)
1852 return -EMSGSIZE; 2094 return -EMSGSIZE;
@@ -1879,15 +2121,15 @@ start_again:
1879 goto start_again; 2121 goto start_again;
1880 2122
1881send_done: 2123send_done:
1882 nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); 2124 nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
1883 if (!nlh) { 2125 if (!nlh) {
1884 err = __send_and_alloc_skb(&skb, team, pid, send_func); 2126 err = __send_and_alloc_skb(&skb, team, portid, send_func);
1885 if (err) 2127 if (err)
1886 goto errout; 2128 goto errout;
1887 goto send_done; 2129 goto send_done;
1888 } 2130 }
1889 2131
1890 return send_func(skb, team, pid); 2132 return send_func(skb, team, portid);
1891 2133
1892nla_put_failure: 2134nla_put_failure:
1893 err = -EMSGSIZE; 2135 err = -EMSGSIZE;
@@ -1910,7 +2152,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1910 2152
1911 list_for_each_entry(opt_inst, &team->option_inst_list, list) 2153 list_for_each_entry(opt_inst, &team->option_inst_list, list)
1912 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); 2154 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1913 err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq, 2155 err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
1914 NLM_F_ACK, team_nl_send_unicast, 2156 NLM_F_ACK, team_nl_send_unicast,
1915 &sel_opt_inst_list); 2157 &sel_opt_inst_list);
1916 2158
@@ -1978,6 +2220,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1978 case NLA_FLAG: 2220 case NLA_FLAG:
1979 opt_type = TEAM_OPTION_TYPE_BOOL; 2221 opt_type = TEAM_OPTION_TYPE_BOOL;
1980 break; 2222 break;
2223 case NLA_S32:
2224 opt_type = TEAM_OPTION_TYPE_S32;
2225 break;
1981 default: 2226 default:
1982 goto team_put; 2227 goto team_put;
1983 } 2228 }
@@ -2034,6 +2279,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2034 case TEAM_OPTION_TYPE_BOOL: 2279 case TEAM_OPTION_TYPE_BOOL:
2035 ctx.data.bool_val = attr_data ? true : false; 2280 ctx.data.bool_val = attr_data ? true : false;
2036 break; 2281 break;
2282 case TEAM_OPTION_TYPE_S32:
2283 ctx.data.s32_val = nla_get_s32(attr_data);
2284 break;
2037 default: 2285 default:
2038 BUG(); 2286 BUG();
2039 } 2287 }
@@ -2058,7 +2306,7 @@ team_put:
2058} 2306}
2059 2307
2060static int team_nl_fill_port_list_get(struct sk_buff *skb, 2308static int team_nl_fill_port_list_get(struct sk_buff *skb,
2061 u32 pid, u32 seq, int flags, 2309 u32 portid, u32 seq, int flags,
2062 struct team *team, 2310 struct team *team,
2063 bool fillall) 2311 bool fillall)
2064{ 2312{
@@ -2066,7 +2314,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
2066 void *hdr; 2314 void *hdr;
2067 struct team_port *port; 2315 struct team_port *port;
2068 2316
2069 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 2317 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
2070 TEAM_CMD_PORT_LIST_GET); 2318 TEAM_CMD_PORT_LIST_GET);
2071 if (!hdr) 2319 if (!hdr)
2072 return -EMSGSIZE; 2320 return -EMSGSIZE;
@@ -2115,7 +2363,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
2115 struct genl_info *info, int flags, 2363 struct genl_info *info, int flags,
2116 struct team *team) 2364 struct team *team)
2117{ 2365{
2118 return team_nl_fill_port_list_get(skb, info->snd_pid, 2366 return team_nl_fill_port_list_get(skb, info->snd_portid,
2119 info->snd_seq, NLM_F_ACK, 2367 info->snd_seq, NLM_F_ACK,
2120 team, true); 2368 team, true);
2121} 2369}
@@ -2168,7 +2416,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
2168}; 2416};
2169 2417
2170static int team_nl_send_multicast(struct sk_buff *skb, 2418static int team_nl_send_multicast(struct sk_buff *skb,
2171 struct team *team, u32 pid) 2419 struct team *team, u32 portid)
2172{ 2420{
2173 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, 2421 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
2174 team_change_event_mcgrp.id, GFP_KERNEL); 2422 team_change_event_mcgrp.id, GFP_KERNEL);
@@ -2246,7 +2494,7 @@ static void __team_options_change_check(struct team *team)
2246 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); 2494 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2247 } 2495 }
2248 err = team_nl_send_event_options_get(team, &sel_opt_inst_list); 2496 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2249 if (err) 2497 if (err && err != -ESRCH)
2250 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", 2498 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2251 err); 2499 err);
2252} 2500}
@@ -2275,9 +2523,9 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
2275 2523
2276send_event: 2524send_event:
2277 err = team_nl_send_event_port_list_get(port->team); 2525 err = team_nl_send_event_port_list_get(port->team);
2278 if (err) 2526 if (err && err != -ESRCH)
2279 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", 2527 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2280 port->dev->name); 2528 port->dev->name, err);
2281 2529
2282} 2530}
2283 2531
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c96e4d2967f0..9db0171e9366 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
48 48
49static int bc_port_enter(struct team *team, struct team_port *port) 49static int bc_port_enter(struct team *team, struct team_port *port)
50{ 50{
51 return team_port_set_team_mac(port); 51 return team_port_set_team_dev_addr(port);
52} 52}
53 53
54static void bc_port_change_mac(struct team *team, struct team_port *port) 54static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
55{ 55{
56 team_port_set_team_mac(port); 56 team_port_set_team_dev_addr(port);
57} 57}
58 58
59static const struct team_mode_ops bc_mode_ops = { 59static const struct team_mode_ops bc_mode_ops = {
60 .transmit = bc_transmit, 60 .transmit = bc_transmit,
61 .port_enter = bc_port_enter, 61 .port_enter = bc_port_enter,
62 .port_change_mac = bc_port_change_mac, 62 .port_change_dev_addr = bc_port_change_dev_addr,
63}; 63};
64 64
65static const struct team_mode bc_mode = { 65static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index ad7ed0ec544c..105135aa8f05 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -66,18 +66,18 @@ drop:
66 66
67static int rr_port_enter(struct team *team, struct team_port *port) 67static int rr_port_enter(struct team *team, struct team_port *port)
68{ 68{
69 return team_port_set_team_mac(port); 69 return team_port_set_team_dev_addr(port);
70} 70}
71 71
72static void rr_port_change_mac(struct team *team, struct team_port *port) 72static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
73{ 73{
74 team_port_set_team_mac(port); 74 team_port_set_team_dev_addr(port);
75} 75}
76 76
77static const struct team_mode_ops rr_mode_ops = { 77static const struct team_mode_ops rr_mode_ops = {
78 .transmit = rr_transmit, 78 .transmit = rr_transmit,
79 .port_enter = rr_port_enter, 79 .port_enter = rr_port_enter,
80 .port_change_mac = rr_port_change_mac, 80 .port_change_dev_addr = rr_port_change_dev_addr,
81}; 81};
82 82
83static const struct team_mode rr_mode = { 83static const struct team_mode rr_mode = {
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 32e31c5c5dc6..33ab824773c5 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -221,7 +221,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
221 /* Get the MAC address */ 221 /* Get the MAC address */
222 ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); 222 ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
223 if (ret < 0) { 223 if (ret < 0) {
224 dbg("read AX_CMD_READ_NODE_ID failed: %d", ret); 224 netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
225 ret);
225 goto out; 226 goto out;
226 } 227 }
227 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 228 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -303,7 +304,7 @@ static int ax88772_reset(struct usbnet *dev)
303 304
304 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); 305 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
305 if (ret < 0) { 306 if (ret < 0) {
306 dbg("Select PHY #1 failed: %d", ret); 307 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
307 goto out; 308 goto out;
308 } 309 }
309 310
@@ -331,13 +332,13 @@ static int ax88772_reset(struct usbnet *dev)
331 332
332 msleep(150); 333 msleep(150);
333 rx_ctl = asix_read_rx_ctl(dev); 334 rx_ctl = asix_read_rx_ctl(dev);
334 dbg("RX_CTL is 0x%04x after software reset", rx_ctl); 335 netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
335 ret = asix_write_rx_ctl(dev, 0x0000); 336 ret = asix_write_rx_ctl(dev, 0x0000);
336 if (ret < 0) 337 if (ret < 0)
337 goto out; 338 goto out;
338 339
339 rx_ctl = asix_read_rx_ctl(dev); 340 rx_ctl = asix_read_rx_ctl(dev);
340 dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl); 341 netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
341 342
342 ret = asix_sw_reset(dev, AX_SWRESET_PRL); 343 ret = asix_sw_reset(dev, AX_SWRESET_PRL);
343 if (ret < 0) 344 if (ret < 0)
@@ -364,7 +365,7 @@ static int ax88772_reset(struct usbnet *dev)
364 AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT, 365 AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
365 AX88772_IPG2_DEFAULT, 0, NULL); 366 AX88772_IPG2_DEFAULT, 0, NULL);
366 if (ret < 0) { 367 if (ret < 0) {
367 dbg("Write IPG,IPG1,IPG2 failed: %d", ret); 368 netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
368 goto out; 369 goto out;
369 } 370 }
370 371
@@ -381,10 +382,13 @@ static int ax88772_reset(struct usbnet *dev)
381 goto out; 382 goto out;
382 383
383 rx_ctl = asix_read_rx_ctl(dev); 384 rx_ctl = asix_read_rx_ctl(dev);
384 dbg("RX_CTL is 0x%04x after all initializations", rx_ctl); 385 netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
386 rx_ctl);
385 387
386 rx_ctl = asix_read_medium_status(dev); 388 rx_ctl = asix_read_medium_status(dev);
387 dbg("Medium Status is 0x%04x after all initializations", rx_ctl); 389 netdev_dbg(dev->net,
390 "Medium Status is 0x%04x after all initializations\n",
391 rx_ctl);
388 392
389 return 0; 393 return 0;
390 394
@@ -416,7 +420,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
416 /* Get the MAC address */ 420 /* Get the MAC address */
417 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); 421 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
418 if (ret < 0) { 422 if (ret < 0) {
419 dbg("Failed to read MAC address: %d", ret); 423 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
420 return ret; 424 return ret;
421 } 425 }
422 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 426 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -439,7 +443,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
439 /* Reset the PHY to normal operation mode */ 443 /* Reset the PHY to normal operation mode */
440 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); 444 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
441 if (ret < 0) { 445 if (ret < 0) {
442 dbg("Select PHY #1 failed: %d", ret); 446 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
443 return ret; 447 return ret;
444 } 448 }
445 449
@@ -459,7 +463,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
459 463
460 /* Read PHYID register *AFTER* the PHY was reset properly */ 464 /* Read PHYID register *AFTER* the PHY was reset properly */
461 phyid = asix_get_phyid(dev); 465 phyid = asix_get_phyid(dev);
462 dbg("PHYID=0x%08x", phyid); 466 netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
463 467
464 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 468 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
465 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 469 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -575,13 +579,13 @@ static int ax88178_reset(struct usbnet *dev)
575 u32 phyid; 579 u32 phyid;
576 580
577 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); 581 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
578 dbg("GPIO Status: 0x%04x", status); 582 netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
579 583
580 asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL); 584 asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
581 asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom); 585 asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
582 asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL); 586 asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
583 587
584 dbg("EEPROM index 0x17 is 0x%04x", eeprom); 588 netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
585 589
586 if (eeprom == cpu_to_le16(0xffff)) { 590 if (eeprom == cpu_to_le16(0xffff)) {
587 data->phymode = PHY_MODE_MARVELL; 591 data->phymode = PHY_MODE_MARVELL;
@@ -592,7 +596,7 @@ static int ax88178_reset(struct usbnet *dev)
592 data->ledmode = le16_to_cpu(eeprom) >> 8; 596 data->ledmode = le16_to_cpu(eeprom) >> 8;
593 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1; 597 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
594 } 598 }
595 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); 599 netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
596 600
597 /* Power up external GigaPHY through AX88178 GPIO pin */ 601 /* Power up external GigaPHY through AX88178 GPIO pin */
598 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); 602 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
@@ -601,14 +605,14 @@ static int ax88178_reset(struct usbnet *dev)
601 asix_write_gpio(dev, 0x001c, 300); 605 asix_write_gpio(dev, 0x001c, 300);
602 asix_write_gpio(dev, 0x003c, 30); 606 asix_write_gpio(dev, 0x003c, 30);
603 } else { 607 } else {
604 dbg("gpio phymode == 1 path"); 608 netdev_dbg(dev->net, "gpio phymode == 1 path\n");
605 asix_write_gpio(dev, AX_GPIO_GPO1EN, 30); 609 asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
606 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); 610 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
607 } 611 }
608 612
609 /* Read PHYID register *AFTER* powering up PHY */ 613 /* Read PHYID register *AFTER* powering up PHY */
610 phyid = asix_get_phyid(dev); 614 phyid = asix_get_phyid(dev);
611 dbg("PHYID=0x%08x", phyid); 615 netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
612 616
613 /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */ 617 /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
614 asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL); 618 asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
@@ -770,7 +774,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
770 /* Get the MAC address */ 774 /* Get the MAC address */
771 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); 775 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
772 if (ret < 0) { 776 if (ret < 0) {
773 dbg("Failed to read MAC address: %d", ret); 777 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
774 return ret; 778 return ret;
775 } 779 }
776 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 780 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -930,6 +934,10 @@ static const struct usb_device_id products [] = {
930 USB_DEVICE (0x04f1, 0x3008), 934 USB_DEVICE (0x04f1, 0x3008),
931 .driver_info = (unsigned long) &ax8817x_info, 935 .driver_info = (unsigned long) &ax8817x_info,
932}, { 936}, {
937 // Lenovo U2L100P 10/100
938 USB_DEVICE (0x17ef, 0x7203),
939 .driver_info = (unsigned long) &ax88772_info,
940}, {
933 // ASIX AX88772B 10/100 941 // ASIX AX88772B 10/100
934 USB_DEVICE (0x0b95, 0x772b), 942 USB_DEVICE (0x0b95, 0x772b),
935 .driver_info = (unsigned long) &ax88772_info, 943 .driver_info = (unsigned long) &ax88772_info,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 26c5bebd9eca..18d9579123ea 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -236,7 +236,8 @@ static void catc_rx_done(struct urb *urb)
236 } 236 }
237 237
238 if (status) { 238 if (status) {
239 dbg("rx_done, status %d, length %d", status, urb->actual_length); 239 dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
240 status, urb->actual_length);
240 return; 241 return;
241 } 242 }
242 243
@@ -275,10 +276,11 @@ static void catc_rx_done(struct urb *urb)
275 if (atomic_read(&catc->recq_sz)) { 276 if (atomic_read(&catc->recq_sz)) {
276 int state; 277 int state;
277 atomic_dec(&catc->recq_sz); 278 atomic_dec(&catc->recq_sz);
278 dbg("getting extra packet"); 279 netdev_dbg(catc->netdev, "getting extra packet\n");
279 urb->dev = catc->usbdev; 280 urb->dev = catc->usbdev;
280 if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { 281 if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
281 dbg("submit(rx_urb) status %d", state); 282 netdev_dbg(catc->netdev,
283 "submit(rx_urb) status %d\n", state);
282 } 284 }
283 } else { 285 } else {
284 clear_bit(RX_RUNNING, &catc->flags); 286 clear_bit(RX_RUNNING, &catc->flags);
@@ -317,18 +319,20 @@ static void catc_irq_done(struct urb *urb)
317 return; 319 return;
318 /* -EPIPE: should clear the halt */ 320 /* -EPIPE: should clear the halt */
319 default: /* error */ 321 default: /* error */
320 dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]); 322 dev_dbg(&urb->dev->dev,
323 "irq_done, status %d, data %02x %02x.\n",
324 status, data[0], data[1]);
321 goto resubmit; 325 goto resubmit;
322 } 326 }
323 327
324 if (linksts == LinkGood) { 328 if (linksts == LinkGood) {
325 netif_carrier_on(catc->netdev); 329 netif_carrier_on(catc->netdev);
326 dbg("link ok"); 330 netdev_dbg(catc->netdev, "link ok\n");
327 } 331 }
328 332
329 if (linksts == LinkBad) { 333 if (linksts == LinkBad) {
330 netif_carrier_off(catc->netdev); 334 netif_carrier_off(catc->netdev);
331 dbg("link bad"); 335 netdev_dbg(catc->netdev, "link bad\n");
332 } 336 }
333 337
334 if (hasdata) { 338 if (hasdata) {
@@ -385,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
385 int r, status = urb->status; 389 int r, status = urb->status;
386 390
387 if (status == -ECONNRESET) { 391 if (status == -ECONNRESET) {
388 dbg("Tx Reset."); 392 dev_dbg(&urb->dev->dev, "Tx Reset.\n");
389 urb->status = 0; 393 urb->status = 0;
390 catc->netdev->trans_start = jiffies; 394 catc->netdev->trans_start = jiffies;
391 catc->netdev->stats.tx_errors++; 395 catc->netdev->stats.tx_errors++;
@@ -395,7 +399,8 @@ static void catc_tx_done(struct urb *urb)
395 } 399 }
396 400
397 if (status) { 401 if (status) {
398 dbg("tx_done, status %d, length %d", status, urb->actual_length); 402 dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
403 status, urb->actual_length);
399 return; 404 return;
400 } 405 }
401 406
@@ -511,7 +516,8 @@ static void catc_ctrl_done(struct urb *urb)
511 int status = urb->status; 516 int status = urb->status;
512 517
513 if (status) 518 if (status)
514 dbg("ctrl_done, status %d, len %d.", status, urb->actual_length); 519 dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
520 status, urb->actual_length);
515 521
516 spin_lock_irqsave(&catc->ctrl_lock, flags); 522 spin_lock_irqsave(&catc->ctrl_lock, flags);
517 523
@@ -667,7 +673,9 @@ static void catc_set_multicast_list(struct net_device *netdev)
667 f5u011_mchash_async(catc, catc->multicast); 673 f5u011_mchash_async(catc, catc->multicast);
668 if (catc->rxmode[0] != rx) { 674 if (catc->rxmode[0] != rx) {
669 catc->rxmode[0] = rx; 675 catc->rxmode[0] = rx;
670 dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]); 676 netdev_dbg(catc->netdev,
677 "Setting RX mode to %2.2X %2.2X\n",
678 catc->rxmode[0], catc->rxmode[1]);
671 f5u011_rxmode_async(catc, catc->rxmode); 679 f5u011_rxmode_async(catc, catc->rxmode);
672 } 680 }
673 } 681 }
@@ -766,6 +774,7 @@ static const struct net_device_ops catc_netdev_ops = {
766 774
767static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) 775static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
768{ 776{
777 struct device *dev = &intf->dev;
769 struct usb_device *usbdev = interface_to_usbdev(intf); 778 struct usb_device *usbdev = interface_to_usbdev(intf);
770 struct net_device *netdev; 779 struct net_device *netdev;
771 struct catc *catc; 780 struct catc *catc;
@@ -774,7 +783,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
774 783
775 if (usb_set_interface(usbdev, 784 if (usb_set_interface(usbdev,
776 intf->altsetting->desc.bInterfaceNumber, 1)) { 785 intf->altsetting->desc.bInterfaceNumber, 1)) {
777 dev_err(&intf->dev, "Can't set altsetting 1.\n"); 786 dev_err(dev, "Can't set altsetting 1.\n");
778 return -EIO; 787 return -EIO;
779 } 788 }
780 789
@@ -817,7 +826,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
817 if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && 826 if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
818 le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && 827 le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
819 le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { 828 le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
820 dbg("Testing for f5u011"); 829 dev_dbg(dev, "Testing for f5u011\n");
821 catc->is_f5u011 = 1; 830 catc->is_f5u011 = 1;
822 atomic_set(&catc->recq_sz, 0); 831 atomic_set(&catc->recq_sz, 0);
823 pktsz = RX_PKT_SZ; 832 pktsz = RX_PKT_SZ;
@@ -838,7 +847,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
838 catc->irq_buf, 2, catc_irq_done, catc, 1); 847 catc->irq_buf, 2, catc_irq_done, catc, 1);
839 848
840 if (!catc->is_f5u011) { 849 if (!catc->is_f5u011) {
841 dbg("Checking memory size\n"); 850 dev_dbg(dev, "Checking memory size\n");
842 851
843 i = 0x12345678; 852 i = 0x12345678;
844 catc_write_mem(catc, 0x7a80, &i, 4); 853 catc_write_mem(catc, 0x7a80, &i, 4);
@@ -850,7 +859,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
850 case 0x12345678: 859 case 0x12345678:
851 catc_set_reg(catc, TxBufCount, 8); 860 catc_set_reg(catc, TxBufCount, 8);
852 catc_set_reg(catc, RxBufCount, 32); 861 catc_set_reg(catc, RxBufCount, 32);
853 dbg("64k Memory\n"); 862 dev_dbg(dev, "64k Memory\n");
854 break; 863 break;
855 default: 864 default:
856 dev_warn(&intf->dev, 865 dev_warn(&intf->dev,
@@ -858,49 +867,49 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
858 case 0x87654321: 867 case 0x87654321:
859 catc_set_reg(catc, TxBufCount, 4); 868 catc_set_reg(catc, TxBufCount, 4);
860 catc_set_reg(catc, RxBufCount, 16); 869 catc_set_reg(catc, RxBufCount, 16);
861 dbg("32k Memory\n"); 870 dev_dbg(dev, "32k Memory\n");
862 break; 871 break;
863 } 872 }
864 873
865 dbg("Getting MAC from SEEROM."); 874 dev_dbg(dev, "Getting MAC from SEEROM.\n");
866 875
867 catc_get_mac(catc, netdev->dev_addr); 876 catc_get_mac(catc, netdev->dev_addr);
868 877
869 dbg("Setting MAC into registers."); 878 dev_dbg(dev, "Setting MAC into registers.\n");
870 879
871 for (i = 0; i < 6; i++) 880 for (i = 0; i < 6; i++)
872 catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); 881 catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
873 882
874 dbg("Filling the multicast list."); 883 dev_dbg(dev, "Filling the multicast list.\n");
875 884
876 memset(broadcast, 0xff, 6); 885 memset(broadcast, 0xff, 6);
877 catc_multicast(broadcast, catc->multicast); 886 catc_multicast(broadcast, catc->multicast);
878 catc_multicast(netdev->dev_addr, catc->multicast); 887 catc_multicast(netdev->dev_addr, catc->multicast);
879 catc_write_mem(catc, 0xfa80, catc->multicast, 64); 888 catc_write_mem(catc, 0xfa80, catc->multicast, 64);
880 889
881 dbg("Clearing error counters."); 890 dev_dbg(dev, "Clearing error counters.\n");
882 891
883 for (i = 0; i < 8; i++) 892 for (i = 0; i < 8; i++)
884 catc_set_reg(catc, EthStats + i, 0); 893 catc_set_reg(catc, EthStats + i, 0);
885 catc->last_stats = jiffies; 894 catc->last_stats = jiffies;
886 895
887 dbg("Enabling."); 896 dev_dbg(dev, "Enabling.\n");
888 897
889 catc_set_reg(catc, MaxBurst, RX_MAX_BURST); 898 catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
890 catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); 899 catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
891 catc_set_reg(catc, LEDCtrl, LEDLink); 900 catc_set_reg(catc, LEDCtrl, LEDLink);
892 catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); 901 catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
893 } else { 902 } else {
894 dbg("Performing reset\n"); 903 dev_dbg(dev, "Performing reset\n");
895 catc_reset(catc); 904 catc_reset(catc);
896 catc_get_mac(catc, netdev->dev_addr); 905 catc_get_mac(catc, netdev->dev_addr);
897 906
898 dbg("Setting RX Mode"); 907 dev_dbg(dev, "Setting RX Mode\n");
899 catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; 908 catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
900 catc->rxmode[1] = 0; 909 catc->rxmode[1] = 0;
901 f5u011_rxmode(catc, catc->rxmode); 910 f5u011_rxmode(catc, catc->rxmode);
902 } 911 }
903 dbg("Init done."); 912 dev_dbg(dev, "Init done.\n");
904 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", 913 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
905 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", 914 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
906 usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); 915 usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 49ab45e17fe8..1e207f086b75 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -302,18 +302,9 @@ static const struct driver_info cx82310_info = {
302 .tx_fixup = cx82310_tx_fixup, 302 .tx_fixup = cx82310_tx_fixup,
303}; 303};
304 304
305#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
306 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
307 USB_DEVICE_ID_MATCH_DEV_INFO, \
308 .idVendor = (vend), \
309 .idProduct = (prod), \
310 .bDeviceClass = (cl), \
311 .bDeviceSubClass = (sc), \
312 .bDeviceProtocol = (pr)
313
314static const struct usb_device_id products[] = { 305static const struct usb_device_id products[] = {
315 { 306 {
316 USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), 307 USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
317 .driver_info = (unsigned long) &cx82310_info 308 .driver_info = (unsigned long) &cx82310_info
318 }, 309 },
319 { }, 310 { },
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index db3c8021f2a3..a7e3f4e55bf3 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -91,7 +91,9 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
91 // get the packet count of the received skb 91 // get the packet count of the received skb
92 count = le32_to_cpu(header->packet_count); 92 count = le32_to_cpu(header->packet_count);
93 if (count > GL_MAX_TRANSMIT_PACKETS) { 93 if (count > GL_MAX_TRANSMIT_PACKETS) {
94 dbg("genelink: invalid received packet count %u", count); 94 netdev_dbg(dev->net,
95 "genelink: invalid received packet count %u\n",
96 count);
95 return 0; 97 return 0;
96 } 98 }
97 99
@@ -107,7 +109,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
107 109
108 // this may be a broken packet 110 // this may be a broken packet
109 if (size > GL_MAX_PACKET_LEN) { 111 if (size > GL_MAX_PACKET_LEN) {
110 dbg("genelink: invalid rx length %d", size); 112 netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
113 size);
111 return 0; 114 return 0;
112 } 115 }
113 116
@@ -133,7 +136,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
133 skb_pull(skb, 4); 136 skb_pull(skb, 4);
134 137
135 if (skb->len > GL_MAX_PACKET_LEN) { 138 if (skb->len > GL_MAX_PACKET_LEN) {
136 dbg("genelink: invalid rx length %d", skb->len); 139 netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
140 skb->len);
137 return 0; 141 return 0;
138 } 142 }
139 return 1; 143 return 1;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c3d03490c97d..c75e11e1b385 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -267,19 +267,16 @@ static int kaweth_control(struct kaweth_device *kaweth,
267 struct usb_ctrlrequest *dr; 267 struct usb_ctrlrequest *dr;
268 int retval; 268 int retval;
269 269
270 dbg("kaweth_control()"); 270 netdev_dbg(kaweth->net, "kaweth_control()\n");
271 271
272 if(in_interrupt()) { 272 if(in_interrupt()) {
273 dbg("in_interrupt()"); 273 netdev_dbg(kaweth->net, "in_interrupt()\n");
274 return -EBUSY; 274 return -EBUSY;
275 } 275 }
276 276
277 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); 277 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
278 278 if (!dr)
279 if (!dr) {
280 dbg("kmalloc() failed");
281 return -ENOMEM; 279 return -ENOMEM;
282 }
283 280
284 dr->bRequestType = requesttype; 281 dr->bRequestType = requesttype;
285 dr->bRequest = request; 282 dr->bRequest = request;
@@ -305,7 +302,7 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
305{ 302{
306 int retval; 303 int retval;
307 304
308 dbg("Reading kaweth configuration"); 305 netdev_dbg(kaweth->net, "Reading kaweth configuration\n");
309 306
310 retval = kaweth_control(kaweth, 307 retval = kaweth_control(kaweth,
311 usb_rcvctrlpipe(kaweth->dev, 0), 308 usb_rcvctrlpipe(kaweth->dev, 0),
@@ -327,7 +324,7 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
327{ 324{
328 int retval; 325 int retval;
329 326
330 dbg("Setting URB size to %d", (unsigned)urb_size); 327 netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size);
331 328
332 retval = kaweth_control(kaweth, 329 retval = kaweth_control(kaweth,
333 usb_sndctrlpipe(kaweth->dev, 0), 330 usb_sndctrlpipe(kaweth->dev, 0),
@@ -349,7 +346,7 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
349{ 346{
350 int retval; 347 int retval;
351 348
352 dbg("Set SOFS wait to %d", (unsigned)sofs_wait); 349 netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait);
353 350
354 retval = kaweth_control(kaweth, 351 retval = kaweth_control(kaweth,
355 usb_sndctrlpipe(kaweth->dev, 0), 352 usb_sndctrlpipe(kaweth->dev, 0),
@@ -372,7 +369,8 @@ static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
372{ 369{
373 int retval; 370 int retval;
374 371
375 dbg("Set receive filter to %d", (unsigned)receive_filter); 372 netdev_dbg(kaweth->net, "Set receive filter to %d\n",
373 (unsigned)receive_filter);
376 374
377 retval = kaweth_control(kaweth, 375 retval = kaweth_control(kaweth,
378 usb_sndctrlpipe(kaweth->dev, 0), 376 usb_sndctrlpipe(kaweth->dev, 0),
@@ -421,12 +419,13 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
421 kaweth->firmware_buf[4] = type; 419 kaweth->firmware_buf[4] = type;
422 kaweth->firmware_buf[5] = interrupt; 420 kaweth->firmware_buf[5] = interrupt;
423 421
424 dbg("High: %i, Low:%i", kaweth->firmware_buf[3], 422 netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3],
425 kaweth->firmware_buf[2]); 423 kaweth->firmware_buf[2]);
426 424
427 dbg("Downloading firmware at %p to kaweth device at %p", 425 netdev_dbg(kaweth->net,
428 fw->data, kaweth); 426 "Downloading firmware at %p to kaweth device at %p\n",
429 dbg("Firmware length: %d", data_len); 427 fw->data, kaweth);
428 netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
430 429
431 return kaweth_control(kaweth, 430 return kaweth_control(kaweth,
432 usb_sndctrlpipe(kaweth->dev, 0), 431 usb_sndctrlpipe(kaweth->dev, 0),
@@ -454,7 +453,7 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
454 kaweth->firmware_buf[6] = 0x00; 453 kaweth->firmware_buf[6] = 0x00;
455 kaweth->firmware_buf[7] = 0x00; 454 kaweth->firmware_buf[7] = 0x00;
456 455
457 dbg("Triggering firmware"); 456 netdev_dbg(kaweth->net, "Triggering firmware\n");
458 457
459 return kaweth_control(kaweth, 458 return kaweth_control(kaweth,
460 usb_sndctrlpipe(kaweth->dev, 0), 459 usb_sndctrlpipe(kaweth->dev, 0),
@@ -474,11 +473,11 @@ static int kaweth_reset(struct kaweth_device *kaweth)
474{ 473{
475 int result; 474 int result;
476 475
477 dbg("kaweth_reset(%p)", kaweth); 476 netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth);
478 result = usb_reset_configuration(kaweth->dev); 477 result = usb_reset_configuration(kaweth->dev);
479 mdelay(10); 478 mdelay(10);
480 479
481 dbg("kaweth_reset() returns %d.",result); 480 netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result);
482 481
483 return result; 482 return result;
484} 483}
@@ -595,6 +594,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
595 ****************************************************************/ 594 ****************************************************************/
596static void kaweth_usb_receive(struct urb *urb) 595static void kaweth_usb_receive(struct urb *urb)
597{ 596{
597 struct device *dev = &urb->dev->dev;
598 struct kaweth_device *kaweth = urb->context; 598 struct kaweth_device *kaweth = urb->context;
599 struct net_device *net = kaweth->net; 599 struct net_device *net = kaweth->net;
600 int status = urb->status; 600 int status = urb->status;
@@ -610,25 +610,25 @@ static void kaweth_usb_receive(struct urb *urb)
610 kaweth->stats.rx_errors++; 610 kaweth->stats.rx_errors++;
611 kaweth->end = 1; 611 kaweth->end = 1;
612 wake_up(&kaweth->term_wait); 612 wake_up(&kaweth->term_wait);
613 dbg("Status was -EPIPE."); 613 dev_dbg(dev, "Status was -EPIPE.\n");
614 return; 614 return;
615 } 615 }
616 if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) { 616 if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
617 /* we are killed - set a flag and wake the disconnect handler */ 617 /* we are killed - set a flag and wake the disconnect handler */
618 kaweth->end = 1; 618 kaweth->end = 1;
619 wake_up(&kaweth->term_wait); 619 wake_up(&kaweth->term_wait);
620 dbg("Status was -ECONNRESET or -ESHUTDOWN."); 620 dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n");
621 return; 621 return;
622 } 622 }
623 if (unlikely(status == -EPROTO || status == -ETIME || 623 if (unlikely(status == -EPROTO || status == -ETIME ||
624 status == -EILSEQ)) { 624 status == -EILSEQ)) {
625 kaweth->stats.rx_errors++; 625 kaweth->stats.rx_errors++;
626 dbg("Status was -EPROTO, -ETIME, or -EILSEQ."); 626 dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
627 return; 627 return;
628 } 628 }
629 if (unlikely(status == -EOVERFLOW)) { 629 if (unlikely(status == -EOVERFLOW)) {
630 kaweth->stats.rx_errors++; 630 kaweth->stats.rx_errors++;
631 dbg("Status was -EOVERFLOW."); 631 dev_dbg(dev, "Status was -EOVERFLOW.\n");
632 } 632 }
633 spin_lock(&kaweth->device_lock); 633 spin_lock(&kaweth->device_lock);
634 if (IS_BLOCKED(kaweth->status)) { 634 if (IS_BLOCKED(kaweth->status)) {
@@ -687,7 +687,7 @@ static int kaweth_open(struct net_device *net)
687 struct kaweth_device *kaweth = netdev_priv(net); 687 struct kaweth_device *kaweth = netdev_priv(net);
688 int res; 688 int res;
689 689
690 dbg("Opening network device."); 690 netdev_dbg(kaweth->net, "Opening network device.\n");
691 691
692 res = usb_autopm_get_interface(kaweth->intf); 692 res = usb_autopm_get_interface(kaweth->intf);
693 if (res) { 693 if (res) {
@@ -787,7 +787,8 @@ static void kaweth_usb_transmit_complete(struct urb *urb)
787 787
788 if (unlikely(status != 0)) 788 if (unlikely(status != 0))
789 if (status != -ENOENT) 789 if (status != -ENOENT)
790 dbg("%s: TX status %d.", kaweth->net->name, status); 790 dev_dbg(&urb->dev->dev, "%s: TX status %d.\n",
791 kaweth->net->name, status);
791 792
792 netif_wake_queue(kaweth->net); 793 netif_wake_queue(kaweth->net);
793 dev_kfree_skb_irq(skb); 794 dev_kfree_skb_irq(skb);
@@ -871,7 +872,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
871 KAWETH_PACKET_FILTER_BROADCAST | 872 KAWETH_PACKET_FILTER_BROADCAST |
872 KAWETH_PACKET_FILTER_MULTICAST; 873 KAWETH_PACKET_FILTER_MULTICAST;
873 874
874 dbg("Setting Rx mode to %d", packet_filter_bitmap); 875 netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap);
875 876
876 netif_stop_queue(net); 877 netif_stop_queue(net);
877 878
@@ -916,7 +917,8 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
916 result); 917 result);
917 } 918 }
918 else { 919 else {
919 dbg("Set Rx mode to %d", packet_filter_bitmap); 920 netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
921 packet_filter_bitmap);
920 } 922 }
921} 923}
922 924
@@ -951,7 +953,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
951 struct kaweth_device *kaweth = usb_get_intfdata(intf); 953 struct kaweth_device *kaweth = usb_get_intfdata(intf);
952 unsigned long flags; 954 unsigned long flags;
953 955
954 dbg("Suspending device"); 956 dev_dbg(&intf->dev, "Suspending device\n");
955 spin_lock_irqsave(&kaweth->device_lock, flags); 957 spin_lock_irqsave(&kaweth->device_lock, flags);
956 kaweth->status |= KAWETH_STATUS_SUSPENDING; 958 kaweth->status |= KAWETH_STATUS_SUSPENDING;
957 spin_unlock_irqrestore(&kaweth->device_lock, flags); 959 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -968,7 +970,7 @@ static int kaweth_resume(struct usb_interface *intf)
968 struct kaweth_device *kaweth = usb_get_intfdata(intf); 970 struct kaweth_device *kaweth = usb_get_intfdata(intf);
969 unsigned long flags; 971 unsigned long flags;
970 972
971 dbg("Resuming device"); 973 dev_dbg(&intf->dev, "Resuming device\n");
972 spin_lock_irqsave(&kaweth->device_lock, flags); 974 spin_lock_irqsave(&kaweth->device_lock, flags);
973 kaweth->status &= ~KAWETH_STATUS_SUSPENDING; 975 kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
974 spin_unlock_irqrestore(&kaweth->device_lock, flags); 976 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1003,36 +1005,37 @@ static int kaweth_probe(
1003 const struct usb_device_id *id /* from id_table */ 1005 const struct usb_device_id *id /* from id_table */
1004 ) 1006 )
1005{ 1007{
1006 struct usb_device *dev = interface_to_usbdev(intf); 1008 struct device *dev = &intf->dev;
1009 struct usb_device *udev = interface_to_usbdev(intf);
1007 struct kaweth_device *kaweth; 1010 struct kaweth_device *kaweth;
1008 struct net_device *netdev; 1011 struct net_device *netdev;
1009 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 1012 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1010 int result = 0; 1013 int result = 0;
1011 1014
1012 dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x", 1015 dev_dbg(dev,
1013 dev->devnum, 1016 "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
1014 le16_to_cpu(dev->descriptor.idVendor), 1017 udev->devnum, le16_to_cpu(udev->descriptor.idVendor),
1015 le16_to_cpu(dev->descriptor.idProduct), 1018 le16_to_cpu(udev->descriptor.idProduct),
1016 le16_to_cpu(dev->descriptor.bcdDevice)); 1019 le16_to_cpu(udev->descriptor.bcdDevice));
1017 1020
1018 dbg("Device at %p", dev); 1021 dev_dbg(dev, "Device at %p\n", udev);
1019 1022
1020 dbg("Descriptor length: %x type: %x", 1023 dev_dbg(dev, "Descriptor length: %x type: %x\n",
1021 (int)dev->descriptor.bLength, 1024 (int)udev->descriptor.bLength,
1022 (int)dev->descriptor.bDescriptorType); 1025 (int)udev->descriptor.bDescriptorType);
1023 1026
1024 netdev = alloc_etherdev(sizeof(*kaweth)); 1027 netdev = alloc_etherdev(sizeof(*kaweth));
1025 if (!netdev) 1028 if (!netdev)
1026 return -ENOMEM; 1029 return -ENOMEM;
1027 1030
1028 kaweth = netdev_priv(netdev); 1031 kaweth = netdev_priv(netdev);
1029 kaweth->dev = dev; 1032 kaweth->dev = udev;
1030 kaweth->net = netdev; 1033 kaweth->net = netdev;
1031 1034
1032 spin_lock_init(&kaweth->device_lock); 1035 spin_lock_init(&kaweth->device_lock);
1033 init_waitqueue_head(&kaweth->term_wait); 1036 init_waitqueue_head(&kaweth->term_wait);
1034 1037
1035 dbg("Resetting."); 1038 dev_dbg(dev, "Resetting.\n");
1036 1039
1037 kaweth_reset(kaweth); 1040 kaweth_reset(kaweth);
1038 1041
@@ -1041,17 +1044,17 @@ static int kaweth_probe(
1041 * downloaded. Don't try to do it again, or we'll hang the device. 1044 * downloaded. Don't try to do it again, or we'll hang the device.
1042 */ 1045 */
1043 1046
1044 if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) { 1047 if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) {
1045 dev_info(&intf->dev, "Firmware present in device.\n"); 1048 dev_info(dev, "Firmware present in device.\n");
1046 } else { 1049 } else {
1047 /* Download the firmware */ 1050 /* Download the firmware */
1048 dev_info(&intf->dev, "Downloading firmware...\n"); 1051 dev_info(dev, "Downloading firmware...\n");
1049 kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); 1052 kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
1050 if ((result = kaweth_download_firmware(kaweth, 1053 if ((result = kaweth_download_firmware(kaweth,
1051 "kaweth/new_code.bin", 1054 "kaweth/new_code.bin",
1052 100, 1055 100,
1053 2)) < 0) { 1056 2)) < 0) {
1054 dev_err(&intf->dev, "Error downloading firmware (%d)\n", 1057 dev_err(dev, "Error downloading firmware (%d)\n",
1055 result); 1058 result);
1056 goto err_fw; 1059 goto err_fw;
1057 } 1060 }
@@ -1060,8 +1063,7 @@ static int kaweth_probe(
1060 "kaweth/new_code_fix.bin", 1063 "kaweth/new_code_fix.bin",
1061 100, 1064 100,
1062 3)) < 0) { 1065 3)) < 0) {
1063 dev_err(&intf->dev, 1066 dev_err(dev, "Error downloading firmware fix (%d)\n",
1064 "Error downloading firmware fix (%d)\n",
1065 result); 1067 result);
1066 goto err_fw; 1068 goto err_fw;
1067 } 1069 }
@@ -1070,8 +1072,7 @@ static int kaweth_probe(
1070 "kaweth/trigger_code.bin", 1072 "kaweth/trigger_code.bin",
1071 126, 1073 126,
1072 2)) < 0) { 1074 2)) < 0) {
1073 dev_err(&intf->dev, 1075 dev_err(dev, "Error downloading trigger code (%d)\n",
1074 "Error downloading trigger code (%d)\n",
1075 result); 1076 result);
1076 goto err_fw; 1077 goto err_fw;
1077 1078
@@ -1081,19 +1082,18 @@ static int kaweth_probe(
1081 "kaweth/trigger_code_fix.bin", 1082 "kaweth/trigger_code_fix.bin",
1082 126, 1083 126,
1083 3)) < 0) { 1084 3)) < 0) {
1084 dev_err(&intf->dev, "Error downloading trigger code fix (%d)\n", result); 1085 dev_err(dev, "Error downloading trigger code fix (%d)\n", result);
1085 goto err_fw; 1086 goto err_fw;
1086 } 1087 }
1087 1088
1088 1089
1089 if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) { 1090 if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
1090 dev_err(&intf->dev, "Error triggering firmware (%d)\n", 1091 dev_err(dev, "Error triggering firmware (%d)\n", result);
1091 result);
1092 goto err_fw; 1092 goto err_fw;
1093 } 1093 }
1094 1094
1095 /* Device will now disappear for a moment... */ 1095 /* Device will now disappear for a moment... */
1096 dev_info(&intf->dev, "Firmware loaded. I'll be back...\n"); 1096 dev_info(dev, "Firmware loaded. I'll be back...\n");
1097err_fw: 1097err_fw:
1098 free_page((unsigned long)kaweth->firmware_buf); 1098 free_page((unsigned long)kaweth->firmware_buf);
1099 free_netdev(netdev); 1099 free_netdev(netdev);
@@ -1103,29 +1103,29 @@ err_fw:
1103 result = kaweth_read_configuration(kaweth); 1103 result = kaweth_read_configuration(kaweth);
1104 1104
1105 if(result < 0) { 1105 if(result < 0) {
1106 dev_err(&intf->dev, "Error reading configuration (%d), no net device created\n", result); 1106 dev_err(dev, "Error reading configuration (%d), no net device created\n", result);
1107 goto err_free_netdev; 1107 goto err_free_netdev;
1108 } 1108 }
1109 1109
1110 dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask); 1110 dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
1111 dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1)); 1111 dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
1112 dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size)); 1112 dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
1113 dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr); 1113 dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
1114 1114
1115 if(!memcmp(&kaweth->configuration.hw_addr, 1115 if(!memcmp(&kaweth->configuration.hw_addr,
1116 &bcast_addr, 1116 &bcast_addr,
1117 sizeof(bcast_addr))) { 1117 sizeof(bcast_addr))) {
1118 dev_err(&intf->dev, "Firmware not functioning properly, no net device created\n"); 1118 dev_err(dev, "Firmware not functioning properly, no net device created\n");
1119 goto err_free_netdev; 1119 goto err_free_netdev;
1120 } 1120 }
1121 1121
1122 if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) { 1122 if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
1123 dbg("Error setting URB size"); 1123 dev_dbg(dev, "Error setting URB size\n");
1124 goto err_free_netdev; 1124 goto err_free_netdev;
1125 } 1125 }
1126 1126
1127 if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) { 1127 if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
1128 dev_err(&intf->dev, "Error setting SOFS wait\n"); 1128 dev_err(dev, "Error setting SOFS wait\n");
1129 goto err_free_netdev; 1129 goto err_free_netdev;
1130 } 1130 }
1131 1131
@@ -1135,11 +1135,11 @@ err_fw:
1135 KAWETH_PACKET_FILTER_MULTICAST); 1135 KAWETH_PACKET_FILTER_MULTICAST);
1136 1136
1137 if(result < 0) { 1137 if(result < 0) {
1138 dev_err(&intf->dev, "Error setting receive filter\n"); 1138 dev_err(dev, "Error setting receive filter\n");
1139 goto err_free_netdev; 1139 goto err_free_netdev;
1140 } 1140 }
1141 1141
1142 dbg("Initializing net device."); 1142 dev_dbg(dev, "Initializing net device.\n");
1143 1143
1144 kaweth->intf = intf; 1144 kaweth->intf = intf;
1145 1145
@@ -1181,20 +1181,20 @@ err_fw:
1181 1181
1182#if 0 1182#if 0
1183// dma_supported() is deeply broken on almost all architectures 1183// dma_supported() is deeply broken on almost all architectures
1184 if (dma_supported (&intf->dev, 0xffffffffffffffffULL)) 1184 if (dma_supported (dev, 0xffffffffffffffffULL))
1185 kaweth->net->features |= NETIF_F_HIGHDMA; 1185 kaweth->net->features |= NETIF_F_HIGHDMA;
1186#endif 1186#endif
1187 1187
1188 SET_NETDEV_DEV(netdev, &intf->dev); 1188 SET_NETDEV_DEV(netdev, dev);
1189 if (register_netdev(netdev) != 0) { 1189 if (register_netdev(netdev) != 0) {
1190 dev_err(&intf->dev, "Error registering netdev.\n"); 1190 dev_err(dev, "Error registering netdev.\n");
1191 goto err_intfdata; 1191 goto err_intfdata;
1192 } 1192 }
1193 1193
1194 dev_info(&intf->dev, "kaweth interface created at %s\n", 1194 dev_info(dev, "kaweth interface created at %s\n",
1195 kaweth->net->name); 1195 kaweth->net->name);
1196 1196
1197 dbg("Kaweth probe returning."); 1197 dev_dbg(dev, "Kaweth probe returning.\n");
1198 1198
1199 return 0; 1199 return 0;
1200 1200
@@ -1232,7 +1232,7 @@ static void kaweth_disconnect(struct usb_interface *intf)
1232 } 1232 }
1233 netdev = kaweth->net; 1233 netdev = kaweth->net;
1234 1234
1235 dbg("Unregistering net device"); 1235 netdev_dbg(kaweth->net, "Unregistering net device\n");
1236 unregister_netdev(netdev); 1236 unregister_netdev(netdev);
1237 1237
1238 usb_free_urb(kaweth->rx_urb); 1238 usb_free_urb(kaweth->rx_urb);
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 28c4d513ba85..c062a3e8295c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -155,12 +155,10 @@ static void nc_dump_registers(struct usbnet *dev)
155 u8 reg; 155 u8 reg;
156 u16 *vp = kmalloc(sizeof (u16)); 156 u16 *vp = kmalloc(sizeof (u16));
157 157
158 if (!vp) { 158 if (!vp)
159 dbg("no memory?");
160 return; 159 return;
161 }
162 160
163 dbg("%s registers:", dev->net->name); 161 netdev_dbg(dev->net, "registers:\n");
164 for (reg = 0; reg < 0x20; reg++) { 162 for (reg = 0; reg < 0x20; reg++) {
165 int retval; 163 int retval;
166 164
@@ -172,11 +170,10 @@ static void nc_dump_registers(struct usbnet *dev)
172 170
173 retval = nc_register_read(dev, reg, vp); 171 retval = nc_register_read(dev, reg, vp);
174 if (retval < 0) 172 if (retval < 0)
175 dbg("%s reg [0x%x] ==> error %d", 173 netdev_dbg(dev->net, "reg [0x%x] ==> error %d\n",
176 dev->net->name, reg, retval); 174 reg, retval);
177 else 175 else
178 dbg("%s reg [0x%x] = 0x%x", 176 netdev_dbg(dev->net, "reg [0x%x] = 0x%x\n", reg, *vp);
179 dev->net->name, reg, *vp);
180 } 177 }
181 kfree(vp); 178 kfree(vp);
182} 179}
@@ -300,15 +297,15 @@ static int net1080_reset(struct usbnet *dev)
300 // nc_dump_registers(dev); 297 // nc_dump_registers(dev);
301 298
302 if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) { 299 if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
303 dbg("can't read %s-%s status: %d", 300 netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
304 dev->udev->bus->bus_name, dev->udev->devpath, retval); 301 dev->udev->bus->bus_name, dev->udev->devpath, retval);
305 goto done; 302 goto done;
306 } 303 }
307 status = *vp; 304 status = *vp;
308 nc_dump_status(dev, status); 305 nc_dump_status(dev, status);
309 306
310 if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) { 307 if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
311 dbg("can't read USBCTL, %d", retval); 308 netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
312 goto done; 309 goto done;
313 } 310 }
314 usbctl = *vp; 311 usbctl = *vp;
@@ -318,7 +315,7 @@ static int net1080_reset(struct usbnet *dev)
318 USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER); 315 USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
319 316
320 if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) { 317 if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
321 dbg("can't read TTL, %d", retval); 318 netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
322 goto done; 319 goto done;
323 } 320 }
324 ttl = *vp; 321 ttl = *vp;
@@ -326,7 +323,7 @@ static int net1080_reset(struct usbnet *dev)
326 323
327 nc_register_write(dev, REG_TTL, 324 nc_register_write(dev, REG_TTL,
328 MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) ); 325 MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
329 dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS); 326 netdev_dbg(dev->net, "assigned TTL, %d ms\n", NC_READ_TTL_MS);
330 327
331 netif_info(dev, link, dev->net, "port %c, peer %sconnected\n", 328 netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
332 (status & STATUS_PORT_A) ? 'A' : 'B', 329 (status & STATUS_PORT_A) ? 'A' : 'B',
@@ -350,7 +347,7 @@ static int net1080_check_connect(struct usbnet *dev)
350 status = *vp; 347 status = *vp;
351 kfree(vp); 348 kfree(vp);
352 if (retval != 0) { 349 if (retval != 0) {
353 dbg("%s net1080_check_conn read - %d", dev->net->name, retval); 350 netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
354 return retval; 351 return retval;
355 } 352 }
356 if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER) 353 if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER)
@@ -420,11 +417,9 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
420 u16 hdr_len, packet_len; 417 u16 hdr_len, packet_len;
421 418
422 if (!(skb->len & 0x01)) { 419 if (!(skb->len & 0x01)) {
423#ifdef DEBUG 420 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
424 struct net_device *net = dev->net; 421 skb->len, dev->net->hard_header_len, dev->hard_mtu,
425 dbg("rx framesize %d range %d..%d mtu %d", skb->len, 422 dev->net->mtu);
426 net->hard_header_len, dev->hard_mtu, net->mtu);
427#endif
428 dev->net->stats.rx_frame_errors++; 423 dev->net->stats.rx_frame_errors++;
429 nc_ensure_sync(dev); 424 nc_ensure_sync(dev);
430 return 0; 425 return 0;
@@ -435,17 +430,17 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
435 packet_len = le16_to_cpup(&header->packet_len); 430 packet_len = le16_to_cpup(&header->packet_len);
436 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { 431 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
437 dev->net->stats.rx_frame_errors++; 432 dev->net->stats.rx_frame_errors++;
438 dbg("packet too big, %d", packet_len); 433 netdev_dbg(dev->net, "packet too big, %d\n", packet_len);
439 nc_ensure_sync(dev); 434 nc_ensure_sync(dev);
440 return 0; 435 return 0;
441 } else if (hdr_len < MIN_HEADER) { 436 } else if (hdr_len < MIN_HEADER) {
442 dev->net->stats.rx_frame_errors++; 437 dev->net->stats.rx_frame_errors++;
443 dbg("header too short, %d", hdr_len); 438 netdev_dbg(dev->net, "header too short, %d\n", hdr_len);
444 nc_ensure_sync(dev); 439 nc_ensure_sync(dev);
445 return 0; 440 return 0;
446 } else if (hdr_len > MIN_HEADER) { 441 } else if (hdr_len > MIN_HEADER) {
447 // out of band data for us? 442 // out of band data for us?
448 dbg("header OOB, %d bytes", hdr_len - MIN_HEADER); 443 netdev_dbg(dev->net, "header OOB, %d bytes\n", hdr_len - MIN_HEADER);
449 nc_ensure_sync(dev); 444 nc_ensure_sync(dev);
450 // switch (vendor/product ids) { ... } 445 // switch (vendor/product ids) { ... }
451 } 446 }
@@ -458,23 +453,23 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
458 if ((packet_len & 0x01) == 0) { 453 if ((packet_len & 0x01) == 0) {
459 if (skb->data [packet_len] != PAD_BYTE) { 454 if (skb->data [packet_len] != PAD_BYTE) {
460 dev->net->stats.rx_frame_errors++; 455 dev->net->stats.rx_frame_errors++;
461 dbg("bad pad"); 456 netdev_dbg(dev->net, "bad pad\n");
462 return 0; 457 return 0;
463 } 458 }
464 skb_trim(skb, skb->len - 1); 459 skb_trim(skb, skb->len - 1);
465 } 460 }
466 if (skb->len != packet_len) { 461 if (skb->len != packet_len) {
467 dev->net->stats.rx_frame_errors++; 462 dev->net->stats.rx_frame_errors++;
468 dbg("bad packet len %d (expected %d)", 463 netdev_dbg(dev->net, "bad packet len %d (expected %d)\n",
469 skb->len, packet_len); 464 skb->len, packet_len);
470 nc_ensure_sync(dev); 465 nc_ensure_sync(dev);
471 return 0; 466 return 0;
472 } 467 }
473 if (header->packet_id != get_unaligned(&trailer->packet_id)) { 468 if (header->packet_id != get_unaligned(&trailer->packet_id)) {
474 dev->net->stats.rx_fifo_errors++; 469 dev->net->stats.rx_fifo_errors++;
475 dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", 470 netdev_dbg(dev->net, "(2+ dropped) rx packet_id mismatch 0x%x 0x%x\n",
476 le16_to_cpu(header->packet_id), 471 le16_to_cpu(header->packet_id),
477 le16_to_cpu(trailer->packet_id)); 472 le16_to_cpu(trailer->packet_id));
478 return 0; 473 return 0;
479 } 474 }
480#if 0 475#if 0
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3543c9e57824..6883c371c59f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -108,7 +108,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
108 atomic_set(&info->pmcount, 0); 108 atomic_set(&info->pmcount, 0);
109 109
110 /* register subdriver */ 110 /* register subdriver */
111 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); 111 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
112 if (IS_ERR(subdriver)) { 112 if (IS_ERR(subdriver)) {
113 dev_err(&info->control->dev, "subdriver registration failed\n"); 113 dev_err(&info->control->dev, "subdriver registration failed\n");
114 rv = PTR_ERR(subdriver); 114 rv = PTR_ERR(subdriver);
@@ -139,10 +139,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
139 139
140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); 140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
141 141
142 /* require a single interrupt status endpoint for subdriver */ 142 /* control and data is shared? */
143 if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
144 info->control = intf;
145 info->data = intf;
146 goto shared;
147 }
148
149 /* else require a single interrupt status endpoint on control intf */
143 if (intf->cur_altsetting->desc.bNumEndpoints != 1) 150 if (intf->cur_altsetting->desc.bNumEndpoints != 1)
144 goto err; 151 goto err;
145 152
153 /* and a number of CDC descriptors */
146 while (len > 3) { 154 while (len > 3) {
147 struct usb_descriptor_header *h = (void *)buf; 155 struct usb_descriptor_header *h = (void *)buf;
148 156
@@ -231,8 +239,9 @@ next_desc:
231 if (status < 0) 239 if (status < 0)
232 goto err; 240 goto err;
233 241
242shared:
234 status = qmi_wwan_register_subdriver(dev); 243 status = qmi_wwan_register_subdriver(dev);
235 if (status < 0) { 244 if (status < 0 && info->control != info->data) {
236 usb_set_intfdata(info->data, NULL); 245 usb_set_intfdata(info->data, NULL);
237 usb_driver_release_interface(driver, info->data); 246 usb_driver_release_interface(driver, info->data);
238 } 247 }
@@ -241,20 +250,6 @@ err:
241 return status; 250 return status;
242} 251}
243 252
244/* Some devices combine the "control" and "data" functions into a
245 * single interface with all three endpoints: interrupt + bulk in and
246 * out
247 */
248static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
249{
250 struct qmi_wwan_state *info = (void *)&dev->data;
251
252 /* control and data is shared */
253 info->control = intf;
254 info->data = intf;
255 return qmi_wwan_register_subdriver(dev);
256}
257
258static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) 253static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
259{ 254{
260 struct qmi_wwan_state *info = (void *)&dev->data; 255 struct qmi_wwan_state *info = (void *)&dev->data;
@@ -331,20 +326,12 @@ static const struct driver_info qmi_wwan_info = {
331 .manage_power = qmi_wwan_manage_power, 326 .manage_power = qmi_wwan_manage_power,
332}; 327};
333 328
334static const struct driver_info qmi_wwan_shared = {
335 .description = "WWAN/QMI device",
336 .flags = FLAG_WWAN,
337 .bind = qmi_wwan_bind_shared,
338 .unbind = qmi_wwan_unbind,
339 .manage_power = qmi_wwan_manage_power,
340};
341
342#define HUAWEI_VENDOR_ID 0x12D1 329#define HUAWEI_VENDOR_ID 0x12D1
343 330
344/* map QMI/wwan function by a fixed interface number */ 331/* map QMI/wwan function by a fixed interface number */
345#define QMI_FIXED_INTF(vend, prod, num) \ 332#define QMI_FIXED_INTF(vend, prod, num) \
346 USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ 333 USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
347 .driver_info = (unsigned long)&qmi_wwan_shared 334 .driver_info = (unsigned long)&qmi_wwan_info
348 335
349/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ 336/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
350#define QMI_GOBI1K_DEVICE(vend, prod) \ 337#define QMI_GOBI1K_DEVICE(vend, prod) \
@@ -372,15 +359,15 @@ static const struct usb_device_id products[] = {
372 }, 359 },
373 { /* Huawei E392, E398 and possibly others in "Windows mode" */ 360 { /* Huawei E392, E398 and possibly others in "Windows mode" */
374 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), 361 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
375 .driver_info = (unsigned long)&qmi_wwan_shared, 362 .driver_info = (unsigned long)&qmi_wwan_info,
376 }, 363 },
377 { /* Pantech UML290, P4200 and more */ 364 { /* Pantech UML290, P4200 and more */
378 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), 365 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
379 .driver_info = (unsigned long)&qmi_wwan_shared, 366 .driver_info = (unsigned long)&qmi_wwan_info,
380 }, 367 },
381 { /* Pantech UML290 - newer firmware */ 368 { /* Pantech UML290 - newer firmware */
382 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), 369 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
383 .driver_info = (unsigned long)&qmi_wwan_shared, 370 .driver_info = (unsigned long)&qmi_wwan_info,
384 }, 371 },
385 372
386 /* 3. Combined interface devices matching on interface number */ 373 /* 3. Combined interface devices matching on interface number */
@@ -467,7 +454,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id
467 */ 454 */
468 if (!id->driver_info) { 455 if (!id->driver_info) {
469 dev_dbg(&intf->dev, "setting defaults for dynamic device id\n"); 456 dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
470 id->driver_info = (unsigned long)&qmi_wwan_shared; 457 id->driver_info = (unsigned long)&qmi_wwan_info;
471 } 458 }
472 459
473 return usbnet_probe(intf, id); 460 return usbnet_probe(intf, id);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 0e2c92e0e532..5f39a3b225ef 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -275,7 +275,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
275 return -EBUSY; 275 return -EBUSY;
276 276
277 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 277 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
278 dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr); 278 netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
279 /* Set the IDR registers. */ 279 /* Set the IDR registers. */
280 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); 280 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
281#ifdef EEPROM_WRITE 281#ifdef EEPROM_WRITE
@@ -503,12 +503,12 @@ static void intr_callback(struct urb *urb)
503 if ((d[INT_MSR] & MSR_LINK) == 0) { 503 if ((d[INT_MSR] & MSR_LINK) == 0) {
504 if (netif_carrier_ok(dev->netdev)) { 504 if (netif_carrier_ok(dev->netdev)) {
505 netif_carrier_off(dev->netdev); 505 netif_carrier_off(dev->netdev);
506 dbg("%s: LINK LOST\n", __func__); 506 netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__);
507 } 507 }
508 } else { 508 } else {
509 if (!netif_carrier_ok(dev->netdev)) { 509 if (!netif_carrier_ok(dev->netdev)) {
510 netif_carrier_on(dev->netdev); 510 netif_carrier_on(dev->netdev);
511 dbg("%s: LINK CAME BACK\n", __func__); 511 netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__);
512 } 512 }
513 } 513 }
514 514
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 8e22417fa6c1..c27d27701aee 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -68,9 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
68 */ 68 */
69#define SIERRA_NET_USBCTL_BUF_LEN 1024 69#define SIERRA_NET_USBCTL_BUF_LEN 1024
70 70
71struct sierra_net_info_data { 71/* Overriding the default usbnet rx_urb_size */
72 u16 rx_urb_size; 72#define SIERRA_NET_RX_URB_SIZE (8 * 1024)
73};
74 73
75/* Private data structure */ 74/* Private data structure */
76struct sierra_net_data { 75struct sierra_net_data {
@@ -560,7 +559,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
560/* 559/*
561 * Sync Retransmit Timer Handler. On expiry, kick the work queue 560 * Sync Retransmit Timer Handler. On expiry, kick the work queue
562 */ 561 */
563void sierra_sync_timer(unsigned long syncdata) 562static void sierra_sync_timer(unsigned long syncdata)
564{ 563{
565 struct usbnet *dev = (struct usbnet *)syncdata; 564 struct usbnet *dev = (struct usbnet *)syncdata;
566 565
@@ -678,9 +677,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
678 static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 677 static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
679 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; 678 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
680 679
681 struct sierra_net_info_data *data =
682 (struct sierra_net_info_data *)dev->driver_info->data;
683
684 dev_dbg(&dev->udev->dev, "%s", __func__); 680 dev_dbg(&dev->udev->dev, "%s", __func__);
685 681
686 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; 682 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -725,9 +721,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
725 sierra_net_set_ctx_index(priv, 0); 721 sierra_net_set_ctx_index(priv, 0);
726 722
727 /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ 723 /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
728 dev->rx_urb_size = data->rx_urb_size; 724 dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE;
729 if (dev->udev->speed != USB_SPEED_HIGH) 725 if (dev->udev->speed != USB_SPEED_HIGH)
730 dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); 726 dev->rx_urb_size = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE);
731 727
732 dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; 728 dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
733 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 729 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
@@ -842,7 +838,7 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
842 netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); 838 netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
843 839
844 dev->net->stats.rx_frame_errors++; 840 dev->net->stats.rx_frame_errors++;
845 /* dev->net->stats.rx_errors incremented by caller */; 841 /* dev->net->stats.rx_errors incremented by caller */
846 return 0; 842 return 0;
847 } 843 }
848 844
@@ -866,8 +862,8 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
866} 862}
867 863
868/* ---------------------------- Transmit data path ----------------------*/ 864/* ---------------------------- Transmit data path ----------------------*/
869struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 865static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev,
870 gfp_t flags) 866 struct sk_buff *skb, gfp_t flags)
871{ 867{
872 struct sierra_net_data *priv = sierra_net_get_private(dev); 868 struct sierra_net_data *priv = sierra_net_get_private(dev);
873 u16 len; 869 u16 len;
@@ -918,10 +914,6 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
918 return NULL; 914 return NULL;
919} 915}
920 916
921static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
922 .rx_urb_size = 8 * 1024,
923};
924
925static const struct driver_info sierra_net_info_direct_ip = { 917static const struct driver_info sierra_net_info_direct_ip = {
926 .description = "Sierra Wireless USB-to-WWAN Modem", 918 .description = "Sierra Wireless USB-to-WWAN Modem",
927 .flags = FLAG_WWAN | FLAG_SEND_ZLP, 919 .flags = FLAG_WWAN | FLAG_SEND_ZLP,
@@ -930,7 +922,6 @@ static const struct driver_info sierra_net_info_direct_ip = {
930 .status = sierra_net_status, 922 .status = sierra_net_status,
931 .rx_fixup = sierra_net_rx_fixup, 923 .rx_fixup = sierra_net_rx_fixup,
932 .tx_fixup = sierra_net_tx_fixup, 924 .tx_fixup = sierra_net_tx_fixup,
933 .data = (unsigned long)&sierra_net_info_data_direct_ip,
934}; 925};
935 926
936#define DIRECT_IP_DEVICE(vend, prod) \ 927#define DIRECT_IP_DEVICE(vend, prod) \
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 376143e8a1aa..b77ae76f4aa8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -52,6 +52,7 @@
52#define USB_PRODUCT_ID_LAN7500 (0x7500) 52#define USB_PRODUCT_ID_LAN7500 (0x7500)
53#define USB_PRODUCT_ID_LAN7505 (0x7505) 53#define USB_PRODUCT_ID_LAN7505 (0x7505)
54#define RXW_PADDING 2 54#define RXW_PADDING 2
55#define SUPPORTED_WAKE (WAKE_MAGIC)
55 56
56#define check_warn(ret, fmt, args...) \ 57#define check_warn(ret, fmt, args...) \
57 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 58 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -65,6 +66,7 @@
65struct smsc75xx_priv { 66struct smsc75xx_priv {
66 struct usbnet *dev; 67 struct usbnet *dev;
67 u32 rfe_ctl; 68 u32 rfe_ctl;
69 u32 wolopts;
68 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; 70 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
69 struct mutex dataport_mutex; 71 struct mutex dataport_mutex;
70 spinlock_t rfe_ctl_lock; 72 spinlock_t rfe_ctl_lock;
@@ -135,6 +137,30 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
135 return ret; 137 return ret;
136} 138}
137 139
140static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
141{
142 if (WARN_ON_ONCE(!dev))
143 return -EINVAL;
144
145 cpu_to_le32s(&feature);
146
147 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
148 USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
149 USB_CTRL_SET_TIMEOUT);
150}
151
152static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
153{
154 if (WARN_ON_ONCE(!dev))
155 return -EINVAL;
156
157 cpu_to_le32s(&feature);
158
159 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
160 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
161 USB_CTRL_SET_TIMEOUT);
162}
163
138/* Loop until the read is completed with timeout 164/* Loop until the read is completed with timeout
139 * called with phy_mutex held */ 165 * called with phy_mutex held */
140static int smsc75xx_phy_wait_not_busy(struct usbnet *dev) 166static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
@@ -578,6 +604,26 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
578 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); 604 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
579} 605}
580 606
607static void smsc75xx_ethtool_get_wol(struct net_device *net,
608 struct ethtool_wolinfo *wolinfo)
609{
610 struct usbnet *dev = netdev_priv(net);
611 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
612
613 wolinfo->supported = SUPPORTED_WAKE;
614 wolinfo->wolopts = pdata->wolopts;
615}
616
617static int smsc75xx_ethtool_set_wol(struct net_device *net,
618 struct ethtool_wolinfo *wolinfo)
619{
620 struct usbnet *dev = netdev_priv(net);
621 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
622
623 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
624 return 0;
625}
626
581static const struct ethtool_ops smsc75xx_ethtool_ops = { 627static const struct ethtool_ops smsc75xx_ethtool_ops = {
582 .get_link = usbnet_get_link, 628 .get_link = usbnet_get_link,
583 .nway_reset = usbnet_nway_reset, 629 .nway_reset = usbnet_nway_reset,
@@ -589,6 +635,8 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
589 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, 635 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
590 .get_eeprom = smsc75xx_ethtool_get_eeprom, 636 .get_eeprom = smsc75xx_ethtool_get_eeprom,
591 .set_eeprom = smsc75xx_ethtool_set_eeprom, 637 .set_eeprom = smsc75xx_ethtool_set_eeprom,
638 .get_wol = smsc75xx_ethtool_get_wol,
639 .set_wol = smsc75xx_ethtool_set_wol,
592}; 640};
593 641
594static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 642static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -756,6 +804,26 @@ static int smsc75xx_set_features(struct net_device *netdev,
756 return 0; 804 return 0;
757} 805}
758 806
807static int smsc75xx_wait_ready(struct usbnet *dev)
808{
809 int timeout = 0;
810
811 do {
812 u32 buf;
813 int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
814 check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
815
816 if (buf & PMT_CTL_DEV_RDY)
817 return 0;
818
819 msleep(10);
820 timeout++;
821 } while (timeout < 100);
822
823 netdev_warn(dev->net, "timeout waiting for device ready");
824 return -EIO;
825}
826
759static int smsc75xx_reset(struct usbnet *dev) 827static int smsc75xx_reset(struct usbnet *dev)
760{ 828{
761 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 829 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -764,6 +832,9 @@ static int smsc75xx_reset(struct usbnet *dev)
764 832
765 netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset"); 833 netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
766 834
835 ret = smsc75xx_wait_ready(dev);
836 check_warn_return(ret, "device not ready in smsc75xx_reset");
837
767 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 838 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
768 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 839 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
769 840
@@ -1083,6 +1154,169 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1083 } 1154 }
1084} 1155}
1085 1156
1157static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
1158{
1159 struct usbnet *dev = usb_get_intfdata(intf);
1160 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1161 int ret;
1162 u32 val;
1163
1164 ret = usbnet_suspend(intf, message);
1165 check_warn_return(ret, "usbnet_suspend error");
1166
1167 /* if no wol options set, enter lowest power SUSPEND2 mode */
1168 if (!(pdata->wolopts & SUPPORTED_WAKE)) {
1169 netdev_info(dev->net, "entering SUSPEND2 mode");
1170
1171 /* disable energy detect (link up) & wake up events */
1172 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1173 check_warn_return(ret, "Error reading WUCSR");
1174
1175 val &= ~(WUCSR_MPEN | WUCSR_WUEN);
1176
1177 ret = smsc75xx_write_reg(dev, WUCSR, val);
1178 check_warn_return(ret, "Error writing WUCSR");
1179
1180 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1181 check_warn_return(ret, "Error reading PMT_CTL");
1182
1183 val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
1184
1185 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1186 check_warn_return(ret, "Error writing PMT_CTL");
1187
1188 /* enter suspend2 mode */
1189 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1190 check_warn_return(ret, "Error reading PMT_CTL");
1191
1192 val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
1193 val |= PMT_CTL_SUS_MODE_2;
1194
1195 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1196 check_warn_return(ret, "Error writing PMT_CTL");
1197
1198 return 0;
1199 }
1200
1201 if (pdata->wolopts & WAKE_MAGIC) {
1202 /* clear any pending magic packet status */
1203 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1204 check_warn_return(ret, "Error reading WUCSR");
1205
1206 val |= WUCSR_MPR;
1207
1208 ret = smsc75xx_write_reg(dev, WUCSR, val);
1209 check_warn_return(ret, "Error writing WUCSR");
1210 }
1211
1212 /* enable/disable magic packup wake */
1213 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1214 check_warn_return(ret, "Error reading WUCSR");
1215
1216 if (pdata->wolopts & WAKE_MAGIC) {
1217 netdev_info(dev->net, "enabling magic packet wakeup");
1218 val |= WUCSR_MPEN;
1219 } else {
1220 netdev_info(dev->net, "disabling magic packet wakeup");
1221 val &= ~WUCSR_MPEN;
1222 }
1223
1224 ret = smsc75xx_write_reg(dev, WUCSR, val);
1225 check_warn_return(ret, "Error writing WUCSR");
1226
1227 /* enable wol wakeup source */
1228 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1229 check_warn_return(ret, "Error reading PMT_CTL");
1230
1231 val |= PMT_CTL_WOL_EN;
1232
1233 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1234 check_warn_return(ret, "Error writing PMT_CTL");
1235
1236 /* enable receiver */
1237 ret = smsc75xx_read_reg(dev, MAC_RX, &val);
1238 check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
1239
1240 val |= MAC_RX_RXEN;
1241
1242 ret = smsc75xx_write_reg(dev, MAC_RX, val);
1243 check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
1244
1245 /* some wol options are enabled, so enter SUSPEND0 */
1246 netdev_info(dev->net, "entering SUSPEND0 mode");
1247
1248 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1249 check_warn_return(ret, "Error reading PMT_CTL");
1250
1251 val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
1252 val |= PMT_CTL_SUS_MODE_0;
1253
1254 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1255 check_warn_return(ret, "Error writing PMT_CTL");
1256
1257 /* clear wol status */
1258 val &= ~PMT_CTL_WUPS;
1259 val |= PMT_CTL_WUPS_WOL;
1260 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1261 check_warn_return(ret, "Error writing PMT_CTL");
1262
1263 /* read back PMT_CTL */
1264 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1265 check_warn_return(ret, "Error reading PMT_CTL");
1266
1267 smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1268
1269 return 0;
1270}
1271
1272static int smsc75xx_resume(struct usb_interface *intf)
1273{
1274 struct usbnet *dev = usb_get_intfdata(intf);
1275 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1276 int ret;
1277 u32 val;
1278
1279 if (pdata->wolopts & WAKE_MAGIC) {
1280 netdev_info(dev->net, "resuming from SUSPEND0");
1281
1282 smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1283
1284 /* Disable magic packup wake */
1285 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1286 check_warn_return(ret, "Error reading WUCSR");
1287
1288 val &= ~WUCSR_MPEN;
1289
1290 ret = smsc75xx_write_reg(dev, WUCSR, val);
1291 check_warn_return(ret, "Error writing WUCSR");
1292
1293 /* clear wake-up status */
1294 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1295 check_warn_return(ret, "Error reading PMT_CTL");
1296
1297 val &= ~PMT_CTL_WOL_EN;
1298 val |= PMT_CTL_WUPS;
1299
1300 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1301 check_warn_return(ret, "Error writing PMT_CTL");
1302 } else {
1303 netdev_info(dev->net, "resuming from SUSPEND2");
1304
1305 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1306 check_warn_return(ret, "Error reading PMT_CTL");
1307
1308 val |= PMT_CTL_PHY_PWRUP;
1309
1310 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1311 check_warn_return(ret, "Error writing PMT_CTL");
1312 }
1313
1314 ret = smsc75xx_wait_ready(dev);
1315 check_warn_return(ret, "device not ready in smsc75xx_resume");
1316
1317 return usbnet_resume(intf);
1318}
1319
1086static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb, 1320static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
1087 u32 rx_cmd_a, u32 rx_cmd_b) 1321 u32 rx_cmd_a, u32 rx_cmd_b)
1088{ 1322{
@@ -1251,9 +1485,9 @@ static struct usb_driver smsc75xx_driver = {
1251 .name = SMSC_CHIPNAME, 1485 .name = SMSC_CHIPNAME,
1252 .id_table = products, 1486 .id_table = products,
1253 .probe = usbnet_probe, 1487 .probe = usbnet_probe,
1254 .suspend = usbnet_suspend, 1488 .suspend = smsc75xx_suspend,
1255 .resume = usbnet_resume, 1489 .resume = smsc75xx_resume,
1256 .reset_resume = usbnet_resume, 1490 .reset_resume = smsc75xx_resume,
1257 .disconnect = usbnet_disconnect, 1491 .disconnect = usbnet_disconnect,
1258 .disable_hub_initiated_lpm = 1, 1492 .disable_hub_initiated_lpm = 1,
1259}; 1493};
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d45e539a84b7..7479a5761d0d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -46,11 +46,22 @@
46#define SMSC95XX_INTERNAL_PHY_ID (1) 46#define SMSC95XX_INTERNAL_PHY_ID (1)
47#define SMSC95XX_TX_OVERHEAD (8) 47#define SMSC95XX_TX_OVERHEAD (8)
48#define SMSC95XX_TX_OVERHEAD_CSUM (12) 48#define SMSC95XX_TX_OVERHEAD_CSUM (12)
49#define SUPPORTED_WAKE (WAKE_MAGIC)
50
51#define check_warn(ret, fmt, args...) \
52 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
53
54#define check_warn_return(ret, fmt, args...) \
55 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
56
57#define check_warn_goto_done(ret, fmt, args...) \
58 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
49 59
50struct smsc95xx_priv { 60struct smsc95xx_priv {
51 u32 mac_cr; 61 u32 mac_cr;
52 u32 hash_hi; 62 u32 hash_hi;
53 u32 hash_lo; 63 u32 hash_lo;
64 u32 wolopts;
54 spinlock_t mac_cr_lock; 65 spinlock_t mac_cr_lock;
55}; 66};
56 67
@@ -63,7 +74,8 @@ static bool turbo_mode = true;
63module_param(turbo_mode, bool, 0644); 74module_param(turbo_mode, bool, 0644);
64MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); 75MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
65 76
66static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data) 77static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
78 u32 *data)
67{ 79{
68 u32 *buf = kmalloc(4, GFP_KERNEL); 80 u32 *buf = kmalloc(4, GFP_KERNEL);
69 int ret; 81 int ret;
@@ -88,7 +100,8 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
88 return ret; 100 return ret;
89} 101}
90 102
91static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data) 103static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
104 u32 data)
92{ 105{
93 u32 *buf = kmalloc(4, GFP_KERNEL); 106 u32 *buf = kmalloc(4, GFP_KERNEL);
94 int ret; 107 int ret;
@@ -114,15 +127,41 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
114 return ret; 127 return ret;
115} 128}
116 129
130static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
131{
132 if (WARN_ON_ONCE(!dev))
133 return -EINVAL;
134
135 cpu_to_le32s(&feature);
136
137 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
138 USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
139 USB_CTRL_SET_TIMEOUT);
140}
141
142static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
143{
144 if (WARN_ON_ONCE(!dev))
145 return -EINVAL;
146
147 cpu_to_le32s(&feature);
148
149 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
150 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
151 USB_CTRL_SET_TIMEOUT);
152}
153
117/* Loop until the read is completed with timeout 154/* Loop until the read is completed with timeout
118 * called with phy_mutex held */ 155 * called with phy_mutex held */
119static int smsc95xx_phy_wait_not_busy(struct usbnet *dev) 156static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
120{ 157{
121 unsigned long start_time = jiffies; 158 unsigned long start_time = jiffies;
122 u32 val; 159 u32 val;
160 int ret;
123 161
124 do { 162 do {
125 smsc95xx_read_reg(dev, MII_ADDR, &val); 163 ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
164 check_warn_return(ret, "Error reading MII_ACCESS");
126 if (!(val & MII_BUSY_)) 165 if (!(val & MII_BUSY_))
127 return 0; 166 return 0;
128 } while (!time_after(jiffies, start_time + HZ)); 167 } while (!time_after(jiffies, start_time + HZ));
@@ -134,33 +173,32 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
134{ 173{
135 struct usbnet *dev = netdev_priv(netdev); 174 struct usbnet *dev = netdev_priv(netdev);
136 u32 val, addr; 175 u32 val, addr;
176 int ret;
137 177
138 mutex_lock(&dev->phy_mutex); 178 mutex_lock(&dev->phy_mutex);
139 179
140 /* confirm MII not busy */ 180 /* confirm MII not busy */
141 if (smsc95xx_phy_wait_not_busy(dev)) { 181 ret = smsc95xx_phy_wait_not_busy(dev);
142 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n"); 182 check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read");
143 mutex_unlock(&dev->phy_mutex);
144 return -EIO;
145 }
146 183
147 /* set the address, index & direction (read from PHY) */ 184 /* set the address, index & direction (read from PHY) */
148 phy_id &= dev->mii.phy_id_mask; 185 phy_id &= dev->mii.phy_id_mask;
149 idx &= dev->mii.reg_num_mask; 186 idx &= dev->mii.reg_num_mask;
150 addr = (phy_id << 11) | (idx << 6) | MII_READ_; 187 addr = (phy_id << 11) | (idx << 6) | MII_READ_;
151 smsc95xx_write_reg(dev, MII_ADDR, addr); 188 ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
189 check_warn_goto_done(ret, "Error writing MII_ADDR");
152 190
153 if (smsc95xx_phy_wait_not_busy(dev)) { 191 ret = smsc95xx_phy_wait_not_busy(dev);
154 netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx); 192 check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
155 mutex_unlock(&dev->phy_mutex);
156 return -EIO;
157 }
158 193
159 smsc95xx_read_reg(dev, MII_DATA, &val); 194 ret = smsc95xx_read_reg(dev, MII_DATA, &val);
195 check_warn_goto_done(ret, "Error reading MII_DATA");
160 196
161 mutex_unlock(&dev->phy_mutex); 197 ret = (u16)(val & 0xFFFF);
162 198
163 return (u16)(val & 0xFFFF); 199done:
200 mutex_unlock(&dev->phy_mutex);
201 return ret;
164} 202}
165 203
166static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx, 204static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
@@ -168,38 +206,41 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
168{ 206{
169 struct usbnet *dev = netdev_priv(netdev); 207 struct usbnet *dev = netdev_priv(netdev);
170 u32 val, addr; 208 u32 val, addr;
209 int ret;
171 210
172 mutex_lock(&dev->phy_mutex); 211 mutex_lock(&dev->phy_mutex);
173 212
174 /* confirm MII not busy */ 213 /* confirm MII not busy */
175 if (smsc95xx_phy_wait_not_busy(dev)) { 214 ret = smsc95xx_phy_wait_not_busy(dev);
176 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n"); 215 check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write");
177 mutex_unlock(&dev->phy_mutex);
178 return;
179 }
180 216
181 val = regval; 217 val = regval;
182 smsc95xx_write_reg(dev, MII_DATA, val); 218 ret = smsc95xx_write_reg(dev, MII_DATA, val);
219 check_warn_goto_done(ret, "Error writing MII_DATA");
183 220
184 /* set the address, index & direction (write to PHY) */ 221 /* set the address, index & direction (write to PHY) */
185 phy_id &= dev->mii.phy_id_mask; 222 phy_id &= dev->mii.phy_id_mask;
186 idx &= dev->mii.reg_num_mask; 223 idx &= dev->mii.reg_num_mask;
187 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_; 224 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
188 smsc95xx_write_reg(dev, MII_ADDR, addr); 225 ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
226 check_warn_goto_done(ret, "Error writing MII_ADDR");
189 227
190 if (smsc95xx_phy_wait_not_busy(dev)) 228 ret = smsc95xx_phy_wait_not_busy(dev);
191 netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx); 229 check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
192 230
231done:
193 mutex_unlock(&dev->phy_mutex); 232 mutex_unlock(&dev->phy_mutex);
194} 233}
195 234
196static int smsc95xx_wait_eeprom(struct usbnet *dev) 235static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
197{ 236{
198 unsigned long start_time = jiffies; 237 unsigned long start_time = jiffies;
199 u32 val; 238 u32 val;
239 int ret;
200 240
201 do { 241 do {
202 smsc95xx_read_reg(dev, E2P_CMD, &val); 242 ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
243 check_warn_return(ret, "Error reading E2P_CMD");
203 if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_)) 244 if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
204 break; 245 break;
205 udelay(40); 246 udelay(40);
@@ -213,13 +254,15 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
213 return 0; 254 return 0;
214} 255}
215 256
216static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev) 257static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
217{ 258{
218 unsigned long start_time = jiffies; 259 unsigned long start_time = jiffies;
219 u32 val; 260 u32 val;
261 int ret;
220 262
221 do { 263 do {
222 smsc95xx_read_reg(dev, E2P_CMD, &val); 264 ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
265 check_warn_return(ret, "Error reading E2P_CMD");
223 266
224 if (!(val & E2P_CMD_BUSY_)) 267 if (!(val & E2P_CMD_BUSY_))
225 return 0; 268 return 0;
@@ -246,13 +289,15 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
246 289
247 for (i = 0; i < length; i++) { 290 for (i = 0; i < length; i++) {
248 val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_); 291 val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
249 smsc95xx_write_reg(dev, E2P_CMD, val); 292 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
293 check_warn_return(ret, "Error writing E2P_CMD");
250 294
251 ret = smsc95xx_wait_eeprom(dev); 295 ret = smsc95xx_wait_eeprom(dev);
252 if (ret < 0) 296 if (ret < 0)
253 return ret; 297 return ret;
254 298
255 smsc95xx_read_reg(dev, E2P_DATA, &val); 299 ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
300 check_warn_return(ret, "Error reading E2P_DATA");
256 301
257 data[i] = val & 0xFF; 302 data[i] = val & 0xFF;
258 offset++; 303 offset++;
@@ -276,7 +321,8 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
276 321
277 /* Issue write/erase enable command */ 322 /* Issue write/erase enable command */
278 val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_; 323 val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
279 smsc95xx_write_reg(dev, E2P_CMD, val); 324 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
325 check_warn_return(ret, "Error writing E2P_DATA");
280 326
281 ret = smsc95xx_wait_eeprom(dev); 327 ret = smsc95xx_wait_eeprom(dev);
282 if (ret < 0) 328 if (ret < 0)
@@ -286,11 +332,13 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
286 332
287 /* Fill data register */ 333 /* Fill data register */
288 val = data[i]; 334 val = data[i];
289 smsc95xx_write_reg(dev, E2P_DATA, val); 335 ret = smsc95xx_write_reg(dev, E2P_DATA, val);
336 check_warn_return(ret, "Error writing E2P_DATA");
290 337
291 /* Send "write" command */ 338 /* Send "write" command */
292 val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_); 339 val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
293 smsc95xx_write_reg(dev, E2P_CMD, val); 340 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
341 check_warn_return(ret, "Error writing E2P_CMD");
294 342
295 ret = smsc95xx_wait_eeprom(dev); 343 ret = smsc95xx_wait_eeprom(dev);
296 if (ret < 0) 344 if (ret < 0)
@@ -308,14 +356,14 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
308 struct usbnet *dev = usb_context->dev; 356 struct usbnet *dev = usb_context->dev;
309 int status = urb->status; 357 int status = urb->status;
310 358
311 if (status < 0) 359 check_warn(status, "async callback failed with %d\n", status);
312 netdev_warn(dev->net, "async callback failed with %d\n", status);
313 360
314 kfree(usb_context); 361 kfree(usb_context);
315 usb_free_urb(urb); 362 usb_free_urb(urb);
316} 363}
317 364
318static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data) 365static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
366 u32 *data)
319{ 367{
320 struct usb_context *usb_context; 368 struct usb_context *usb_context;
321 int status; 369 int status;
@@ -371,6 +419,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
371 struct usbnet *dev = netdev_priv(netdev); 419 struct usbnet *dev = netdev_priv(netdev);
372 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 420 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
373 unsigned long flags; 421 unsigned long flags;
422 int ret;
374 423
375 pdata->hash_hi = 0; 424 pdata->hash_hi = 0;
376 pdata->hash_lo = 0; 425 pdata->hash_lo = 0;
@@ -411,21 +460,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
411 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 460 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
412 461
413 /* Initiate async writes, as we can't wait for completion here */ 462 /* Initiate async writes, as we can't wait for completion here */
414 smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi); 463 ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
415 smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo); 464 check_warn(ret, "failed to initiate async write to HASHH");
416 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 465
466 ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
467 check_warn(ret, "failed to initiate async write to HASHL");
468
469 ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
470 check_warn(ret, "failed to initiate async write to MAC_CR");
417} 471}
418 472
419static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, 473static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
420 u16 lcladv, u16 rmtadv) 474 u16 lcladv, u16 rmtadv)
421{ 475{
422 u32 flow, afc_cfg = 0; 476 u32 flow, afc_cfg = 0;
423 477
424 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); 478 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
425 if (ret < 0) { 479 check_warn_return(ret, "Error reading AFC_CFG");
426 netdev_warn(dev->net, "error reading AFC_CFG\n");
427 return;
428 }
429 480
430 if (duplex == DUPLEX_FULL) { 481 if (duplex == DUPLEX_FULL) {
431 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 482 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -449,8 +500,13 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
449 afc_cfg |= 0xF; 500 afc_cfg |= 0xF;
450 } 501 }
451 502
452 smsc95xx_write_reg(dev, FLOW, flow); 503 ret = smsc95xx_write_reg(dev, FLOW, flow);
453 smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); 504 check_warn_return(ret, "Error writing FLOW");
505
506 ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
507 check_warn_return(ret, "Error writing AFC_CFG");
508
509 return 0;
454} 510}
455 511
456static int smsc95xx_link_reset(struct usbnet *dev) 512static int smsc95xx_link_reset(struct usbnet *dev)
@@ -460,12 +516,14 @@ static int smsc95xx_link_reset(struct usbnet *dev)
460 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; 516 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
461 unsigned long flags; 517 unsigned long flags;
462 u16 lcladv, rmtadv; 518 u16 lcladv, rmtadv;
463 u32 intdata; 519 int ret;
464 520
465 /* clear interrupt status */ 521 /* clear interrupt status */
466 smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); 522 ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
467 intdata = 0xFFFFFFFF; 523 check_warn_return(ret, "Error reading PHY_INT_SRC");
468 smsc95xx_write_reg(dev, INT_STS, intdata); 524
525 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
526 check_warn_return(ret, "Error writing INT_STS");
469 527
470 mii_check_media(mii, 1, 1); 528 mii_check_media(mii, 1, 1);
471 mii_ethtool_gset(&dev->mii, &ecmd); 529 mii_ethtool_gset(&dev->mii, &ecmd);
@@ -486,9 +544,11 @@ static int smsc95xx_link_reset(struct usbnet *dev)
486 } 544 }
487 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 545 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
488 546
489 smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 547 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
548 check_warn_return(ret, "Error writing MAC_CR");
490 549
491 smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); 550 ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
551 check_warn_return(ret, "Error updating PHY flow control");
492 552
493 return 0; 553 return 0;
494} 554}
@@ -524,10 +584,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
524 int ret; 584 int ret;
525 585
526 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); 586 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
527 if (ret < 0) { 587 check_warn_return(ret, "Failed to read COE_CR: %d\n", ret);
528 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
529 return ret;
530 }
531 588
532 if (features & NETIF_F_HW_CSUM) 589 if (features & NETIF_F_HW_CSUM)
533 read_buf |= Tx_COE_EN_; 590 read_buf |= Tx_COE_EN_;
@@ -540,10 +597,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
540 read_buf &= ~Rx_COE_EN_; 597 read_buf &= ~Rx_COE_EN_;
541 598
542 ret = smsc95xx_write_reg(dev, COE_CR, read_buf); 599 ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
543 if (ret < 0) { 600 check_warn_return(ret, "Failed to write COE_CR: %d\n", ret);
544 netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
545 return ret;
546 }
547 601
548 netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf); 602 netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
549 return 0; 603 return 0;
@@ -608,6 +662,26 @@ smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
608 } 662 }
609} 663}
610 664
665static void smsc95xx_ethtool_get_wol(struct net_device *net,
666 struct ethtool_wolinfo *wolinfo)
667{
668 struct usbnet *dev = netdev_priv(net);
669 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
670
671 wolinfo->supported = SUPPORTED_WAKE;
672 wolinfo->wolopts = pdata->wolopts;
673}
674
675static int smsc95xx_ethtool_set_wol(struct net_device *net,
676 struct ethtool_wolinfo *wolinfo)
677{
678 struct usbnet *dev = netdev_priv(net);
679 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
680
681 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
682 return 0;
683}
684
611static const struct ethtool_ops smsc95xx_ethtool_ops = { 685static const struct ethtool_ops smsc95xx_ethtool_ops = {
612 .get_link = usbnet_get_link, 686 .get_link = usbnet_get_link,
613 .nway_reset = usbnet_nway_reset, 687 .nway_reset = usbnet_nway_reset,
@@ -621,6 +695,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
621 .set_eeprom = smsc95xx_ethtool_set_eeprom, 695 .set_eeprom = smsc95xx_ethtool_set_eeprom,
622 .get_regs_len = smsc95xx_ethtool_getregslen, 696 .get_regs_len = smsc95xx_ethtool_getregslen,
623 .get_regs = smsc95xx_ethtool_getregs, 697 .get_regs = smsc95xx_ethtool_getregs,
698 .get_wol = smsc95xx_ethtool_get_wol,
699 .set_wol = smsc95xx_ethtool_set_wol,
624}; 700};
625 701
626static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 702static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -658,55 +734,56 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
658 int ret; 734 int ret;
659 735
660 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo); 736 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
661 if (ret < 0) { 737 check_warn_return(ret, "Failed to write ADDRL: %d\n", ret);
662 netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
663 return ret;
664 }
665 738
666 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi); 739 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
667 if (ret < 0) { 740 check_warn_return(ret, "Failed to write ADDRH: %d\n", ret);
668 netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
669 return ret;
670 }
671 741
672 return 0; 742 return 0;
673} 743}
674 744
675/* starts the TX path */ 745/* starts the TX path */
676static void smsc95xx_start_tx_path(struct usbnet *dev) 746static int smsc95xx_start_tx_path(struct usbnet *dev)
677{ 747{
678 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 748 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
679 unsigned long flags; 749 unsigned long flags;
680 u32 reg_val; 750 int ret;
681 751
682 /* Enable Tx at MAC */ 752 /* Enable Tx at MAC */
683 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 753 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
684 pdata->mac_cr |= MAC_CR_TXEN_; 754 pdata->mac_cr |= MAC_CR_TXEN_;
685 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 755 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
686 756
687 smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 757 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
758 check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
688 759
689 /* Enable Tx at SCSRs */ 760 /* Enable Tx at SCSRs */
690 reg_val = TX_CFG_ON_; 761 ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
691 smsc95xx_write_reg(dev, TX_CFG, reg_val); 762 check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret);
763
764 return 0;
692} 765}
693 766
694/* Starts the Receive path */ 767/* Starts the Receive path */
695static void smsc95xx_start_rx_path(struct usbnet *dev) 768static int smsc95xx_start_rx_path(struct usbnet *dev)
696{ 769{
697 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 770 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
698 unsigned long flags; 771 unsigned long flags;
772 int ret;
699 773
700 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 774 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
701 pdata->mac_cr |= MAC_CR_RXEN_; 775 pdata->mac_cr |= MAC_CR_RXEN_;
702 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 776 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
703 777
704 smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 778 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
779 check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
780
781 return 0;
705} 782}
706 783
707static int smsc95xx_phy_initialize(struct usbnet *dev) 784static int smsc95xx_phy_initialize(struct usbnet *dev)
708{ 785{
709 int bmcr, timeout = 0; 786 int bmcr, ret, timeout = 0;
710 787
711 /* Initialize MII structure */ 788 /* Initialize MII structure */
712 dev->mii.dev = dev->net; 789 dev->mii.dev = dev->net;
@@ -735,7 +812,8 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
735 ADVERTISE_PAUSE_ASYM); 812 ADVERTISE_PAUSE_ASYM);
736 813
737 /* read to clear */ 814 /* read to clear */
738 smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); 815 ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
816 check_warn_return(ret, "Failed to read PHY_INT_SRC during init");
739 817
740 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, 818 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
741 PHY_INT_MASK_DEFAULT_); 819 PHY_INT_MASK_DEFAULT_);
@@ -753,22 +831,14 @@ static int smsc95xx_reset(struct usbnet *dev)
753 831
754 netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n"); 832 netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
755 833
756 write_buf = HW_CFG_LRST_; 834 ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
757 ret = smsc95xx_write_reg(dev, HW_CFG, write_buf); 835 check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
758 if (ret < 0) {
759 netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
760 ret);
761 return ret;
762 }
763 836
764 timeout = 0; 837 timeout = 0;
765 do { 838 do {
766 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
767 if (ret < 0) {
768 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
769 return ret;
770 }
771 msleep(10); 839 msleep(10);
840 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
841 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
772 timeout++; 842 timeout++;
773 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100)); 843 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
774 844
@@ -777,21 +847,14 @@ static int smsc95xx_reset(struct usbnet *dev)
777 return ret; 847 return ret;
778 } 848 }
779 849
780 write_buf = PM_CTL_PHY_RST_; 850 ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
781 ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf); 851 check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret);
782 if (ret < 0) {
783 netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
784 return ret;
785 }
786 852
787 timeout = 0; 853 timeout = 0;
788 do { 854 do {
789 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
790 if (ret < 0) {
791 netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
792 return ret;
793 }
794 msleep(10); 855 msleep(10);
856 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
857 check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret);
795 timeout++; 858 timeout++;
796 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100)); 859 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
797 860
@@ -808,10 +871,7 @@ static int smsc95xx_reset(struct usbnet *dev)
808 "MAC Address: %pM\n", dev->net->dev_addr); 871 "MAC Address: %pM\n", dev->net->dev_addr);
809 872
810 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 873 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
811 if (ret < 0) { 874 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
812 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
813 return ret;
814 }
815 875
816 netif_dbg(dev, ifup, dev->net, 876 netif_dbg(dev, ifup, dev->net,
817 "Read Value from HW_CFG : 0x%08x\n", read_buf); 877 "Read Value from HW_CFG : 0x%08x\n", read_buf);
@@ -819,17 +879,10 @@ static int smsc95xx_reset(struct usbnet *dev)
819 read_buf |= HW_CFG_BIR_; 879 read_buf |= HW_CFG_BIR_;
820 880
821 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 881 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
822 if (ret < 0) { 882 check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
823 netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
824 ret);
825 return ret;
826 }
827 883
828 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 884 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
829 if (ret < 0) { 885 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
830 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
831 return ret;
832 }
833 netif_dbg(dev, ifup, dev->net, 886 netif_dbg(dev, ifup, dev->net,
834 "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n", 887 "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
835 read_buf); 888 read_buf);
@@ -849,41 +902,28 @@ static int smsc95xx_reset(struct usbnet *dev)
849 "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size); 902 "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
850 903
851 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap); 904 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
852 if (ret < 0) { 905 check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
853 netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
854 return ret;
855 }
856 906
857 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf); 907 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
858 if (ret < 0) { 908 check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret);
859 netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); 909
860 return ret;
861 }
862 netif_dbg(dev, ifup, dev->net, 910 netif_dbg(dev, ifup, dev->net,
863 "Read Value from BURST_CAP after writing: 0x%08x\n", 911 "Read Value from BURST_CAP after writing: 0x%08x\n",
864 read_buf); 912 read_buf);
865 913
866 read_buf = DEFAULT_BULK_IN_DELAY; 914 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
867 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf); 915 check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret);
868 if (ret < 0) {
869 netdev_warn(dev->net, "ret = %d\n", ret);
870 return ret;
871 }
872 916
873 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf); 917 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
874 if (ret < 0) { 918 check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret);
875 netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); 919
876 return ret;
877 }
878 netif_dbg(dev, ifup, dev->net, 920 netif_dbg(dev, ifup, dev->net,
879 "Read Value from BULK_IN_DLY after writing: 0x%08x\n", 921 "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
880 read_buf); 922 read_buf);
881 923
882 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 924 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
883 if (ret < 0) { 925 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
884 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); 926
885 return ret;
886 }
887 netif_dbg(dev, ifup, dev->net, 927 netif_dbg(dev, ifup, dev->net,
888 "Read Value from HW_CFG: 0x%08x\n", read_buf); 928 "Read Value from HW_CFG: 0x%08x\n", read_buf);
889 929
@@ -896,101 +936,66 @@ static int smsc95xx_reset(struct usbnet *dev)
896 read_buf |= NET_IP_ALIGN << 9; 936 read_buf |= NET_IP_ALIGN << 9;
897 937
898 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 938 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
899 if (ret < 0) { 939 check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
900 netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
901 ret);
902 return ret;
903 }
904 940
905 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 941 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
906 if (ret < 0) { 942 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
907 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); 943
908 return ret;
909 }
910 netif_dbg(dev, ifup, dev->net, 944 netif_dbg(dev, ifup, dev->net,
911 "Read Value from HW_CFG after writing: 0x%08x\n", read_buf); 945 "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
912 946
913 write_buf = 0xFFFFFFFF; 947 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
914 ret = smsc95xx_write_reg(dev, INT_STS, write_buf); 948 check_warn_return(ret, "Failed to write INT_STS: %d\n", ret);
915 if (ret < 0) {
916 netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
917 ret);
918 return ret;
919 }
920 949
921 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf); 950 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
922 if (ret < 0) { 951 check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
923 netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
924 return ret;
925 }
926 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf); 952 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
927 953
928 /* Configure GPIO pins as LED outputs */ 954 /* Configure GPIO pins as LED outputs */
929 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | 955 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
930 LED_GPIO_CFG_FDX_LED; 956 LED_GPIO_CFG_FDX_LED;
931 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); 957 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
932 if (ret < 0) { 958 check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret);
933 netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
934 ret);
935 return ret;
936 }
937 959
938 /* Init Tx */ 960 /* Init Tx */
939 write_buf = 0; 961 ret = smsc95xx_write_reg(dev, FLOW, 0);
940 ret = smsc95xx_write_reg(dev, FLOW, write_buf); 962 check_warn_return(ret, "Failed to write FLOW: %d\n", ret);
941 if (ret < 0) {
942 netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
943 return ret;
944 }
945 963
946 read_buf = AFC_CFG_DEFAULT; 964 ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
947 ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf); 965 check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret);
948 if (ret < 0) {
949 netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
950 return ret;
951 }
952 966
953 /* Don't need mac_cr_lock during initialisation */ 967 /* Don't need mac_cr_lock during initialisation */
954 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr); 968 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
955 if (ret < 0) { 969 check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret);
956 netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
957 return ret;
958 }
959 970
960 /* Init Rx */ 971 /* Init Rx */
961 /* Set Vlan */ 972 /* Set Vlan */
962 write_buf = (u32)ETH_P_8021Q; 973 ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
963 ret = smsc95xx_write_reg(dev, VLAN1, write_buf); 974 check_warn_return(ret, "Failed to write VLAN1: %d\n", ret);
964 if (ret < 0) {
965 netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
966 return ret;
967 }
968 975
969 /* Enable or disable checksum offload engines */ 976 /* Enable or disable checksum offload engines */
970 smsc95xx_set_features(dev->net, dev->net->features); 977 ret = smsc95xx_set_features(dev->net, dev->net->features);
978 check_warn_return(ret, "Failed to set checksum offload features");
971 979
972 smsc95xx_set_multicast(dev->net); 980 smsc95xx_set_multicast(dev->net);
973 981
974 if (smsc95xx_phy_initialize(dev) < 0) 982 ret = smsc95xx_phy_initialize(dev);
975 return -EIO; 983 check_warn_return(ret, "Failed to init PHY");
976 984
977 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf); 985 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
978 if (ret < 0) { 986 check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
979 netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
980 return ret;
981 }
982 987
983 /* enable PHY interrupts */ 988 /* enable PHY interrupts */
984 read_buf |= INT_EP_CTL_PHY_INT_; 989 read_buf |= INT_EP_CTL_PHY_INT_;
985 990
986 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf); 991 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
987 if (ret < 0) { 992 check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
988 netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
989 return ret;
990 }
991 993
992 smsc95xx_start_tx_path(dev); 994 ret = smsc95xx_start_tx_path(dev);
993 smsc95xx_start_rx_path(dev); 995 check_warn_return(ret, "Failed to start TX path");
996
997 ret = smsc95xx_start_rx_path(dev);
998 check_warn_return(ret, "Failed to start RX path");
994 999
995 netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n"); 1000 netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
996 return 0; 1001 return 0;
@@ -1017,10 +1022,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1017 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); 1022 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
1018 1023
1019 ret = usbnet_get_endpoints(dev, intf); 1024 ret = usbnet_get_endpoints(dev, intf);
1020 if (ret < 0) { 1025 check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret);
1021 netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
1022 return ret;
1023 }
1024 1026
1025 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv), 1027 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
1026 GFP_KERNEL); 1028 GFP_KERNEL);
@@ -1064,6 +1066,153 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1064 } 1066 }
1065} 1067}
1066 1068
1069static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
1070{
1071 struct usbnet *dev = usb_get_intfdata(intf);
1072 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1073 int ret;
1074 u32 val;
1075
1076 ret = usbnet_suspend(intf, message);
1077 check_warn_return(ret, "usbnet_suspend error");
1078
1079 /* if no wol options set, enter lowest power SUSPEND2 mode */
1080 if (!(pdata->wolopts & SUPPORTED_WAKE)) {
1081 netdev_info(dev->net, "entering SUSPEND2 mode");
1082
1083 /* disable energy detect (link up) & wake up events */
1084 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1085 check_warn_return(ret, "Error reading WUCSR");
1086
1087 val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
1088
1089 ret = smsc95xx_write_reg(dev, WUCSR, val);
1090 check_warn_return(ret, "Error writing WUCSR");
1091
1092 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1093 check_warn_return(ret, "Error reading PM_CTRL");
1094
1095 val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
1096
1097 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1098 check_warn_return(ret, "Error writing PM_CTRL");
1099
1100 /* enter suspend2 mode */
1101 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1102 check_warn_return(ret, "Error reading PM_CTRL");
1103
1104 val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
1105 val |= PM_CTL_SUS_MODE_2;
1106
1107 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1108 check_warn_return(ret, "Error writing PM_CTRL");
1109
1110 return 0;
1111 }
1112
1113 if (pdata->wolopts & WAKE_MAGIC) {
1114 /* clear any pending magic packet status */
1115 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1116 check_warn_return(ret, "Error reading WUCSR");
1117
1118 val |= WUCSR_MPR_;
1119
1120 ret = smsc95xx_write_reg(dev, WUCSR, val);
1121 check_warn_return(ret, "Error writing WUCSR");
1122 }
1123
 1124 /* enable/disable magic packet wake */
1125 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1126 check_warn_return(ret, "Error reading WUCSR");
1127
1128 if (pdata->wolopts & WAKE_MAGIC) {
1129 netdev_info(dev->net, "enabling magic packet wakeup");
1130 val |= WUCSR_MPEN_;
1131 } else {
1132 netdev_info(dev->net, "disabling magic packet wakeup");
1133 val &= ~WUCSR_MPEN_;
1134 }
1135
1136 ret = smsc95xx_write_reg(dev, WUCSR, val);
1137 check_warn_return(ret, "Error writing WUCSR");
1138
1139 /* enable wol wakeup source */
1140 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1141 check_warn_return(ret, "Error reading PM_CTRL");
1142
1143 val |= PM_CTL_WOL_EN_;
1144
1145 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1146 check_warn_return(ret, "Error writing PM_CTRL");
1147
1148 /* enable receiver */
1149 smsc95xx_start_rx_path(dev);
1150
1151 /* some wol options are enabled, so enter SUSPEND0 */
1152 netdev_info(dev->net, "entering SUSPEND0 mode");
1153
1154 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1155 check_warn_return(ret, "Error reading PM_CTRL");
1156
1157 val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
1158 val |= PM_CTL_SUS_MODE_0;
1159
1160 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1161 check_warn_return(ret, "Error writing PM_CTRL");
1162
1163 /* clear wol status */
1164 val &= ~PM_CTL_WUPS_;
1165 val |= PM_CTL_WUPS_WOL_;
1166 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1167 check_warn_return(ret, "Error writing PM_CTRL");
1168
1169 /* read back PM_CTRL */
1170 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1171 check_warn_return(ret, "Error reading PM_CTRL");
1172
1173 smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1174
1175 return 0;
1176}
1177
1178static int smsc95xx_resume(struct usb_interface *intf)
1179{
1180 struct usbnet *dev = usb_get_intfdata(intf);
1181 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1182 int ret;
1183 u32 val;
1184
1185 BUG_ON(!dev);
1186
1187 if (pdata->wolopts & WAKE_MAGIC) {
1188 smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1189
 1190 /* Disable magic packet wake */
1191 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1192 check_warn_return(ret, "Error reading WUCSR");
1193
1194 val &= ~WUCSR_MPEN_;
1195
1196 ret = smsc95xx_write_reg(dev, WUCSR, val);
1197 check_warn_return(ret, "Error writing WUCSR");
1198
1199 /* clear wake-up status */
1200 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1201 check_warn_return(ret, "Error reading PM_CTRL");
1202
1203 val &= ~PM_CTL_WOL_EN_;
1204 val |= PM_CTL_WUPS_;
1205
1206 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1207 check_warn_return(ret, "Error writing PM_CTRL");
1208 }
1209
 1210 ret = usbnet_resume(intf);
1211 check_warn_return(ret, "usbnet_resume error");
1212
1213 return 0;
1214}
1215
1067static void smsc95xx_rx_csum_offload(struct sk_buff *skb) 1216static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1068{ 1217{
1069 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); 1218 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -1326,8 +1475,9 @@ static struct usb_driver smsc95xx_driver = {
1326 .name = "smsc95xx", 1475 .name = "smsc95xx",
1327 .id_table = products, 1476 .id_table = products,
1328 .probe = usbnet_probe, 1477 .probe = usbnet_probe,
1329 .suspend = usbnet_suspend, 1478 .suspend = smsc95xx_suspend,
1330 .resume = usbnet_resume, 1479 .resume = smsc95xx_resume,
1480 .reset_resume = smsc95xx_resume,
1331 .disconnect = usbnet_disconnect, 1481 .disconnect = usbnet_disconnect,
1332 .disable_hub_initiated_lpm = 1, 1482 .disable_hub_initiated_lpm = 1,
1333}; 1483};
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 86bc44977fbd..2ff9815aa27c 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -63,6 +63,7 @@
63#define INT_STS_TDFO_ (0x00001000) 63#define INT_STS_TDFO_ (0x00001000)
64#define INT_STS_RXDF_ (0x00000800) 64#define INT_STS_RXDF_ (0x00000800)
65#define INT_STS_GPIOS_ (0x000007FF) 65#define INT_STS_GPIOS_ (0x000007FF)
66#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF)
66 67
67#define RX_CFG (0x0C) 68#define RX_CFG (0x0C)
68#define RX_FIFO_FLUSH_ (0x00000001) 69#define RX_FIFO_FLUSH_ (0x00000001)
@@ -83,12 +84,16 @@
83#define HW_CFG_BCE_ (0x00000002) 84#define HW_CFG_BCE_ (0x00000002)
84#define HW_CFG_SRST_ (0x00000001) 85#define HW_CFG_SRST_ (0x00000001)
85 86
87#define RX_FIFO_INF (0x18)
88
86#define PM_CTRL (0x20) 89#define PM_CTRL (0x20)
90#define PM_CTL_RES_CLR_WKP_STS (0x00000200)
87#define PM_CTL_DEV_RDY_ (0x00000080) 91#define PM_CTL_DEV_RDY_ (0x00000080)
88#define PM_CTL_SUS_MODE_ (0x00000060) 92#define PM_CTL_SUS_MODE_ (0x00000060)
89#define PM_CTL_SUS_MODE_0 (0x00000000) 93#define PM_CTL_SUS_MODE_0 (0x00000000)
90#define PM_CTL_SUS_MODE_1 (0x00000020) 94#define PM_CTL_SUS_MODE_1 (0x00000020)
91#define PM_CTL_SUS_MODE_2 (0x00000060) 95#define PM_CTL_SUS_MODE_2 (0x00000040)
96#define PM_CTL_SUS_MODE_3 (0x00000060)
92#define PM_CTL_PHY_RST_ (0x00000010) 97#define PM_CTL_PHY_RST_ (0x00000010)
93#define PM_CTL_WOL_EN_ (0x00000008) 98#define PM_CTL_WOL_EN_ (0x00000008)
94#define PM_CTL_ED_EN_ (0x00000004) 99#define PM_CTL_ED_EN_ (0x00000004)
@@ -200,6 +205,11 @@
200#define WUFF (0x128) 205#define WUFF (0x128)
201 206
202#define WUCSR (0x12C) 207#define WUCSR (0x12C)
208#define WUCSR_GUE_ (0x00000200)
209#define WUCSR_WUFR_ (0x00000040)
210#define WUCSR_MPR_ (0x00000020)
211#define WUCSR_WAKE_EN_ (0x00000004)
212#define WUCSR_MPEN_ (0x00000002)
203 213
204#define COE_CR (0x130) 214#define COE_CR (0x130)
205#define Tx_COE_EN_ (0x00010000) 215#define Tx_COE_EN_ (0x00010000)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5852361032c4..e522ff70444c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
348 if (tbp[IFLA_ADDRESS] == NULL) 348 if (tbp[IFLA_ADDRESS] == NULL)
349 eth_hw_addr_random(peer); 349 eth_hw_addr_random(peer);
350 350
351 if (ifmp && (dev->ifindex != 0))
352 peer->ifindex = ifmp->ifi_index;
353
351 err = register_netdevice(peer); 354 err = register_netdevice(peer);
352 put_net(net); 355 put_net(net);
353 net = NULL; 356 net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9650c413e11f..cbf8b0625352 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
993 goto done; 993 goto done;
994 994
995 if (v & VIRTIO_NET_S_ANNOUNCE) { 995 if (v & VIRTIO_NET_S_ANNOUNCE) {
996 netif_notify_peers(vi->dev); 996 netdev_notify_peers(vi->dev);
997 virtnet_ack_link_announce(vi); 997 virtnet_ack_link_announce(vi);
998 } 998 }
999 999
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
new file mode 100644
index 000000000000..51de9edb55f5
--- /dev/null
+++ b/drivers/net/vxlan.c
@@ -0,0 +1,1219 @@
1/*
 2 * VXLAN: Virtual eXtensible Local Area Network
3 *
4 * Copyright (c) 2012 Vyatta Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * TODO
11 * - use IANA UDP port number (when defined)
12 * - IPv6 (not in RFC)
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/skbuff.h>
23#include <linux/rculist.h>
24#include <linux/netdevice.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/udp.h>
28#include <linux/igmp.h>
29#include <linux/etherdevice.h>
30#include <linux/if_ether.h>
31#include <linux/version.h>
32#include <linux/hash.h>
33#include <net/ip.h>
34#include <net/icmp.h>
35#include <net/udp.h>
36#include <net/rtnetlink.h>
37#include <net/route.h>
38#include <net/dsfield.h>
39#include <net/inet_ecn.h>
40#include <net/net_namespace.h>
41#include <net/netns/generic.h>
42
43#define VXLAN_VERSION "0.1"
44
45#define VNI_HASH_BITS 10
46#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
47#define FDB_HASH_BITS 8
48#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
49#define FDB_AGE_DEFAULT 300 /* 5 min */
50#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
51
52#define VXLAN_N_VID (1u << 24)
53#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
54/* VLAN + IP header + UDP + VXLAN */
55#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
56
57#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
58
59/* VXLAN protocol header */
60struct vxlanhdr {
61 __be32 vx_flags;
62 __be32 vx_vni;
63};
64
65/* UDP port for VXLAN traffic. */
66static unsigned int vxlan_port __read_mostly = 8472;
67module_param_named(udp_port, vxlan_port, uint, 0444);
68MODULE_PARM_DESC(udp_port, "Destination UDP port");
69
70static bool log_ecn_error = true;
71module_param(log_ecn_error, bool, 0644);
72MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
73
74/* per-net private data for this module */
75static unsigned int vxlan_net_id;
76struct vxlan_net {
77 struct socket *sock; /* UDP encap socket */
78 struct hlist_head vni_list[VNI_HASH_SIZE];
79};
80
81/* Forwarding table entry */
82struct vxlan_fdb {
83 struct hlist_node hlist; /* linked list of entries */
84 struct rcu_head rcu;
85 unsigned long updated; /* jiffies */
86 unsigned long used;
87 __be32 remote_ip;
88 u16 state; /* see ndm_state */
89 u8 eth_addr[ETH_ALEN];
90};
91
92/* Per-cpu network traffic stats */
93struct vxlan_stats {
94 u64 rx_packets;
95 u64 rx_bytes;
96 u64 tx_packets;
97 u64 tx_bytes;
98 struct u64_stats_sync syncp;
99};
100
101/* Pseudo network device */
102struct vxlan_dev {
103 struct hlist_node hlist;
104 struct net_device *dev;
105 struct vxlan_stats __percpu *stats;
106 __u32 vni; /* virtual network id */
107 __be32 gaddr; /* multicast group */
108 __be32 saddr; /* source address */
109 unsigned int link; /* link to multicast over */
110 __u8 tos; /* TOS override */
111 __u8 ttl;
112 bool learn;
113
114 unsigned long age_interval;
115 struct timer_list age_timer;
116 spinlock_t hash_lock;
117 unsigned int addrcnt;
118 unsigned int addrmax;
119 unsigned int addrexceeded;
120
121 struct hlist_head fdb_head[FDB_HASH_SIZE];
122};
123
124/* salt for hash table */
125static u32 vxlan_salt __read_mostly;
126
127static inline struct hlist_head *vni_head(struct net *net, u32 id)
128{
129 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
130
131 return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
132}
133
134/* Look up VNI in a per net namespace table */
135static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
136{
137 struct vxlan_dev *vxlan;
138 struct hlist_node *node;
139
140 hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
141 if (vxlan->vni == id)
142 return vxlan;
143 }
144
145 return NULL;
146}
147
148/* Fill in neighbour message in skbuff. */
149static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
150 const struct vxlan_fdb *fdb,
151 u32 portid, u32 seq, int type, unsigned int flags)
152{
153 unsigned long now = jiffies;
154 struct nda_cacheinfo ci;
155 struct nlmsghdr *nlh;
156 struct ndmsg *ndm;
157
158 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
159 if (nlh == NULL)
160 return -EMSGSIZE;
161
162 ndm = nlmsg_data(nlh);
163 memset(ndm, 0, sizeof(*ndm));
164 ndm->ndm_family = AF_BRIDGE;
165 ndm->ndm_state = fdb->state;
166 ndm->ndm_ifindex = vxlan->dev->ifindex;
167 ndm->ndm_flags = NTF_SELF;
168 ndm->ndm_type = NDA_DST;
169
170 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
171 goto nla_put_failure;
172
173 if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
174 goto nla_put_failure;
175
176 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
177 ci.ndm_confirmed = 0;
178 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
179 ci.ndm_refcnt = 0;
180
181 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
182 goto nla_put_failure;
183
184 return nlmsg_end(skb, nlh);
185
186nla_put_failure:
187 nlmsg_cancel(skb, nlh);
188 return -EMSGSIZE;
189}
190
191static inline size_t vxlan_nlmsg_size(void)
192{
193 return NLMSG_ALIGN(sizeof(struct ndmsg))
194 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
195 + nla_total_size(sizeof(__be32)) /* NDA_DST */
196 + nla_total_size(sizeof(struct nda_cacheinfo));
197}
198
199static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
200 const struct vxlan_fdb *fdb, int type)
201{
202 struct net *net = dev_net(vxlan->dev);
203 struct sk_buff *skb;
204 int err = -ENOBUFS;
205
206 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
207 if (skb == NULL)
208 goto errout;
209
210 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
211 if (err < 0) {
212 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
213 WARN_ON(err == -EMSGSIZE);
214 kfree_skb(skb);
215 goto errout;
216 }
217
218 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
219 return;
220errout:
221 if (err < 0)
222 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
223}
224
225/* Hash Ethernet address */
226static u32 eth_hash(const unsigned char *addr)
227{
228 u64 value = get_unaligned((u64 *)addr);
229
230 /* only want 6 bytes */
231#ifdef __BIG_ENDIAN
232 value <<= 16;
233#else
234 value >>= 16;
235#endif
236 return hash_64(value, FDB_HASH_BITS);
237}
238
239/* Hash chain to use given mac address */
240static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
241 const u8 *mac)
242{
243 return &vxlan->fdb_head[eth_hash(mac)];
244}
245
246/* Look up Ethernet address in forwarding table */
247static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
248 const u8 *mac)
249
250{
251 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
252 struct vxlan_fdb *f;
253 struct hlist_node *node;
254
255 hlist_for_each_entry_rcu(f, node, head, hlist) {
256 if (compare_ether_addr(mac, f->eth_addr) == 0)
257 return f;
258 }
259
260 return NULL;
261}
262
263/* Add new entry to forwarding table -- assumes lock held */
264static int vxlan_fdb_create(struct vxlan_dev *vxlan,
265 const u8 *mac, __be32 ip,
266 __u16 state, __u16 flags)
267{
268 struct vxlan_fdb *f;
269 int notify = 0;
270
271 f = vxlan_find_mac(vxlan, mac);
272 if (f) {
273 if (flags & NLM_F_EXCL) {
274 netdev_dbg(vxlan->dev,
275 "lost race to create %pM\n", mac);
276 return -EEXIST;
277 }
278 if (f->state != state) {
279 f->state = state;
280 f->updated = jiffies;
281 notify = 1;
282 }
283 } else {
284 if (!(flags & NLM_F_CREATE))
285 return -ENOENT;
286
287 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
288 return -ENOSPC;
289
290 netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
291 f = kmalloc(sizeof(*f), GFP_ATOMIC);
292 if (!f)
293 return -ENOMEM;
294
295 notify = 1;
296 f->remote_ip = ip;
297 f->state = state;
298 f->updated = f->used = jiffies;
299 memcpy(f->eth_addr, mac, ETH_ALEN);
300
301 ++vxlan->addrcnt;
302 hlist_add_head_rcu(&f->hlist,
303 vxlan_fdb_head(vxlan, mac));
304 }
305
306 if (notify)
307 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
308
309 return 0;
310}
311
312static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
313{
314 netdev_dbg(vxlan->dev,
315 "delete %pM\n", f->eth_addr);
316
317 --vxlan->addrcnt;
318 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
319
320 hlist_del_rcu(&f->hlist);
321 kfree_rcu(f, rcu);
322}
323
324/* Add static entry (via netlink) */
325static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
326 struct net_device *dev,
327 const unsigned char *addr, u16 flags)
328{
329 struct vxlan_dev *vxlan = netdev_priv(dev);
330 __be32 ip;
331 int err;
332
333 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
334 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
335 ndm->ndm_state);
336 return -EINVAL;
337 }
338
339 if (tb[NDA_DST] == NULL)
340 return -EINVAL;
341
342 if (nla_len(tb[NDA_DST]) != sizeof(__be32))
343 return -EAFNOSUPPORT;
344
345 ip = nla_get_be32(tb[NDA_DST]);
346
347 spin_lock_bh(&vxlan->hash_lock);
348 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
349 spin_unlock_bh(&vxlan->hash_lock);
350
351 return err;
352}
353
354/* Delete entry (via netlink) */
355static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
356 const unsigned char *addr)
357{
358 struct vxlan_dev *vxlan = netdev_priv(dev);
359 struct vxlan_fdb *f;
360 int err = -ENOENT;
361
362 spin_lock_bh(&vxlan->hash_lock);
363 f = vxlan_find_mac(vxlan, addr);
364 if (f) {
365 vxlan_fdb_destroy(vxlan, f);
366 err = 0;
367 }
368 spin_unlock_bh(&vxlan->hash_lock);
369
370 return err;
371}
372
373/* Dump forwarding table */
374static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
375 struct net_device *dev, int idx)
376{
377 struct vxlan_dev *vxlan = netdev_priv(dev);
378 unsigned int h;
379
380 for (h = 0; h < FDB_HASH_SIZE; ++h) {
381 struct vxlan_fdb *f;
382 struct hlist_node *n;
383 int err;
384
385 hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
386 if (idx < cb->args[0])
387 goto skip;
388
389 err = vxlan_fdb_info(skb, vxlan, f,
390 NETLINK_CB(cb->skb).portid,
391 cb->nlh->nlmsg_seq,
392 RTM_NEWNEIGH,
393 NLM_F_MULTI);
394 if (err < 0)
395 break;
396skip:
397 ++idx;
398 }
399 }
400
401 return idx;
402}
403
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 *
 * Called from the receive path for every decapsulated frame when
 * learning is enabled.  Refreshes the entry's last-used time, re-points
 * it when the source moved to a different endpoint, or creates a new
 * entry for an unknown source MAC.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		/* Endpoint migrated.
		 * NOTE(review): remote_ip/updated are written here without
		 * taking hash_lock (only the create path below locks) —
		 * confirm concurrent readers tolerate this.
		 */
		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry; creation failure is silently ignored */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}
436
437
438/* See if multicast group is already in use by other ID */
439static bool vxlan_group_used(struct vxlan_net *vn,
440 const struct vxlan_dev *this)
441{
442 const struct vxlan_dev *vxlan;
443 struct hlist_node *node;
444 unsigned h;
445
446 for (h = 0; h < VNI_HASH_SIZE; ++h)
447 hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
448 if (vxlan == this)
449 continue;
450
451 if (!netif_running(vxlan->dev))
452 continue;
453
454 if (vxlan->gaddr == this->gaddr)
455 return true;
456 }
457
458 return false;
459}
460
/* kernel equivalent to IP_ADD_MEMBERSHIP.
 * Joins vxlan->gaddr on the shared per-namespace UDP socket, unless
 * another running vxlan device already holds the membership.
 */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join:
	 * ip_mc_join_group() takes the socket lock, and RTNL must not be
	 * held across it.  Caller (ndo_open) holds RTNL, so release and
	 * re-acquire around the call.
	 */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}
485
486
/* kernel equivalent to IP_DROP_MEMBERSHIP.
 * Drops vxlan->gaddr on the shared per-namespace UDP socket, but only
 * once no other running vxlan device still uses the same group.
 */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave — same lock ordering
	 * as vxlan_join_group(): socket lock must be taken without RTNL.
	 */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}
511
/* Callback from net/ipv4/udp.c to receive packets.
 *
 * Entered with the outer UDP header still on the skb.  Validates the
 * VXLAN header, maps the VNI to a vxlan device, strips the outer
 * headers and re-injects the inner Ethernet frame on that device.
 *
 * Return: 0 when the skb was consumed (delivered or dropped), 1 to
 * hand the packet back to the UDP stack with the header restored.
 */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));
	skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));

	/* Is this VNI defined? VNI occupies the top 24 bits of vx_vni */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	/* Re-examine inner Ethernet packet; oip still points at the
	 * OUTER IP header (used below for snooping and ECN) */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	/* Learn inner source MAC -> outer source IP mapping */
	if (vxlan->learn)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* Propagate ECN from the outer header; err > 1 means the
	 * packet must be dropped */
	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
602
603/* Extract dsfield from inner protocol */
604static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
605 const struct sk_buff *skb)
606{
607 if (skb->protocol == htons(ETH_P_IP))
608 return iph->tos;
609 else if (skb->protocol == htons(ETH_P_IPV6))
610 return ipv6_get_dsfield((const struct ipv6hdr *)iph);
611 else
612 return 0;
613}
614
615/* Propogate ECN bits out */
616static inline u8 vxlan_ecn_encap(u8 tos,
617 const struct iphdr *iph,
618 const struct sk_buff *skb)
619{
620 u8 inner = vxlan_get_dsfield(iph, skb);
621
622 return INET_ECN_encapsulate(tos, inner);
623}
624
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * source port is based on hash of flow if available
 * otherwise use a random value
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct ethhdr *eth;
	const struct iphdr *old_iph;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	struct vxlan_fdb *f;
	unsigned int pkt_len = skb->len;
	u32 hash;
	__be32 dst;
	__be16 df = 0;	/* NOTE(review): DF is never set despite the
			 * header comment claiming inheritance — confirm */
	__u8 tos, ttl;
	int err;

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	eth = (void *)skb->data;
	old_iph = ip_hdr(skb);

	/* Pick tunnel endpoint: FDB entry for unicast dest, else the
	 * configured multicast group; drop when neither exists */
	if (!is_multicast_ether_addr(eth->h_dest) &&
	    (f = vxlan_find_mac(vxlan, eth->h_dest)))
		dst = f->remote_ip;
	else if (vxlan->gaddr) {
		dst = vxlan->gaddr;
	} else
		goto drop;

	/* Multicast default TTL of 1 keeps traffic on the local segment */
	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	/* tos == 1 is the "inherit" sentinel: copy from inner header */
	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	hash = skb_get_rxhash(skb);

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst,
				 vxlan->saddr, vxlan->vni,
				 RT_TOS(tos), vxlan->link);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	/* Reset IP control block and attach the tunnel route */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Build VXLAN header: VNI in the top 24 bits of vx_vni */
	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	/* NOTE(review): a 32-bit hash (or random value) is assigned to
	 * the __be16 source port without htons() or clamping to a port
	 * range — confirm this truncation is intended */
	uh->source = hash ? :random32();

	uh->len = htons(skb->len);
	uh->check = 0;	/* zero UDP checksum is permitted for IPv4 */

	/* Build outer IPv4 header */
	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_UDP;
	iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

	/* See __IPTUNNEL_XMIT */
	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->dst, NULL);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
751
/* Walk the forwarding table and purge stale entries.
 * Timer callback; destroys entries unused for longer than the ageing
 * interval and re-arms the timer for the earliest remaining expiry.
 * NUD_PERMANENT entries are never aged out.
 */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	/* Device is down: don't re-arm; vxlan_open() restarts the timer */
	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		/* _safe variant: entries may be destroyed while walking */
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state == NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}
788
789/* Setup stats when device is created */
790static int vxlan_init(struct net_device *dev)
791{
792 struct vxlan_dev *vxlan = netdev_priv(dev);
793
794 vxlan->stats = alloc_percpu(struct vxlan_stats);
795 if (!vxlan->stats)
796 return -ENOMEM;
797
798 return 0;
799}
800
801/* Start ageing timer and join group when device is brought up */
802static int vxlan_open(struct net_device *dev)
803{
804 struct vxlan_dev *vxlan = netdev_priv(dev);
805 int err;
806
807 if (vxlan->gaddr) {
808 err = vxlan_join_group(dev);
809 if (err)
810 return err;
811 }
812
813 if (vxlan->age_interval)
814 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
815
816 return 0;
817}
818
819/* Purge the forwarding table */
820static void vxlan_flush(struct vxlan_dev *vxlan)
821{
822 unsigned h;
823
824 spin_lock_bh(&vxlan->hash_lock);
825 for (h = 0; h < FDB_HASH_SIZE; ++h) {
826 struct hlist_node *p, *n;
827 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
828 struct vxlan_fdb *f
829 = container_of(p, struct vxlan_fdb, hlist);
830 vxlan_fdb_destroy(vxlan, f);
831 }
832 }
833 spin_unlock_bh(&vxlan->hash_lock);
834}
835
836/* Cleanup timer and forwarding table on shutdown */
837static int vxlan_stop(struct net_device *dev)
838{
839 struct vxlan_dev *vxlan = netdev_priv(dev);
840
841 if (vxlan->gaddr)
842 vxlan_leave_group(dev);
843
844 del_timer_sync(&vxlan->age_timer);
845
846 vxlan_flush(vxlan);
847
848 return 0;
849}
850
/* Merge per-cpu statistics (ndo_get_stats64).
 * Sums the lockless per-cpu tx/rx counters under the u64_stats
 * seqcount retry loop, then copies the error counters that are
 * maintained directly in dev->stats.
 */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_stats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct vxlan_stats *stats
			= per_cpu_ptr(vxlan->stats, cpu);

		/* Retry until a consistent snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			memcpy(&tmp, stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	/* Error counters are only touched from exceptional paths, so
	 * they live in dev->stats rather than per-cpu storage */
	stats->multicast = dev->stats.multicast;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_frame_errors = dev->stats.rx_frame_errors;
	stats->rx_errors = dev->stats.rx_errors;

	stats->tx_dropped = dev->stats.tx_dropped;
	stats->tx_carrier_errors  = dev->stats.tx_carrier_errors;
	stats->tx_aborted_errors  = dev->stats.tx_aborted_errors;
	stats->collisions  = dev->stats.collisions;
	stats->tx_errors = dev->stats.tx_errors;

	return stats;
}
893
/* Stub, nothing needs to be done.
 * Registered as ndo_set_rx_mode; the tunnel has no receive filter to
 * reprogram when the rx mode changes.
 */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
898
/* Net-device operations for the vxlan virtual interface.
 * FDB hooks let userspace manage the forwarding table via the
 * bridge/neighbour netlink interface.
 */
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
913
/* Info for udev, that this is a virtual tunnel endpoint.
 * Attached via SET_NETDEV_DEVTYPE() in vxlan_setup().
 */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
918
919static void vxlan_free(struct net_device *dev)
920{
921 struct vxlan_dev *vxlan = netdev_priv(dev);
922
923 free_percpu(vxlan->stats);
924 free_netdev(dev);
925}
926
/* Initialize the device structure (rtnl_link_ops .setup). */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;

	/* Random MAC for the tunnel device */
	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	/* Virtual device: no qdisc queue, lockless transmit, and the
	 * device cannot be moved between network namespaces */
	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

	spin_lock_init(&vxlan->hash_lock);

	/* Ageing timer is deferrable: garbage collection need not wake
	 * an otherwise idle CPU exactly on time */
	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
956
/* Netlink validation policy for the IFLA_VXLAN_* attributes
 * consumed by vxlan_validate()/vxlan_newlink().
 */
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
};
968
969static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
970{
971 if (tb[IFLA_ADDRESS]) {
972 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
973 pr_debug("invalid link address (not ethernet)\n");
974 return -EINVAL;
975 }
976
977 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
978 pr_debug("invalid all zero ethernet address\n");
979 return -EADDRNOTAVAIL;
980 }
981 }
982
983 if (!data)
984 return -EINVAL;
985
986 if (data[IFLA_VXLAN_ID]) {
987 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
988 if (id >= VXLAN_VID_MASK)
989 return -ERANGE;
990 }
991
992 if (data[IFLA_VXLAN_GROUP]) {
993 __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
994 if (!IN_MULTICAST(ntohl(gaddr))) {
995 pr_debug("group address is not IPv4 multicast\n");
996 return -EADDRNOTAVAIL;
997 }
998 }
999 return 0;
1000}
1001
1002static int vxlan_newlink(struct net *net, struct net_device *dev,
1003 struct nlattr *tb[], struct nlattr *data[])
1004{
1005 struct vxlan_dev *vxlan = netdev_priv(dev);
1006 __u32 vni;
1007 int err;
1008
1009 if (!data[IFLA_VXLAN_ID])
1010 return -EINVAL;
1011
1012 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
1013 if (vxlan_find_vni(net, vni)) {
1014 pr_info("duplicate VNI %u\n", vni);
1015 return -EEXIST;
1016 }
1017 vxlan->vni = vni;
1018
1019 if (data[IFLA_VXLAN_GROUP])
1020 vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
1021
1022 if (data[IFLA_VXLAN_LOCAL])
1023 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
1024
1025 if (data[IFLA_VXLAN_LINK]) {
1026 vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);
1027
1028 if (!tb[IFLA_MTU]) {
1029 struct net_device *lowerdev;
1030 lowerdev = __dev_get_by_index(net, vxlan->link);
1031 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1032 }
1033 }
1034
1035 if (data[IFLA_VXLAN_TOS])
1036 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
1037
1038 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
1039 vxlan->learn = true;
1040
1041 if (data[IFLA_VXLAN_AGEING])
1042 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
1043 else
1044 vxlan->age_interval = FDB_AGE_DEFAULT;
1045
1046 if (data[IFLA_VXLAN_LIMIT])
1047 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
1048
1049 err = register_netdevice(dev);
1050 if (!err)
1051 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
1052
1053 return err;
1054}
1055
1056static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1057{
1058 struct vxlan_dev *vxlan = netdev_priv(dev);
1059
1060 hlist_del_rcu(&vxlan->hlist);
1061
1062 unregister_netdevice_queue(dev, head);
1063}
1064
/* Worst-case netlink message size for the attributes emitted by
 * vxlan_fill_info() (rtnl_link_ops .get_size).
 */
static size_t vxlan_get_size(const struct net_device *dev)
{

	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32))+	/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		0;
}
1079
1080static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1081{
1082 const struct vxlan_dev *vxlan = netdev_priv(dev);
1083
1084 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
1085 goto nla_put_failure;
1086
1087 if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
1088 goto nla_put_failure;
1089
1090 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
1091 goto nla_put_failure;
1092
1093 if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
1094 goto nla_put_failure;
1095
1096 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
1097 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
1098 nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
1099 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
1100 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
1101 goto nla_put_failure;
1102
1103 return 0;
1104
1105nla_put_failure:
1106 return -EMSGSIZE;
1107}
1108
/* Registers the "vxlan" rtnl link type so devices can be created and
 * configured over rtnetlink.
 */
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
1121
/* Per-namespace init: create the shared UDP receive socket, bind it to
 * the VXLAN port, hook it into the UDP encapsulation path, and set up
 * the VNI hash table.
 */
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace (sock_create_kern uses init_net) */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket so received datagrams
	 * are handed to vxlan_udp_encap_recv() */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}
1168
1169static __net_exit void vxlan_exit_net(struct net *net)
1170{
1171 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1172
1173 if (vn->sock) {
1174 sk_release_kernel(vn->sock->sk);
1175 vn->sock = NULL;
1176 }
1177}
1178
/* Per network-namespace state: one UDP receive socket plus the VNI
 * hash table (struct vxlan_net).
 */
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
1185
1186static int __init vxlan_init_module(void)
1187{
1188 int rc;
1189
1190 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
1191
1192 rc = register_pernet_device(&vxlan_net_ops);
1193 if (rc)
1194 goto out1;
1195
1196 rc = rtnl_link_register(&vxlan_link_ops);
1197 if (rc)
1198 goto out2;
1199
1200 return 0;
1201
1202out2:
1203 unregister_pernet_device(&vxlan_net_ops);
1204out1:
1205 return rc;
1206}
1207module_init(vxlan_init_module);
1208
/* Module exit: unregister in reverse order of registration — the link
 * type first so no new devices can be created, then the per-net state.
 */
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);
1215
1216MODULE_LICENSE("GPL");
1217MODULE_VERSION(VXLAN_VERSION);
1218MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
1219MODULE_ALIAS_RTNL_LINK("vxlan");
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 025426132754..9c34d2fccfac 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
222 struct sk_buff *skb; 222 struct sk_buff *skb;
223 const struct i2400m_tlv_detailed_device_info *ddi; 223 const struct i2400m_tlv_detailed_device_info *ddi;
224 struct net_device *net_dev = i2400m->wimax_dev.net_dev; 224 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
225 const unsigned char zeromac[ETH_ALEN] = { 0 };
226 225
227 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 226 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
228 skb = i2400m_get_device_info(i2400m); 227 skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
244 "to that of boot mode's\n"); 243 "to that of boot mode's\n");
245 dev_warn(dev, "device reports %pM\n", ddi->mac_address); 244 dev_warn(dev, "device reports %pM\n", ddi->mac_address);
246 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr); 245 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
247 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) 246 if (is_zero_ether_addr(ddi->mac_address))
248 dev_err(dev, "device reports an invalid MAC address, " 247 dev_err(dev, "device reports an invalid MAC address, "
249 "not updating\n"); 248 "not updating\n");
250 else { 249 else {
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 689a71c1af71..154a4965be4f 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1661} 1661}
1662 1662
1663/* Put adm8211_tx_hdr on skb and transmit */ 1663/* Put adm8211_tx_hdr on skb and transmit */
1664static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 1664static void adm8211_tx(struct ieee80211_hw *dev,
1665 struct ieee80211_tx_control *control,
1666 struct sk_buff *skb)
1665{ 1667{
1666 struct adm8211_tx_hdr *txhdr; 1668 struct adm8211_tx_hdr *txhdr;
1667 size_t payload_len, hdrlen; 1669 size_t payload_len, hdrlen;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index c586f78c307f..3cd05a7173f6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -87,7 +87,6 @@ static struct pci_driver airo_driver = {
87/* Include Wireless Extension definition and check version - Jean II */ 87/* Include Wireless Extension definition and check version - Jean II */
88#include <linux/wireless.h> 88#include <linux/wireless.h>
89#define WIRELESS_SPY /* enable iwspy support */ 89#define WIRELESS_SPY /* enable iwspy support */
90#include <net/iw_handler.h> /* New driver API */
91 90
92#define CISCO_EXT /* enable Cisco extensions */ 91#define CISCO_EXT /* enable Cisco extensions */
93#ifdef CISCO_EXT 92#ifdef CISCO_EXT
@@ -5984,13 +5983,11 @@ static int airo_set_wap(struct net_device *dev,
5984 Cmd cmd; 5983 Cmd cmd;
5985 Resp rsp; 5984 Resp rsp;
5986 APListRid APList_rid; 5985 APListRid APList_rid;
5987 static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5988 static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
5989 5986
5990 if (awrq->sa_family != ARPHRD_ETHER) 5987 if (awrq->sa_family != ARPHRD_ETHER)
5991 return -EINVAL; 5988 return -EINVAL;
5992 else if (!memcmp(any, awrq->sa_data, ETH_ALEN) || 5989 else if (is_broadcast_ether_addr(awrq->sa_data) ||
5993 !memcmp(off, awrq->sa_data, ETH_ALEN)) { 5990 is_zero_ether_addr(awrq->sa_data)) {
5994 memset(&cmd, 0, sizeof(cmd)); 5991 memset(&cmd, 0, sizeof(cmd));
5995 cmd.cmd=CMD_LOSE_SYNC; 5992 cmd.cmd=CMD_LOSE_SYNC;
5996 if (down_interruptible(&local->sem)) 5993 if (down_interruptible(&local->sem))
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 88b8d64c90f1..99b9ddf21273 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -498,36 +498,6 @@ exit:
498 return ret; 498 return ret;
499} 499}
500 500
501#define HEX2STR_BUFFERS 4
502#define HEX2STR_MAX_LEN 64
503
504/* Convert binary data into hex string */
505static char *hex2str(void *buf, size_t len)
506{
507 static atomic_t a = ATOMIC_INIT(0);
508 static char bufs[HEX2STR_BUFFERS][3 * HEX2STR_MAX_LEN + 1];
509 char *ret = bufs[atomic_inc_return(&a) & (HEX2STR_BUFFERS - 1)];
510 char *obuf = ret;
511 u8 *ibuf = buf;
512
513 if (len > HEX2STR_MAX_LEN)
514 len = HEX2STR_MAX_LEN;
515
516 if (len == 0)
517 goto exit;
518
519 while (len--) {
520 obuf = hex_byte_pack(obuf, *ibuf++);
521 *obuf++ = '-';
522 }
523 obuf--;
524
525exit:
526 *obuf = '\0';
527
528 return ret;
529}
530
531/* LED trigger */ 501/* LED trigger */
532static int tx_activity; 502static int tx_activity;
533static void at76_ledtrig_tx_timerfunc(unsigned long data); 503static void at76_ledtrig_tx_timerfunc(unsigned long data);
@@ -1004,9 +974,9 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
1004 WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN; 974 WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
1005 975
1006 for (i = 0; i < WEP_KEYS; i++) 976 for (i = 0; i < WEP_KEYS; i++)
1007 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s", 977 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %*phD",
1008 wiphy_name(priv->hw->wiphy), i, 978 wiphy_name(priv->hw->wiphy), i,
1009 hex2str(m->wep_default_keyvalue[i], key_len)); 979 key_len, m->wep_default_keyvalue[i]);
1010exit: 980exit:
1011 kfree(m); 981 kfree(m);
1012} 982}
@@ -1031,7 +1001,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1031 at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration " 1001 at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
1032 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d " 1002 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
1033 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d " 1003 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
1034 "current_bssid %pM current_essid %s current_bss_type %d " 1004 "current_bssid %pM current_essid %*phD current_bss_type %d "
1035 "pm_mode %d ibss_change %d res %d " 1005 "pm_mode %d ibss_change %d res %d "
1036 "multi_domain_capability_implemented %d " 1006 "multi_domain_capability_implemented %d "
1037 "international_roaming %d country_string %.3s", 1007 "international_roaming %d country_string %.3s",
@@ -1041,7 +1011,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1041 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window), 1011 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
1042 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period, 1012 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
1043 m->CFP_period, m->current_bssid, 1013 m->CFP_period, m->current_bssid,
1044 hex2str(m->current_essid, IW_ESSID_MAX_SIZE), 1014 IW_ESSID_MAX_SIZE, m->current_essid,
1045 m->current_bss_type, m->power_mgmt_mode, m->ibss_change, 1015 m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
1046 m->res, m->multi_domain_capability_implemented, 1016 m->res, m->multi_domain_capability_implemented,
1047 m->multi_domain_capability_enabled, m->country_string); 1017 m->multi_domain_capability_enabled, m->country_string);
@@ -1069,7 +1039,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1069 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d " 1039 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
1070 "scan_type %d scan_channel %d probe_delay %u " 1040 "scan_type %d scan_channel %d probe_delay %u "
1071 "min_channel_time %d max_channel_time %d listen_int %d " 1041 "min_channel_time %d max_channel_time %d listen_int %d "
1072 "desired_ssid %s desired_bssid %pM desired_bsstype %d", 1042 "desired_ssid %*phD desired_bssid %pM desired_bsstype %d",
1073 wiphy_name(priv->hw->wiphy), 1043 wiphy_name(priv->hw->wiphy),
1074 le32_to_cpu(m->max_tx_msdu_lifetime), 1044 le32_to_cpu(m->max_tx_msdu_lifetime),
1075 le32_to_cpu(m->max_rx_lifetime), 1045 le32_to_cpu(m->max_rx_lifetime),
@@ -1080,7 +1050,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1080 le16_to_cpu(m->min_channel_time), 1050 le16_to_cpu(m->min_channel_time),
1081 le16_to_cpu(m->max_channel_time), 1051 le16_to_cpu(m->max_channel_time),
1082 le16_to_cpu(m->listen_interval), 1052 le16_to_cpu(m->listen_interval),
1083 hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE), 1053 IW_ESSID_MAX_SIZE, m->desired_ssid,
1084 m->desired_bssid, m->desired_bsstype); 1054 m->desired_bssid, m->desired_bsstype);
1085exit: 1055exit:
1086 kfree(m); 1056 kfree(m);
@@ -1160,13 +1130,13 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
1160 goto exit; 1130 goto exit;
1161 } 1131 }
1162 1132
1163 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s", 1133 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %*phD",
1164 wiphy_name(priv->hw->wiphy), 1134 wiphy_name(priv->hw->wiphy),
1165 hex2str(m->channel_list, sizeof(m->channel_list))); 1135 (int)sizeof(m->channel_list), m->channel_list);
1166 1136
1167 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s", 1137 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %*phD",
1168 wiphy_name(priv->hw->wiphy), 1138 wiphy_name(priv->hw->wiphy),
1169 hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel))); 1139 (int)sizeof(m->tx_powerlevel), m->tx_powerlevel);
1170exit: 1140exit:
1171 kfree(m); 1141 kfree(m);
1172} 1142}
@@ -1369,9 +1339,9 @@ static int at76_startup_device(struct at76_priv *priv)
1369 int ret; 1339 int ret;
1370 1340
1371 at76_dbg(DBG_PARAMS, 1341 at76_dbg(DBG_PARAMS,
1372 "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d " 1342 "%s param: ssid %.*s (%*phD) mode %s ch %d wep %s key %d "
1373 "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size, 1343 "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size,
1374 priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE), 1344 priv->essid, IW_ESSID_MAX_SIZE, priv->essid,
1375 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra", 1345 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
1376 priv->channel, priv->wep_enabled ? "enabled" : "disabled", 1346 priv->channel, priv->wep_enabled ? "enabled" : "disabled",
1377 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]); 1347 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
@@ -1726,7 +1696,9 @@ static void at76_mac80211_tx_callback(struct urb *urb)
1726 ieee80211_wake_queues(priv->hw); 1696 ieee80211_wake_queues(priv->hw);
1727} 1697}
1728 1698
1729static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1699static void at76_mac80211_tx(struct ieee80211_hw *hw,
1700 struct ieee80211_tx_control *control,
1701 struct sk_buff *skb)
1730{ 1702{
1731 struct at76_priv *priv = hw->priv; 1703 struct at76_priv *priv = hw->priv;
1732 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; 1704 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 6169fbd23ed1..4521342c62cc 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -159,6 +159,7 @@ struct ath_common {
159 159
160 bool btcoex_enabled; 160 bool btcoex_enabled;
161 bool disable_ani; 161 bool disable_ani;
162 bool antenna_diversity;
162}; 163};
163 164
164struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, 165struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 64a453a6dfe4..3150def17193 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1331,7 +1331,6 @@ struct ath5k_hw {
1331 unsigned int nexttbtt; /* next beacon time in TU */ 1331 unsigned int nexttbtt; /* next beacon time in TU */
1332 struct ath5k_txq *cabq; /* content after beacon */ 1332 struct ath5k_txq *cabq; /* content after beacon */
1333 1333
1334 int power_level; /* Requested tx power in dBm */
1335 bool assoc; /* associate state */ 1334 bool assoc; /* associate state */
1336 bool enable_beacon; /* true if beacons are on */ 1335 bool enable_beacon; /* true if beacons are on */
1337 1336
@@ -1425,6 +1424,7 @@ struct ath5k_hw {
1425 /* Value in dB units */ 1424 /* Value in dB units */
1426 s16 txp_cck_ofdm_pwr_delta; 1425 s16 txp_cck_ofdm_pwr_delta;
1427 bool txp_setup; 1426 bool txp_setup;
1427 int txp_requested; /* Requested tx power in dBm */
1428 } ah_txpower; 1428 } ah_txpower;
1429 1429
1430 struct ath5k_nfcal_hist ah_nfcal_hist; 1430 struct ath5k_nfcal_hist ah_nfcal_hist;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 2aab20ee9f38..9fd6d9a9942e 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -723,7 +723,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
723 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 723 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
724 ieee80211_get_hdrlen_from_skb(skb), padsize, 724 ieee80211_get_hdrlen_from_skb(skb), padsize,
725 get_hw_packet_type(skb), 725 get_hw_packet_type(skb),
726 (ah->power_level * 2), 726 (ah->ah_txpower.txp_requested * 2),
727 hw_rate, 727 hw_rate,
728 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, 728 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
729 cts_rate, duration); 729 cts_rate, duration);
@@ -1778,7 +1778,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1778 ds->ds_data = bf->skbaddr; 1778 ds->ds_data = bf->skbaddr;
1779 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 1779 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1780 ieee80211_get_hdrlen_from_skb(skb), padsize, 1780 ieee80211_get_hdrlen_from_skb(skb), padsize,
1781 AR5K_PKT_TYPE_BEACON, (ah->power_level * 2), 1781 AR5K_PKT_TYPE_BEACON,
1782 (ah->ah_txpower.txp_requested * 2),
1782 ieee80211_get_tx_rate(ah->hw, info)->hw_value, 1783 ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1783 1, AR5K_TXKEYIX_INVALID, 1784 1, AR5K_TXKEYIX_INVALID,
1784 antenna, flags, 0, 0); 1785 antenna, flags, 0, 0);
@@ -2445,6 +2446,7 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2445 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 2446 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2446 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2447 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2447 IEEE80211_HW_SIGNAL_DBM | 2448 IEEE80211_HW_SIGNAL_DBM |
2449 IEEE80211_HW_MFP_CAPABLE |
2448 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 2450 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2449 2451
2450 hw->wiphy->interface_modes = 2452 hw->wiphy->interface_modes =
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d56453e43d7e..7a28538e6e05 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -55,7 +55,8 @@
55\********************/ 55\********************/
56 56
57static void 57static void
58ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 58ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
59 struct sk_buff *skb)
59{ 60{
60 struct ath5k_hw *ah = hw->priv; 61 struct ath5k_hw *ah = hw->priv;
61 u16 qnum = skb_get_queue_mapping(skb); 62 u16 qnum = skb_get_queue_mapping(skb);
@@ -207,8 +208,8 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
207 } 208 }
208 209
209 if ((changed & IEEE80211_CONF_CHANGE_POWER) && 210 if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
210 (ah->power_level != conf->power_level)) { 211 (ah->ah_txpower.txp_requested != conf->power_level)) {
211 ah->power_level = conf->power_level; 212 ah->ah_txpower.txp_requested = conf->power_level;
212 213
213 /* Half dB steps */ 214 /* Half dB steps */
214 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); 215 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
@@ -488,6 +489,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
488 if (ath5k_modparam_nohwcrypt) 489 if (ath5k_modparam_nohwcrypt)
489 return -EOPNOTSUPP; 490 return -EOPNOTSUPP;
490 491
492 if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
493 return -EOPNOTSUPP;
494
491 if (vif->type == NL80211_IFTYPE_ADHOC && 495 if (vif->type == NL80211_IFTYPE_ADHOC &&
492 (key->cipher == WLAN_CIPHER_SUITE_TKIP || 496 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
493 key->cipher == WLAN_CIPHER_SUITE_CCMP) && 497 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
@@ -522,7 +526,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
522 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) 526 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
523 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 527 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
524 if (key->cipher == WLAN_CIPHER_SUITE_CCMP) 528 if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
525 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 529 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
526 ret = 0; 530 ret = 0;
527 } 531 }
528 break; 532 break;
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 8b71a2d947e0..ab363f34b4df 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1975,11 +1975,13 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1975 spur_delta_phase = (spur_offset << 18) / 25; 1975 spur_delta_phase = (spur_offset << 18) / 25;
1976 spur_freq_sigma_delta = (spur_delta_phase >> 10); 1976 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1977 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2; 1977 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
1978 break;
1978 case AR5K_BWMODE_5MHZ: 1979 case AR5K_BWMODE_5MHZ:
1979 /* Both sample_freq and chip_freq are 10MHz (?) */ 1980 /* Both sample_freq and chip_freq are 10MHz (?) */
1980 spur_delta_phase = (spur_offset << 19) / 25; 1981 spur_delta_phase = (spur_offset << 19) / 25;
1981 spur_freq_sigma_delta = (spur_delta_phase >> 10); 1982 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1982 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4; 1983 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
1984 break;
1983 default: 1985 default:
1984 if (channel->band == IEEE80211_BAND_5GHZ) { 1986 if (channel->band == IEEE80211_BAND_5GHZ) {
1985 /* Both sample_freq and chip_freq are 40MHz */ 1987 /* Both sample_freq and chip_freq are 40MHz */
@@ -3516,6 +3518,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
3516{ 3518{
3517 unsigned int i; 3519 unsigned int i;
3518 u16 *rates; 3520 u16 *rates;
3521 s16 rate_idx_scaled = 0;
3519 3522
3520 /* max_pwr is power level we got from driver/user in 0.5dB 3523 /* max_pwr is power level we got from driver/user in 0.5dB
3521 * units, switch to 0.25dB units so we can compare */ 3524 * units, switch to 0.25dB units so we can compare */
@@ -3562,20 +3565,32 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
3562 for (i = 8; i <= 15; i++) 3565 for (i = 8; i <= 15; i++)
3563 rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta; 3566 rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
3564 3567
3568 /* Save min/max and current tx power for this channel
3569 * in 0.25dB units.
3570 *
3571 * Note: We use rates[0] for current tx power because
3572 * it covers most of the rates, in most cases. It's our
3573 * tx power limit and what the user expects to see. */
3574 ah->ah_txpower.txp_min_pwr = 2 * rates[7];
3575 ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
3576
3577 /* Set max txpower for correct OFDM operation on all rates
3578 * -that is the txpower for 54Mbit-, it's used for the PAPD
3579 * gain probe and it's in 0.5dB units */
3580 ah->ah_txpower.txp_ofdm = rates[7];
3581
3565 /* Now that we have all rates setup use table offset to 3582 /* Now that we have all rates setup use table offset to
3566 * match the power range set by user with the power indices 3583 * match the power range set by user with the power indices
3567 * on PCDAC/PDADC table */ 3584 * on PCDAC/PDADC table */
3568 for (i = 0; i < 16; i++) { 3585 for (i = 0; i < 16; i++) {
3569 rates[i] += ah->ah_txpower.txp_offset; 3586 rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
3570 /* Don't get out of bounds */ 3587 /* Don't get out of bounds */
3571 if (rates[i] > 63) 3588 if (rate_idx_scaled > 63)
3572 rates[i] = 63; 3589 rate_idx_scaled = 63;
3590 if (rate_idx_scaled < 0)
3591 rate_idx_scaled = 0;
3592 rates[i] = rate_idx_scaled;
3573 } 3593 }
3574
3575 /* Min/max in 0.25dB units */
3576 ah->ah_txpower.txp_min_pwr = 2 * rates[7];
3577 ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
3578 ah->ah_txpower.txp_ofdm = rates[7];
3579} 3594}
3580 3595
3581 3596
@@ -3639,10 +3654,17 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3639 if (!ah->ah_txpower.txp_setup || 3654 if (!ah->ah_txpower.txp_setup ||
3640 (channel->hw_value != curr_channel->hw_value) || 3655 (channel->hw_value != curr_channel->hw_value) ||
3641 (channel->center_freq != curr_channel->center_freq)) { 3656 (channel->center_freq != curr_channel->center_freq)) {
3642 /* Reset TX power values */ 3657 /* Reset TX power values but preserve requested
3658 * tx power from above */
3659 int requested_txpower = ah->ah_txpower.txp_requested;
3660
3643 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); 3661 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
3662
3663 /* Restore TPC setting and requested tx power */
3644 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 3664 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
3645 3665
3666 ah->ah_txpower.txp_requested = requested_txpower;
3667
3646 /* Calculate the powertable */ 3668 /* Calculate the powertable */
3647 ret = ath5k_setup_channel_powertable(ah, channel, 3669 ret = ath5k_setup_channel_powertable(ah, channel,
3648 ee_mode, type); 3670 ee_mode, type);
@@ -3789,8 +3811,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3789 * RF buffer settings on 5211/5212+ so that we 3811 * RF buffer settings on 5211/5212+ so that we
3790 * properly set curve indices. 3812 * properly set curve indices.
3791 */ 3813 */
3792 ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ? 3814 ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
3793 ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER); 3815 ah->ah_txpower.txp_requested * 2 :
3816 AR5K_TUNE_MAX_TXPOWER);
3794 if (ret) 3817 if (ret)
3795 return ret; 3818 return ret;
3796 3819
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 86aeef4b9d7e..7089f8160ad5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1488,7 +1488,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
1488} 1488}
1489 1489
1490static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy, 1490static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
1491 char *name, 1491 const char *name,
1492 enum nl80211_iftype type, 1492 enum nl80211_iftype type,
1493 u32 *flags, 1493 u32 *flags,
1494 struct vif_params *params) 1494 struct vif_params *params)
@@ -3477,7 +3477,7 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
3477 ar->num_vif--; 3477 ar->num_vif--;
3478} 3478}
3479 3479
3480struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name, 3480struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
3481 enum nl80211_iftype type, 3481 enum nl80211_iftype type,
3482 u8 fw_vif_idx, u8 nw_type) 3482 u8 fw_vif_idx, u8 nw_type)
3483{ 3483{
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 56b1ebe79812..780f77775a91 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -25,7 +25,7 @@ enum ath6kl_cfg_suspend_mode {
25 ATH6KL_CFG_SUSPEND_SCHED_SCAN, 25 ATH6KL_CFG_SUSPEND_SCHED_SCAN,
26}; 26};
27 27
28struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name, 28struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
29 enum nl80211_iftype type, 29 enum nl80211_iftype type,
30 u8 fw_vif_idx, u8 nw_type); 30 u8 fw_vif_idx, u8 nw_type);
31void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, 31void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ff007f500feb..e09ec40ce71a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -237,7 +237,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
237 entry_cck->fir_step_level); 237 entry_cck->fir_step_level);
238 238
239 /* Skip MRC CCK for pre AR9003 families */ 239 /* Skip MRC CCK for pre AR9003 families */
240 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah)) 240 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
241 return; 241 return;
242 242
243 if (aniState->mrcCCK != entry_cck->mrc_cck_on) 243 if (aniState->mrcCCK != entry_cck->mrc_cck_on)
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index bbcfeb3b2a60..664844c5d3d5 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -311,6 +311,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
311 struct ath_ant_comb *antcomb, 311 struct ath_ant_comb *antcomb,
312 int alt_ratio) 312 int alt_ratio)
313{ 313{
314 ant_conf->main_gaintb = 0;
315 ant_conf->alt_gaintb = 0;
316
314 if (ant_conf->div_group == 0) { 317 if (ant_conf->div_group == 0) {
315 /* Adjust the fast_div_bias based on main and alt lna conf */ 318 /* Adjust the fast_div_bias based on main and alt lna conf */
316 switch ((ant_conf->main_lna_conf << 4) | 319 switch ((ant_conf->main_lna_conf << 4) |
@@ -360,18 +363,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
360 ant_conf->alt_lna_conf) { 363 ant_conf->alt_lna_conf) {
361 case 0x01: /* A-B LNA2 */ 364 case 0x01: /* A-B LNA2 */
362 ant_conf->fast_div_bias = 0x1; 365 ant_conf->fast_div_bias = 0x1;
363 ant_conf->main_gaintb = 0;
364 ant_conf->alt_gaintb = 0;
365 break; 366 break;
366 case 0x02: /* A-B LNA1 */ 367 case 0x02: /* A-B LNA1 */
367 ant_conf->fast_div_bias = 0x1; 368 ant_conf->fast_div_bias = 0x1;
368 ant_conf->main_gaintb = 0;
369 ant_conf->alt_gaintb = 0;
370 break; 369 break;
371 case 0x03: /* A-B A+B */ 370 case 0x03: /* A-B A+B */
372 ant_conf->fast_div_bias = 0x1; 371 ant_conf->fast_div_bias = 0x1;
373 ant_conf->main_gaintb = 0;
374 ant_conf->alt_gaintb = 0;
375 break; 372 break;
376 case 0x10: /* LNA2 A-B */ 373 case 0x10: /* LNA2 A-B */
377 if (!(antcomb->scan) && 374 if (!(antcomb->scan) &&
@@ -379,13 +376,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
379 ant_conf->fast_div_bias = 0x3f; 376 ant_conf->fast_div_bias = 0x3f;
380 else 377 else
381 ant_conf->fast_div_bias = 0x1; 378 ant_conf->fast_div_bias = 0x1;
382 ant_conf->main_gaintb = 0;
383 ant_conf->alt_gaintb = 0;
384 break; 379 break;
385 case 0x12: /* LNA2 LNA1 */ 380 case 0x12: /* LNA2 LNA1 */
386 ant_conf->fast_div_bias = 0x1; 381 ant_conf->fast_div_bias = 0x1;
387 ant_conf->main_gaintb = 0;
388 ant_conf->alt_gaintb = 0;
389 break; 382 break;
390 case 0x13: /* LNA2 A+B */ 383 case 0x13: /* LNA2 A+B */
391 if (!(antcomb->scan) && 384 if (!(antcomb->scan) &&
@@ -393,8 +386,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
393 ant_conf->fast_div_bias = 0x3f; 386 ant_conf->fast_div_bias = 0x3f;
394 else 387 else
395 ant_conf->fast_div_bias = 0x1; 388 ant_conf->fast_div_bias = 0x1;
396 ant_conf->main_gaintb = 0;
397 ant_conf->alt_gaintb = 0;
398 break; 389 break;
399 case 0x20: /* LNA1 A-B */ 390 case 0x20: /* LNA1 A-B */
400 if (!(antcomb->scan) && 391 if (!(antcomb->scan) &&
@@ -402,13 +393,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
402 ant_conf->fast_div_bias = 0x3f; 393 ant_conf->fast_div_bias = 0x3f;
403 else 394 else
404 ant_conf->fast_div_bias = 0x1; 395 ant_conf->fast_div_bias = 0x1;
405 ant_conf->main_gaintb = 0;
406 ant_conf->alt_gaintb = 0;
407 break; 396 break;
408 case 0x21: /* LNA1 LNA2 */ 397 case 0x21: /* LNA1 LNA2 */
409 ant_conf->fast_div_bias = 0x1; 398 ant_conf->fast_div_bias = 0x1;
410 ant_conf->main_gaintb = 0;
411 ant_conf->alt_gaintb = 0;
412 break; 399 break;
413 case 0x23: /* LNA1 A+B */ 400 case 0x23: /* LNA1 A+B */
414 if (!(antcomb->scan) && 401 if (!(antcomb->scan) &&
@@ -416,23 +403,15 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
416 ant_conf->fast_div_bias = 0x3f; 403 ant_conf->fast_div_bias = 0x3f;
417 else 404 else
418 ant_conf->fast_div_bias = 0x1; 405 ant_conf->fast_div_bias = 0x1;
419 ant_conf->main_gaintb = 0;
420 ant_conf->alt_gaintb = 0;
421 break; 406 break;
422 case 0x30: /* A+B A-B */ 407 case 0x30: /* A+B A-B */
423 ant_conf->fast_div_bias = 0x1; 408 ant_conf->fast_div_bias = 0x1;
424 ant_conf->main_gaintb = 0;
425 ant_conf->alt_gaintb = 0;
426 break; 409 break;
427 case 0x31: /* A+B LNA2 */ 410 case 0x31: /* A+B LNA2 */
428 ant_conf->fast_div_bias = 0x1; 411 ant_conf->fast_div_bias = 0x1;
429 ant_conf->main_gaintb = 0;
430 ant_conf->alt_gaintb = 0;
431 break; 412 break;
432 case 0x32: /* A+B LNA1 */ 413 case 0x32: /* A+B LNA1 */
433 ant_conf->fast_div_bias = 0x1; 414 ant_conf->fast_div_bias = 0x1;
434 ant_conf->main_gaintb = 0;
435 ant_conf->alt_gaintb = 0;
436 break; 415 break;
437 default: 416 default:
438 break; 417 break;
@@ -443,18 +422,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
443 ant_conf->alt_lna_conf) { 422 ant_conf->alt_lna_conf) {
444 case 0x01: /* A-B LNA2 */ 423 case 0x01: /* A-B LNA2 */
445 ant_conf->fast_div_bias = 0x1; 424 ant_conf->fast_div_bias = 0x1;
446 ant_conf->main_gaintb = 0;
447 ant_conf->alt_gaintb = 0;
448 break; 425 break;
449 case 0x02: /* A-B LNA1 */ 426 case 0x02: /* A-B LNA1 */
450 ant_conf->fast_div_bias = 0x1; 427 ant_conf->fast_div_bias = 0x1;
451 ant_conf->main_gaintb = 0;
452 ant_conf->alt_gaintb = 0;
453 break; 428 break;
454 case 0x03: /* A-B A+B */ 429 case 0x03: /* A-B A+B */
455 ant_conf->fast_div_bias = 0x1; 430 ant_conf->fast_div_bias = 0x1;
456 ant_conf->main_gaintb = 0;
457 ant_conf->alt_gaintb = 0;
458 break; 431 break;
459 case 0x10: /* LNA2 A-B */ 432 case 0x10: /* LNA2 A-B */
460 if (!(antcomb->scan) && 433 if (!(antcomb->scan) &&
@@ -462,13 +435,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
462 ant_conf->fast_div_bias = 0x1; 435 ant_conf->fast_div_bias = 0x1;
463 else 436 else
464 ant_conf->fast_div_bias = 0x2; 437 ant_conf->fast_div_bias = 0x2;
465 ant_conf->main_gaintb = 0;
466 ant_conf->alt_gaintb = 0;
467 break; 438 break;
468 case 0x12: /* LNA2 LNA1 */ 439 case 0x12: /* LNA2 LNA1 */
469 ant_conf->fast_div_bias = 0x1; 440 ant_conf->fast_div_bias = 0x1;
470 ant_conf->main_gaintb = 0;
471 ant_conf->alt_gaintb = 0;
472 break; 441 break;
473 case 0x13: /* LNA2 A+B */ 442 case 0x13: /* LNA2 A+B */
474 if (!(antcomb->scan) && 443 if (!(antcomb->scan) &&
@@ -476,8 +445,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
476 ant_conf->fast_div_bias = 0x1; 445 ant_conf->fast_div_bias = 0x1;
477 else 446 else
478 ant_conf->fast_div_bias = 0x2; 447 ant_conf->fast_div_bias = 0x2;
479 ant_conf->main_gaintb = 0;
480 ant_conf->alt_gaintb = 0;
481 break; 448 break;
482 case 0x20: /* LNA1 A-B */ 449 case 0x20: /* LNA1 A-B */
483 if (!(antcomb->scan) && 450 if (!(antcomb->scan) &&
@@ -485,13 +452,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
485 ant_conf->fast_div_bias = 0x1; 452 ant_conf->fast_div_bias = 0x1;
486 else 453 else
487 ant_conf->fast_div_bias = 0x2; 454 ant_conf->fast_div_bias = 0x2;
488 ant_conf->main_gaintb = 0;
489 ant_conf->alt_gaintb = 0;
490 break; 455 break;
491 case 0x21: /* LNA1 LNA2 */ 456 case 0x21: /* LNA1 LNA2 */
492 ant_conf->fast_div_bias = 0x1; 457 ant_conf->fast_div_bias = 0x1;
493 ant_conf->main_gaintb = 0;
494 ant_conf->alt_gaintb = 0;
495 break; 458 break;
496 case 0x23: /* LNA1 A+B */ 459 case 0x23: /* LNA1 A+B */
497 if (!(antcomb->scan) && 460 if (!(antcomb->scan) &&
@@ -499,23 +462,77 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
499 ant_conf->fast_div_bias = 0x1; 462 ant_conf->fast_div_bias = 0x1;
500 else 463 else
501 ant_conf->fast_div_bias = 0x2; 464 ant_conf->fast_div_bias = 0x2;
502 ant_conf->main_gaintb = 0;
503 ant_conf->alt_gaintb = 0;
504 break; 465 break;
505 case 0x30: /* A+B A-B */ 466 case 0x30: /* A+B A-B */
506 ant_conf->fast_div_bias = 0x1; 467 ant_conf->fast_div_bias = 0x1;
507 ant_conf->main_gaintb = 0;
508 ant_conf->alt_gaintb = 0;
509 break; 468 break;
510 case 0x31: /* A+B LNA2 */ 469 case 0x31: /* A+B LNA2 */
511 ant_conf->fast_div_bias = 0x1; 470 ant_conf->fast_div_bias = 0x1;
512 ant_conf->main_gaintb = 0;
513 ant_conf->alt_gaintb = 0;
514 break; 471 break;
515 case 0x32: /* A+B LNA1 */ 472 case 0x32: /* A+B LNA1 */
516 ant_conf->fast_div_bias = 0x1; 473 ant_conf->fast_div_bias = 0x1;
517 ant_conf->main_gaintb = 0; 474 break;
518 ant_conf->alt_gaintb = 0; 475 default:
476 break;
477 }
478 } else if (ant_conf->div_group == 3) {
479 switch ((ant_conf->main_lna_conf << 4) |
480 ant_conf->alt_lna_conf) {
481 case 0x01: /* A-B LNA2 */
482 ant_conf->fast_div_bias = 0x1;
483 break;
484 case 0x02: /* A-B LNA1 */
485 ant_conf->fast_div_bias = 0x39;
486 break;
487 case 0x03: /* A-B A+B */
488 ant_conf->fast_div_bias = 0x1;
489 break;
490 case 0x10: /* LNA2 A-B */
491 if ((antcomb->scan == 0) &&
492 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
493 ant_conf->fast_div_bias = 0x3f;
494 } else {
495 ant_conf->fast_div_bias = 0x1;
496 }
497 break;
498 case 0x12: /* LNA2 LNA1 */
499 ant_conf->fast_div_bias = 0x39;
500 break;
501 case 0x13: /* LNA2 A+B */
502 if ((antcomb->scan == 0) &&
503 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
504 ant_conf->fast_div_bias = 0x3f;
505 } else {
506 ant_conf->fast_div_bias = 0x1;
507 }
508 break;
509 case 0x20: /* LNA1 A-B */
510 if ((antcomb->scan == 0) &&
511 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
512 ant_conf->fast_div_bias = 0x3f;
513 } else {
514 ant_conf->fast_div_bias = 0x4;
515 }
516 break;
517 case 0x21: /* LNA1 LNA2 */
518 ant_conf->fast_div_bias = 0x6;
519 break;
520 case 0x23: /* LNA1 A+B */
521 if ((antcomb->scan == 0) &&
522 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
523 ant_conf->fast_div_bias = 0x3f;
524 } else {
525 ant_conf->fast_div_bias = 0x6;
526 }
527 break;
528 case 0x30: /* A+B A-B */
529 ant_conf->fast_div_bias = 0x1;
530 break;
531 case 0x31: /* A+B LNA2 */
532 ant_conf->fast_div_bias = 0x6;
533 break;
534 case 0x32: /* A+B LNA1 */
535 ant_conf->fast_div_bias = 0x1;
519 break; 536 break;
520 default: 537 default:
521 break; 538 break;
@@ -759,6 +776,7 @@ div_comb_done:
759void ath_ant_comb_update(struct ath_softc *sc) 776void ath_ant_comb_update(struct ath_softc *sc)
760{ 777{
761 struct ath_hw *ah = sc->sc_ah; 778 struct ath_hw *ah = sc->sc_ah;
779 struct ath_common *common = ath9k_hw_common(ah);
762 struct ath_hw_antcomb_conf div_ant_conf; 780 struct ath_hw_antcomb_conf div_ant_conf;
763 u8 lna_conf; 781 u8 lna_conf;
764 782
@@ -773,4 +791,7 @@ void ath_ant_comb_update(struct ath_softc *sc)
773 div_ant_conf.alt_lna_conf = lna_conf; 791 div_ant_conf.alt_lna_conf = lna_conf;
774 792
775 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf); 793 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
794
795 if (common->antenna_diversity)
796 ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
776} 797}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d066f2516e47..5bbe5057ba18 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -138,7 +138,8 @@ static const struct ar9300_eeprom ar9300_default = {
138 }, 138 },
139 .base_ext1 = { 139 .base_ext1 = {
140 .ant_div_control = 0, 140 .ant_div_control = 0,
141 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 141 .future = {0, 0, 0},
142 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
142 }, 143 },
143 .calFreqPier2G = { 144 .calFreqPier2G = {
144 FREQ2FBIN(2412, 1), 145 FREQ2FBIN(2412, 1),
@@ -713,7 +714,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
713 }, 714 },
714 .base_ext1 = { 715 .base_ext1 = {
715 .ant_div_control = 0, 716 .ant_div_control = 0,
716 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 717 .future = {0, 0, 0},
718 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
717 }, 719 },
718 .calFreqPier2G = { 720 .calFreqPier2G = {
719 FREQ2FBIN(2412, 1), 721 FREQ2FBIN(2412, 1),
@@ -1289,7 +1291,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
1289 }, 1291 },
1290 .base_ext1 = { 1292 .base_ext1 = {
1291 .ant_div_control = 0, 1293 .ant_div_control = 0,
1292 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 1294 .future = {0, 0, 0},
1295 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
1293 }, 1296 },
1294 .calFreqPier2G = { 1297 .calFreqPier2G = {
1295 FREQ2FBIN(2412, 1), 1298 FREQ2FBIN(2412, 1),
@@ -1865,7 +1868,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
1865 }, 1868 },
1866 .base_ext1 = { 1869 .base_ext1 = {
1867 .ant_div_control = 0, 1870 .ant_div_control = 0,
1868 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 1871 .future = {0, 0, 0},
1872 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
1869 }, 1873 },
1870 .calFreqPier2G = { 1874 .calFreqPier2G = {
1871 FREQ2FBIN(2412, 1), 1875 FREQ2FBIN(2412, 1),
@@ -2440,7 +2444,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
2440 }, 2444 },
2441 .base_ext1 = { 2445 .base_ext1 = {
2442 .ant_div_control = 0, 2446 .ant_div_control = 0,
2443 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 2447 .future = {0, 0, 0},
2448 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
2444 }, 2449 },
2445 .calFreqPier2G = { 2450 .calFreqPier2G = {
2446 FREQ2FBIN(2412, 1), 2451 FREQ2FBIN(2412, 1),
@@ -3524,7 +3529,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
3524 3529
3525 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) 3530 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
3526 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); 3531 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
3527 else if (AR_SREV_9462(ah) || AR_SREV_9550(ah)) 3532 else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
3528 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); 3533 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
3529 else { 3534 else {
3530 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); 3535 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3561,9 +3566,9 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
3561 3566
3562static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) 3567static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3563{ 3568{
3569 struct ath9k_hw_capabilities *pCap = &ah->caps;
3564 int chain; 3570 int chain;
3565 u32 regval; 3571 u32 regval;
3566 u32 ant_div_ctl1;
3567 static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = { 3572 static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
3568 AR_PHY_SWITCH_CHAIN_0, 3573 AR_PHY_SWITCH_CHAIN_0,
3569 AR_PHY_SWITCH_CHAIN_1, 3574 AR_PHY_SWITCH_CHAIN_1,
@@ -3572,7 +3577,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3572 3577
3573 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); 3578 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
3574 3579
3575 if (AR_SREV_9462(ah)) { 3580 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3576 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, 3581 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
3577 AR_SWITCH_TABLE_COM_AR9462_ALL, value); 3582 AR_SWITCH_TABLE_COM_AR9462_ALL, value);
3578 } else if (AR_SREV_9550(ah)) { 3583 } else if (AR_SREV_9550(ah)) {
@@ -3616,7 +3621,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3616 } 3621 }
3617 } 3622 }
3618 3623
3619 if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { 3624 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
3620 value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1); 3625 value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
3621 /* 3626 /*
3622 * main_lnaconf, alt_lnaconf, main_tb, alt_tb 3627 * main_lnaconf, alt_lnaconf, main_tb, alt_tb
@@ -3626,41 +3631,44 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3626 regval &= (~AR_ANT_DIV_CTRL_ALL); 3631 regval &= (~AR_ANT_DIV_CTRL_ALL);
3627 regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; 3632 regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
3628 /* enable_lnadiv */ 3633 /* enable_lnadiv */
3629 regval &= (~AR_PHY_9485_ANT_DIV_LNADIV); 3634 regval &= (~AR_PHY_ANT_DIV_LNADIV);
3630 regval |= ((value >> 6) & 0x1) << 3635 regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
3631 AR_PHY_9485_ANT_DIV_LNADIV_S; 3636
3637 if (AR_SREV_9565(ah)) {
3638 if (ah->shared_chain_lnadiv) {
3639 regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
3640 } else {
3641 regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
3642 regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
3643 }
3644 }
3645
3632 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 3646 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3633 3647
3634 /*enable fast_div */ 3648 /*enable fast_div */
3635 regval = REG_READ(ah, AR_PHY_CCK_DETECT); 3649 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
3636 regval &= (~AR_FAST_DIV_ENABLE); 3650 regval &= (~AR_FAST_DIV_ENABLE);
3637 regval |= ((value >> 7) & 0x1) << 3651 regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
3638 AR_FAST_DIV_ENABLE_S;
3639 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); 3652 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
3640 ant_div_ctl1 = 3653
3641 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 3654 if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
3642 /* check whether antenna diversity is enabled */
3643 if ((ant_div_ctl1 >> 0x6) == 0x3) {
3644 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 3655 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
3645 /* 3656 /*
3646 * clear bits 25-30 main_lnaconf, alt_lnaconf, 3657 * clear bits 25-30 main_lnaconf, alt_lnaconf,
3647 * main_tb, alt_tb 3658 * main_tb, alt_tb
3648 */ 3659 */
3649 regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF | 3660 regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
3650 AR_PHY_9485_ANT_DIV_ALT_LNACONF | 3661 AR_PHY_ANT_DIV_ALT_LNACONF |
3651 AR_PHY_9485_ANT_DIV_ALT_GAINTB | 3662 AR_PHY_ANT_DIV_ALT_GAINTB |
3652 AR_PHY_9485_ANT_DIV_MAIN_GAINTB)); 3663 AR_PHY_ANT_DIV_MAIN_GAINTB));
3653 /* by default use LNA1 for the main antenna */ 3664 /* by default use LNA1 for the main antenna */
3654 regval |= (AR_PHY_9485_ANT_DIV_LNA1 << 3665 regval |= (AR_PHY_ANT_DIV_LNA1 <<
3655 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S); 3666 AR_PHY_ANT_DIV_MAIN_LNACONF_S);
3656 regval |= (AR_PHY_9485_ANT_DIV_LNA2 << 3667 regval |= (AR_PHY_ANT_DIV_LNA2 <<
3657 AR_PHY_9485_ANT_DIV_ALT_LNACONF_S); 3668 AR_PHY_ANT_DIV_ALT_LNACONF_S);
3658 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 3669 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3659 } 3670 }
3660
3661
3662 } 3671 }
3663
3664} 3672}
3665 3673
3666static void ar9003_hw_drive_strength_apply(struct ath_hw *ah) 3674static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
@@ -3847,7 +3855,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3847 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); 3855 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
3848 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) 3856 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
3849 return; 3857 return;
3850 } else if (AR_SREV_9462(ah)) { 3858 } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3851 reg_val = le32_to_cpu(pBase->swreg); 3859 reg_val = le32_to_cpu(pBase->swreg);
3852 REG_WRITE(ah, AR_PHY_PMU1, reg_val); 3860 REG_WRITE(ah, AR_PHY_PMU1, reg_val);
3853 } else { 3861 } else {
@@ -3878,7 +3886,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3878 while (!REG_READ_FIELD(ah, AR_PHY_PMU2, 3886 while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
3879 AR_PHY_PMU2_PGM)) 3887 AR_PHY_PMU2_PGM))
3880 udelay(10); 3888 udelay(10);
3881 } else if (AR_SREV_9462(ah)) 3889 } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
3882 REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); 3890 REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
3883 else { 3891 else {
3884 reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) | 3892 reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
@@ -3981,6 +3989,62 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
3981 bias & 0x3); 3989 bias & 0x3);
3982} 3990}
3983 3991
3992static int ar9003_hw_get_thermometer(struct ath_hw *ah)
3993{
3994 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
3995 struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
3996 int thermometer = (pBase->miscConfiguration >> 1) & 0x3;
3997
3998 return --thermometer;
3999}
4000
4001static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
4002{
4003 int thermometer = ar9003_hw_get_thermometer(ah);
4004 u8 therm_on = (thermometer < 0) ? 0 : 1;
4005
4006 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
4007 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
4008 if (ah->caps.tx_chainmask & BIT(1))
4009 REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
4010 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
4011 if (ah->caps.tx_chainmask & BIT(2))
4012 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
4013 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
4014
4015 therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
4016 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
4017 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4018 if (ah->caps.tx_chainmask & BIT(1)) {
4019 therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
4020 REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
4021 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4022 }
4023 if (ah->caps.tx_chainmask & BIT(2)) {
4024 therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
4025 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
4026 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4027 }
4028}
4029
4030static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
4031{
4032 u32 data, ko, kg;
4033
4034 if (!AR_SREV_9462_20(ah))
4035 return;
4036 ar9300_otp_read_word(ah, 1, &data);
4037 ko = data & 0xff;
4038 kg = (data >> 8) & 0xff;
4039 if (ko || kg) {
4040 REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
4041 AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko);
4042 REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
4043 AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN,
4044 kg + 256);
4045 }
4046}
4047
3984static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, 4048static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
3985 struct ath9k_channel *chan) 4049 struct ath9k_channel *chan)
3986{ 4050{
@@ -3996,6 +4060,8 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
3996 ar9003_hw_internal_regulator_apply(ah); 4060 ar9003_hw_internal_regulator_apply(ah);
3997 ar9003_hw_apply_tuning_caps(ah); 4061 ar9003_hw_apply_tuning_caps(ah);
3998 ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz); 4062 ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
4063 ar9003_hw_thermometer_apply(ah);
4064 ar9003_hw_thermo_cal_apply(ah);
3999} 4065}
4000 4066
4001static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, 4067static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4532,7 +4598,7 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
4532{ 4598{
4533 int tempSlope = 0; 4599 int tempSlope = 0;
4534 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 4600 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4535 int f[3], t[3]; 4601 int f[8], t[8], i;
4536 4602
4537 REG_RMW(ah, AR_PHY_TPC_11_B0, 4603 REG_RMW(ah, AR_PHY_TPC_11_B0,
4538 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), 4604 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -4565,7 +4631,14 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
4565 */ 4631 */
4566 if (frequency < 4000) 4632 if (frequency < 4000)
4567 tempSlope = eep->modalHeader2G.tempSlope; 4633 tempSlope = eep->modalHeader2G.tempSlope;
4568 else if (eep->base_ext2.tempSlopeLow != 0) { 4634 else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
4635 for (i = 0; i < 8; i++) {
4636 t[i] = eep->base_ext1.tempslopextension[i];
4637 f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
4638 }
4639 tempSlope = ar9003_hw_power_interpolate((s32) frequency,
4640 f, t, 8);
4641 } else if (eep->base_ext2.tempSlopeLow != 0) {
4569 t[0] = eep->base_ext2.tempSlopeLow; 4642 t[0] = eep->base_ext2.tempSlopeLow;
4570 f[0] = 5180; 4643 f[0] = 5180;
4571 t[1] = eep->modalHeader5G.tempSlope; 4644 t[1] = eep->modalHeader5G.tempSlope;
@@ -4905,90 +4978,79 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4905 i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], 4978 i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
4906 chan->channel); 4979 chan->channel);
4907 4980
4908 /* 4981 /*
4909 * compare test group from regulatory 4982 * compare test group from regulatory
4910 * channel list with test mode from pCtlMode 4983 * channel list with test mode from pCtlMode
4911 * list 4984 * list
4912 */ 4985 */
4913 if ((((cfgCtl & ~CTL_MODE_M) | 4986 if ((((cfgCtl & ~CTL_MODE_M) |
4914 (pCtlMode[ctlMode] & CTL_MODE_M)) == 4987 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4915 ctlIndex[i]) || 4988 ctlIndex[i]) ||
4916 (((cfgCtl & ~CTL_MODE_M) | 4989 (((cfgCtl & ~CTL_MODE_M) |
4917 (pCtlMode[ctlMode] & CTL_MODE_M)) == 4990 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4918 ((ctlIndex[i] & CTL_MODE_M) | 4991 ((ctlIndex[i] & CTL_MODE_M) |
4919 SD_NO_CTL))) { 4992 SD_NO_CTL))) {
4920 twiceMinEdgePower = 4993 twiceMinEdgePower =
4921 ar9003_hw_get_max_edge_power(pEepData, 4994 ar9003_hw_get_max_edge_power(pEepData,
4922 freq, i, 4995 freq, i,
4923 is2ghz); 4996 is2ghz);
4924 4997
4925 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) 4998 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
4926 /* 4999 /*
4927 * Find the minimum of all CTL 5000 * Find the minimum of all CTL
4928 * edge powers that apply to 5001 * edge powers that apply to
4929 * this channel 5002 * this channel
4930 */ 5003 */
4931 twiceMaxEdgePower = 5004 twiceMaxEdgePower =
4932 min(twiceMaxEdgePower, 5005 min(twiceMaxEdgePower,
4933 twiceMinEdgePower); 5006 twiceMinEdgePower);
4934 else { 5007 else {
4935 /* specific */ 5008 /* specific */
4936 twiceMaxEdgePower = 5009 twiceMaxEdgePower = twiceMinEdgePower;
4937 twiceMinEdgePower; 5010 break;
4938 break;
4939 }
4940 } 5011 }
4941 } 5012 }
5013 }
4942 5014
4943 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); 5015 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
4944 5016
4945 ath_dbg(common, REGULATORY, 5017 ath_dbg(common, REGULATORY,
4946 "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", 5018 "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
4947 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, 5019 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
4948 scaledPower, minCtlPower); 5020 scaledPower, minCtlPower);
4949 5021
4950 /* Apply ctl mode to correct target power set */ 5022 /* Apply ctl mode to correct target power set */
4951 switch (pCtlMode[ctlMode]) { 5023 switch (pCtlMode[ctlMode]) {
4952 case CTL_11B: 5024 case CTL_11B:
4953 for (i = ALL_TARGET_LEGACY_1L_5L; 5025 for (i = ALL_TARGET_LEGACY_1L_5L;
4954 i <= ALL_TARGET_LEGACY_11S; i++) 5026 i <= ALL_TARGET_LEGACY_11S; i++)
4955 pPwrArray[i] = 5027 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4956 (u8)min((u16)pPwrArray[i], 5028 minCtlPower);
4957 minCtlPower); 5029 break;
4958 break; 5030 case CTL_11A:
4959 case CTL_11A: 5031 case CTL_11G:
4960 case CTL_11G: 5032 for (i = ALL_TARGET_LEGACY_6_24;
4961 for (i = ALL_TARGET_LEGACY_6_24; 5033 i <= ALL_TARGET_LEGACY_54; i++)
4962 i <= ALL_TARGET_LEGACY_54; i++) 5034 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4963 pPwrArray[i] = 5035 minCtlPower);
4964 (u8)min((u16)pPwrArray[i], 5036 break;
4965 minCtlPower); 5037 case CTL_5GHT20:
4966 break; 5038 case CTL_2GHT20:
4967 case CTL_5GHT20: 5039 for (i = ALL_TARGET_HT20_0_8_16;
4968 case CTL_2GHT20: 5040 i <= ALL_TARGET_HT20_23; i++)
4969 for (i = ALL_TARGET_HT20_0_8_16; 5041 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4970 i <= ALL_TARGET_HT20_21; i++) 5042 minCtlPower);
4971 pPwrArray[i] = 5043 break;
4972 (u8)min((u16)pPwrArray[i], 5044 case CTL_5GHT40:
4973 minCtlPower); 5045 case CTL_2GHT40:
4974 pPwrArray[ALL_TARGET_HT20_22] = 5046 for (i = ALL_TARGET_HT40_0_8_16;
4975 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], 5047 i <= ALL_TARGET_HT40_23; i++)
4976 minCtlPower); 5048 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4977 pPwrArray[ALL_TARGET_HT20_23] = 5049 minCtlPower);
4978 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], 5050 break;
4979 minCtlPower); 5051 default:
4980 break; 5052 break;
4981 case CTL_5GHT40: 5053 }
4982 case CTL_2GHT40:
4983 for (i = ALL_TARGET_HT40_0_8_16;
4984 i <= ALL_TARGET_HT40_23; i++)
4985 pPwrArray[i] =
4986 (u8)min((u16)pPwrArray[i],
4987 minCtlPower);
4988 break;
4989 default:
4990 break;
4991 }
4992 } /* end ctl mode checking */ 5054 } /* end ctl mode checking */
4993} 5055}
4994 5056
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 3a1ff55bceb9..41b1a75e6bec 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -267,7 +267,8 @@ struct cal_ctl_data_5g {
267 267
268struct ar9300_BaseExtension_1 { 268struct ar9300_BaseExtension_1 {
269 u8 ant_div_control; 269 u8 ant_div_control;
270 u8 future[11]; 270 u8 future[3];
271 u8 tempslopextension[8];
271 int8_t quick_drop_low; 272 int8_t quick_drop_low;
272 int8_t quick_drop_high; 273 int8_t quick_drop_high;
273} __packed; 274} __packed;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 1e8a4da5952f..1a36fa262639 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -24,6 +24,7 @@
24#include "ar955x_1p0_initvals.h" 24#include "ar955x_1p0_initvals.h"
25#include "ar9580_1p0_initvals.h" 25#include "ar9580_1p0_initvals.h"
26#include "ar9462_2p0_initvals.h" 26#include "ar9462_2p0_initvals.h"
27#include "ar9565_1p0_initvals.h"
27 28
28/* General hardware code for the AR9003 hadware family */ 29/* General hardware code for the AR9003 hadware family */
29 30
@@ -34,14 +35,12 @@
34 */ 35 */
35static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 36static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
36{ 37{
37#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
38 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
39
40#define AR9462_BB_CTX_COEFJ(x) \ 38#define AR9462_BB_CTX_COEFJ(x) \
41 ar9462_##x##_baseband_core_txfir_coeff_japan_2484 39 ar9462_##x##_baseband_core_txfir_coeff_japan_2484
42 40
43#define AR9462_BBC_TXIFR_COEFFJ \ 41#define AR9462_BBC_TXIFR_COEFFJ \
44 ar9462_2p0_baseband_core_txfir_coeff_japan_2484 42 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
43
45 if (AR_SREV_9330_11(ah)) { 44 if (AR_SREV_9330_11(ah)) {
46 /* mac */ 45 /* mac */
47 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 46 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -220,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
220 219
221 /* Awake -> Sleep Setting */ 220 /* Awake -> Sleep Setting */
222 INIT_INI_ARRAY(&ah->iniPcieSerdes, 221 INIT_INI_ARRAY(&ah->iniPcieSerdes,
223 PCIE_PLL_ON_CREQ_DIS_L1_2P0); 222 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
224 /* Sleep -> Awake Setting */ 223 /* Sleep -> Awake Setting */
225 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 224 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
226 PCIE_PLL_ON_CREQ_DIS_L1_2P0); 225 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
227 226
228 /* Fast clock modal settings */ 227 /* Fast clock modal settings */
229 INIT_INI_ARRAY(&ah->iniModesFastClock, 228 INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -302,6 +301,39 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
302 301
303 INIT_INI_ARRAY(&ah->iniModesFastClock, 302 INIT_INI_ARRAY(&ah->iniModesFastClock,
304 ar9580_1p0_modes_fast_clock); 303 ar9580_1p0_modes_fast_clock);
304 } else if (AR_SREV_9565(ah)) {
305 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
306 ar9565_1p0_mac_core);
307 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
308 ar9565_1p0_mac_postamble);
309
310 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
311 ar9565_1p0_baseband_core);
312 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
313 ar9565_1p0_baseband_postamble);
314
315 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
316 ar9565_1p0_radio_core);
317 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
318 ar9565_1p0_radio_postamble);
319
320 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
321 ar9565_1p0_soc_preamble);
322 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
323 ar9565_1p0_soc_postamble);
324
325 INIT_INI_ARRAY(&ah->iniModesRxGain,
326 ar9565_1p0_Common_rx_gain_table);
327 INIT_INI_ARRAY(&ah->iniModesTxGain,
328 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
329
330 INIT_INI_ARRAY(&ah->iniPcieSerdes,
331 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
332 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
333 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
334
335 INIT_INI_ARRAY(&ah->iniModesFastClock,
336 ar9565_1p0_modes_fast_clock);
305 } else { 337 } else {
306 /* mac */ 338 /* mac */
307 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 339 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -374,6 +406,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
374 else if (AR_SREV_9462_20(ah)) 406 else if (AR_SREV_9462_20(ah))
375 INIT_INI_ARRAY(&ah->iniModesTxGain, 407 INIT_INI_ARRAY(&ah->iniModesTxGain,
376 ar9462_modes_low_ob_db_tx_gain_table_2p0); 408 ar9462_modes_low_ob_db_tx_gain_table_2p0);
409 else if (AR_SREV_9565(ah))
410 INIT_INI_ARRAY(&ah->iniModesTxGain,
411 ar9565_1p0_modes_low_ob_db_tx_gain_table);
377 else 412 else
378 INIT_INI_ARRAY(&ah->iniModesTxGain, 413 INIT_INI_ARRAY(&ah->iniModesTxGain,
379 ar9300Modes_lowest_ob_db_tx_gain_table_2p2); 414 ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
@@ -402,6 +437,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
402 else if (AR_SREV_9462_20(ah)) 437 else if (AR_SREV_9462_20(ah))
403 INIT_INI_ARRAY(&ah->iniModesTxGain, 438 INIT_INI_ARRAY(&ah->iniModesTxGain,
404 ar9462_modes_high_ob_db_tx_gain_table_2p0); 439 ar9462_modes_high_ob_db_tx_gain_table_2p0);
440 else if (AR_SREV_9565(ah))
441 INIT_INI_ARRAY(&ah->iniModesTxGain,
442 ar9565_1p0_modes_high_ob_db_tx_gain_table);
405 else 443 else
406 INIT_INI_ARRAY(&ah->iniModesTxGain, 444 INIT_INI_ARRAY(&ah->iniModesTxGain,
407 ar9300Modes_high_ob_db_tx_gain_table_2p2); 445 ar9300Modes_high_ob_db_tx_gain_table_2p2);
@@ -424,6 +462,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
424 else if (AR_SREV_9580(ah)) 462 else if (AR_SREV_9580(ah))
425 INIT_INI_ARRAY(&ah->iniModesTxGain, 463 INIT_INI_ARRAY(&ah->iniModesTxGain,
426 ar9580_1p0_low_ob_db_tx_gain_table); 464 ar9580_1p0_low_ob_db_tx_gain_table);
465 else if (AR_SREV_9565(ah))
466 INIT_INI_ARRAY(&ah->iniModesTxGain,
467 ar9565_1p0_modes_low_ob_db_tx_gain_table);
427 else 468 else
428 INIT_INI_ARRAY(&ah->iniModesTxGain, 469 INIT_INI_ARRAY(&ah->iniModesTxGain,
429 ar9300Modes_low_ob_db_tx_gain_table_2p2); 470 ar9300Modes_low_ob_db_tx_gain_table_2p2);
@@ -446,6 +487,9 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
446 else if (AR_SREV_9580(ah)) 487 else if (AR_SREV_9580(ah))
447 INIT_INI_ARRAY(&ah->iniModesTxGain, 488 INIT_INI_ARRAY(&ah->iniModesTxGain,
448 ar9580_1p0_high_power_tx_gain_table); 489 ar9580_1p0_high_power_tx_gain_table);
490 else if (AR_SREV_9565(ah))
491 INIT_INI_ARRAY(&ah->iniModesTxGain,
492 ar9565_1p0_modes_high_power_tx_gain_table);
449 else 493 else
450 INIT_INI_ARRAY(&ah->iniModesTxGain, 494 INIT_INI_ARRAY(&ah->iniModesTxGain,
451 ar9300Modes_high_power_tx_gain_table_2p2); 495 ar9300Modes_high_power_tx_gain_table_2p2);
@@ -538,6 +582,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
538 } else if (AR_SREV_9580(ah)) 582 } else if (AR_SREV_9580(ah))
539 INIT_INI_ARRAY(&ah->iniModesRxGain, 583 INIT_INI_ARRAY(&ah->iniModesRxGain,
540 ar9580_1p0_wo_xlna_rx_gain_table); 584 ar9580_1p0_wo_xlna_rx_gain_table);
585 else if (AR_SREV_9565(ah))
586 INIT_INI_ARRAY(&ah->iniModesRxGain,
587 ar9565_1p0_common_wo_xlna_rx_gain_table);
541 else 588 else
542 INIT_INI_ARRAY(&ah->iniModesRxGain, 589 INIT_INI_ARRAY(&ah->iniModesRxGain,
543 ar9300Common_wo_xlna_rx_gain_table_2p2); 590 ar9300Common_wo_xlna_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 78816b8b2173..301bf72c53bf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -31,7 +31,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
31 u32 val, ctl12, ctl17; 31 u32 val, ctl12, ctl17;
32 u8 desc_len; 32 u8 desc_len;
33 33
34 desc_len = (AR_SREV_9462(ah) ? 0x18 : 0x17); 34 desc_len = ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x18 : 0x17);
35 35
36 val = (ATHEROS_VENDOR_ID << AR_DescId_S) | 36 val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
37 (1 << AR_TxRxDesc_S) | 37 (1 << AR_TxRxDesc_S) |
@@ -182,6 +182,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
182 struct ath9k_hw_capabilities *pCap = &ah->caps; 182 struct ath9k_hw_capabilities *pCap = &ah->caps;
183 struct ath_common *common = ath9k_hw_common(ah); 183 struct ath_common *common = ath9k_hw_common(ah);
184 u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ; 184 u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
185 bool fatal_int;
185 186
186 if (ath9k_hw_mci_is_enabled(ah)) 187 if (ath9k_hw_mci_is_enabled(ah))
187 async_mask |= AR_INTR_ASYNC_MASK_MCI; 188 async_mask |= AR_INTR_ASYNC_MASK_MCI;
@@ -310,6 +311,22 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
310 311
311 if (sync_cause) { 312 if (sync_cause) {
312 ath9k_debug_sync_cause(common, sync_cause); 313 ath9k_debug_sync_cause(common, sync_cause);
314 fatal_int =
315 (sync_cause &
316 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
317 ? true : false;
318
319 if (fatal_int) {
320 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
321 ath_dbg(common, ANY,
322 "received PCI FATAL interrupt\n");
323 }
324 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
325 ath_dbg(common, ANY,
326 "received PCI PERR interrupt\n");
327 }
328 *masked |= ATH9K_INT_FATAL;
329 }
313 330
314 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 331 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
315 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 332 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -531,7 +548,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
531 rxs->rs_status |= ATH9K_RXERR_PHY; 548 rxs->rs_status |= ATH9K_RXERR_PHY;
532 rxs->rs_phyerr = phyerr; 549 rxs->rs_phyerr = phyerr;
533 } 550 }
534 }; 551 }
535 } 552 }
536 553
537 if (rxsp->status11 & AR_KeyMiss) 554 if (rxsp->status11 & AR_KeyMiss)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 9a34fcaae3ff..44c202ce6c66 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -714,6 +714,7 @@ bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
714 714
715 return true; 715 return true;
716} 716}
717EXPORT_SYMBOL(ar9003_mci_start_reset);
717 718
718int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 719int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
719 struct ath9k_hw_cal_data *caldata) 720 struct ath9k_hw_cal_data *caldata)
@@ -812,8 +813,8 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
812 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1); 813 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
813} 814}
814 815
815void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 816int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
816 bool is_full_sleep) 817 bool is_full_sleep)
817{ 818{
818 struct ath_common *common = ath9k_hw_common(ah); 819 struct ath_common *common = ath9k_hw_common(ah);
819 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 820 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
@@ -823,14 +824,13 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
823 is_full_sleep, is_2g); 824 is_full_sleep, is_2g);
824 825
825 if (!mci->gpm_addr && !mci->sched_addr) { 826 if (!mci->gpm_addr && !mci->sched_addr) {
826 ath_dbg(common, MCI, 827 ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
827 "MCI GPM and schedule buffers are not allocated\n"); 828 return -ENOMEM;
828 return;
829 } 829 }
830 830
831 if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) { 831 if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
832 ath_dbg(common, MCI, "BTCOEX control register is dead\n"); 832 ath_err(common, "BTCOEX control register is dead\n");
833 return; 833 return -EINVAL;
834 } 834 }
835 835
836 /* Program MCI DMA related registers */ 836 /* Program MCI DMA related registers */
@@ -912,6 +912,8 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
912 912
913 if (en_int) 913 if (en_int)
914 ar9003_mci_enable_interrupt(ah); 914 ar9003_mci_enable_interrupt(ah);
915
916 return 0;
915} 917}
916 918
917void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep) 919void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
@@ -1026,6 +1028,7 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
1026 1028
1027 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) 1029 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
1028 ar9003_mci_osla_setup(ah, true); 1030 ar9003_mci_osla_setup(ah, true);
1031 REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
1029 } else { 1032 } else {
1030 ar9003_mci_send_lna_take(ah, true); 1033 ar9003_mci_send_lna_take(ah, true);
1031 udelay(5); 1034 udelay(5);
@@ -1142,8 +1145,8 @@ void ar9003_mci_init_cal_done(struct ath_hw *ah)
1142 ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); 1145 ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
1143} 1146}
1144 1147
1145void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, 1148int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
1146 u16 len, u32 sched_addr) 1149 u16 len, u32 sched_addr)
1147{ 1150{
1148 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 1151 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1149 1152
@@ -1152,7 +1155,7 @@ void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
1152 mci->gpm_len = len; 1155 mci->gpm_len = len;
1153 mci->sched_addr = sched_addr; 1156 mci->sched_addr = sched_addr;
1154 1157
1155 ar9003_mci_reset(ah, true, true, true); 1158 return ar9003_mci_reset(ah, true, true, true);
1156} 1159}
1157EXPORT_SYMBOL(ar9003_mci_setup); 1160EXPORT_SYMBOL(ar9003_mci_setup);
1158 1161
@@ -1201,12 +1204,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1201 1204
1202 ar9003_mci_2g5g_switch(ah, false); 1205 ar9003_mci_2g5g_switch(ah, false);
1203 break; 1206 break;
1204 case MCI_STATE_SET_BT_CAL_START:
1205 mci->bt_state = MCI_BT_CAL_START;
1206 break;
1207 case MCI_STATE_SET_BT_CAL:
1208 mci->bt_state = MCI_BT_CAL;
1209 break;
1210 case MCI_STATE_RESET_REQ_WAKE: 1207 case MCI_STATE_RESET_REQ_WAKE:
1211 ar9003_mci_reset_req_wakeup(ah); 1208 ar9003_mci_reset_req_wakeup(ah);
1212 mci->update_2g5g = true; 1209 mci->update_2g5g = true;
@@ -1240,6 +1237,10 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1240 case MCI_STATE_NEED_FTP_STOMP: 1237 case MCI_STATE_NEED_FTP_STOMP:
1241 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); 1238 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
1242 break; 1239 break;
1240 case MCI_STATE_NEED_FLUSH_BT_INFO:
1241 value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
1242 mci->need_flush_btinfo = false;
1243 break;
1243 default: 1244 default:
1244 break; 1245 break;
1245 } 1246 }
@@ -1289,7 +1290,7 @@ void ar9003_mci_set_power_awake(struct ath_hw *ah)
1289 } 1290 }
1290 REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18))); 1291 REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
1291 lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3; 1292 lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
1292 bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP; 1293 bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP);
1293 1294
1294 REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2); 1295 REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
1295 REG_WRITE(ah, AR_DIAG_SW, diag_sw); 1296 REG_WRITE(ah, AR_DIAG_SW, diag_sw);
@@ -1327,6 +1328,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
1327 1328
1328 if (first) { 1329 if (first) {
1329 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); 1330 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1331
1332 if (gpm_ptr >= mci->gpm_len)
1333 gpm_ptr = 0;
1334
1330 mci->gpm_idx = gpm_ptr; 1335 mci->gpm_idx = gpm_ptr;
1331 return gpm_ptr; 1336 return gpm_ptr;
1332 } 1337 }
@@ -1371,6 +1376,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
1371 more_gpm = MCI_GPM_NOMORE; 1376 more_gpm = MCI_GPM_NOMORE;
1372 1377
1373 temp_index = mci->gpm_idx; 1378 temp_index = mci->gpm_idx;
1379
1380 if (temp_index >= mci->gpm_len)
1381 temp_index = 0;
1382
1374 mci->gpm_idx++; 1383 mci->gpm_idx++;
1375 1384
1376 if (mci->gpm_idx >= mci->gpm_len) 1385 if (mci->gpm_idx >= mci->gpm_len)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index d33b8e128855..2a2d01889613 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -190,8 +190,6 @@ enum mci_bt_state {
190enum mci_state_type { 190enum mci_state_type {
191 MCI_STATE_ENABLE, 191 MCI_STATE_ENABLE,
192 MCI_STATE_SET_BT_AWAKE, 192 MCI_STATE_SET_BT_AWAKE,
193 MCI_STATE_SET_BT_CAL_START,
194 MCI_STATE_SET_BT_CAL,
195 MCI_STATE_LAST_SCHD_MSG_OFFSET, 193 MCI_STATE_LAST_SCHD_MSG_OFFSET,
196 MCI_STATE_REMOTE_SLEEP, 194 MCI_STATE_REMOTE_SLEEP,
197 MCI_STATE_RESET_REQ_WAKE, 195 MCI_STATE_RESET_REQ_WAKE,
@@ -202,6 +200,7 @@ enum mci_state_type {
202 MCI_STATE_RECOVER_RX, 200 MCI_STATE_RECOVER_RX,
203 MCI_STATE_NEED_FTP_STOMP, 201 MCI_STATE_NEED_FTP_STOMP,
204 MCI_STATE_DEBUG, 202 MCI_STATE_DEBUG,
203 MCI_STATE_NEED_FLUSH_BT_INFO,
205 MCI_STATE_MAX 204 MCI_STATE_MAX
206}; 205};
207 206
@@ -213,7 +212,8 @@ enum mci_gpm_coex_opcode {
213 MCI_GPM_COEX_WLAN_CHANNELS, 212 MCI_GPM_COEX_WLAN_CHANNELS,
214 MCI_GPM_COEX_BT_PROFILE_INFO, 213 MCI_GPM_COEX_BT_PROFILE_INFO,
215 MCI_GPM_COEX_BT_STATUS_UPDATE, 214 MCI_GPM_COEX_BT_STATUS_UPDATE,
216 MCI_GPM_COEX_BT_UPDATE_FLAGS 215 MCI_GPM_COEX_BT_UPDATE_FLAGS,
216 MCI_GPM_COEX_NOOP,
217}; 217};
218 218
219#define MCI_GPM_NOMORE 0 219#define MCI_GPM_NOMORE 0
@@ -249,8 +249,8 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
249 u32 *payload, u8 len, bool wait_done, 249 u32 *payload, u8 len, bool wait_done,
250 bool check_bt); 250 bool check_bt);
251u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type); 251u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
252void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, 252int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
253 u16 len, u32 sched_addr); 253 u16 len, u32 sched_addr);
254void ar9003_mci_cleanup(struct ath_hw *ah); 254void ar9003_mci_cleanup(struct ath_hw *ah);
255void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, 255void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
256 u32 *rx_msg_intr); 256 u32 *rx_msg_intr);
@@ -272,8 +272,8 @@ void ar9003_mci_check_bt(struct ath_hw *ah);
272bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan); 272bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
273int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 273int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
274 struct ath9k_hw_cal_data *caldata); 274 struct ath9k_hw_cal_data *caldata);
275void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 275int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
276 bool is_full_sleep); 276 bool is_full_sleep);
277void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked); 277void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah); 278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
279void ar9003_mci_set_power_awake(struct ath_hw *ah); 279void ar9003_mci_set_power_awake(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e476f9f92ce3..759f5f5a7154 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -88,7 +88,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
88 channelSel = (freq * 4) / div; 88 channelSel = (freq * 4) / div;
89 chan_frac = (((freq * 4) % div) * 0x20000) / div; 89 chan_frac = (((freq * 4) % div) * 0x20000) / div;
90 channelSel = (channelSel << 17) | chan_frac; 90 channelSel = (channelSel << 17) | chan_frac;
91 } else if (AR_SREV_9485(ah)) { 91 } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
92 u32 chan_frac; 92 u32 chan_frac;
93 93
94 /* 94 /*
@@ -206,6 +206,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
206 for (i = 0; i < max_spur_cnts; i++) { 206 for (i = 0; i < max_spur_cnts; i++) {
207 if (AR_SREV_9462(ah) && (i == 0 || i == 3)) 207 if (AR_SREV_9462(ah) && (i == 0 || i == 3))
208 continue; 208 continue;
209
209 negative = 0; 210 negative = 0;
210 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || 211 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
211 AR_SREV_9550(ah)) 212 AR_SREV_9550(ah))
@@ -301,7 +302,9 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
301 int freq_offset, 302 int freq_offset,
302 int spur_freq_sd, 303 int spur_freq_sd,
303 int spur_delta_phase, 304 int spur_delta_phase,
304 int spur_subchannel_sd) 305 int spur_subchannel_sd,
306 int range,
307 int synth_freq)
305{ 308{
306 int mask_index = 0; 309 int mask_index = 0;
307 310
@@ -316,8 +319,11 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
316 AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd); 319 AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
317 REG_RMW_FIELD(ah, AR_PHY_TIMING11, 320 REG_RMW_FIELD(ah, AR_PHY_TIMING11,
318 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1); 321 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
319 REG_RMW_FIELD(ah, AR_PHY_TIMING11, 322
320 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1); 323 if (!(AR_SREV_9565(ah) && range == 10 && synth_freq == 2437))
324 REG_RMW_FIELD(ah, AR_PHY_TIMING11,
325 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
326
321 REG_RMW_FIELD(ah, AR_PHY_TIMING4, 327 REG_RMW_FIELD(ah, AR_PHY_TIMING4,
322 AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1); 328 AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
323 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, 329 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
@@ -358,9 +364,44 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
358 AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff); 364 AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
359} 365}
360 366
367static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
368 int freq_offset)
369{
370 int mask_index = 0;
371
372 mask_index = (freq_offset << 4) / 5;
373 if (mask_index < 0)
374 mask_index = mask_index - 1;
375
376 mask_index = mask_index & 0x7f;
377
378 REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
379 AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B,
380 mask_index);
381
382 /* A == B */
383 REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
384 AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
385 mask_index);
386
387 REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
388 AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B,
389 mask_index);
390 REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
391 AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B, 0xe);
392 REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
393 AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
394
395 /* A == B */
396 REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
397 AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
398}
399
361static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah, 400static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
362 struct ath9k_channel *chan, 401 struct ath9k_channel *chan,
363 int freq_offset) 402 int freq_offset,
403 int range,
404 int synth_freq)
364{ 405{
365 int spur_freq_sd = 0; 406 int spur_freq_sd = 0;
366 int spur_subchannel_sd = 0; 407 int spur_subchannel_sd = 0;
@@ -402,7 +443,8 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
402 freq_offset, 443 freq_offset,
403 spur_freq_sd, 444 spur_freq_sd,
404 spur_delta_phase, 445 spur_delta_phase,
405 spur_subchannel_sd); 446 spur_subchannel_sd,
447 range, synth_freq);
406} 448}
407 449
408/* Spur mitigation for OFDM */ 450/* Spur mitigation for OFDM */
@@ -447,7 +489,17 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
447 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode); 489 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
448 freq_offset -= synth_freq; 490 freq_offset -= synth_freq;
449 if (abs(freq_offset) < range) { 491 if (abs(freq_offset) < range) {
450 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset); 492 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
493 range, synth_freq);
494
495 if (AR_SREV_9565(ah) && (i < 4)) {
496 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
497 mode);
498 freq_offset -= synth_freq;
499 if (abs(freq_offset) < range)
500 ar9003_hw_spur_ofdm_9565(ah, freq_offset);
501 }
502
451 break; 503 break;
452 } 504 }
453 } 505 }
@@ -456,7 +508,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
456static void ar9003_hw_spur_mitigate(struct ath_hw *ah, 508static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
457 struct ath9k_channel *chan) 509 struct ath9k_channel *chan)
458{ 510{
459 ar9003_hw_spur_mitigate_mrc_cck(ah, chan); 511 if (!AR_SREV_9565(ah))
512 ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
460 ar9003_hw_spur_mitigate_ofdm(ah, chan); 513 ar9003_hw_spur_mitigate_ofdm(ah, chan);
461} 514}
462 515
@@ -552,9 +605,6 @@ static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
552 605
553 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7)) 606 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
554 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3); 607 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
555 else if (AR_SREV_9462(ah))
556 /* xxx only when MCI support is enabled */
557 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
558 else 608 else
559 REG_WRITE(ah, AR_SELFGEN_MASK, tx); 609 REG_WRITE(ah, AR_SELFGEN_MASK, tx);
560 610
@@ -736,7 +786,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
736 if (chan->channel == 2484) 786 if (chan->channel == 2484)
737 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); 787 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
738 788
739 if (AR_SREV_9462(ah)) 789 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
740 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE, 790 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
741 AR_GLB_SWREG_DISCONT_EN_BT_WLAN); 791 AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
742 792
@@ -746,9 +796,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
746 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 796 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
747 ath9k_hw_apply_txpower(ah, chan, false); 797 ath9k_hw_apply_txpower(ah, chan, false);
748 798
749 if (AR_SREV_9462(ah)) { 799 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
750 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, 800 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
751 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) 801 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
752 ah->enabled_cals |= TX_IQ_CAL; 802 ah->enabled_cals |= TX_IQ_CAL;
753 else 803 else
754 ah->enabled_cals &= ~TX_IQ_CAL; 804 ah->enabled_cals &= ~TX_IQ_CAL;
@@ -1111,7 +1161,7 @@ static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
1111 if (AR_SREV_9330(ah)) 1161 if (AR_SREV_9330(ah))
1112 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ; 1162 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
1113 1163
1114 if (AR_SREV_9462(ah)) { 1164 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
1115 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ; 1165 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
1116 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ; 1166 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
1117 ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ; 1167 ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
@@ -1223,17 +1273,17 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
1223} 1273}
1224 1274
1225static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah, 1275static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
1226 struct ath_hw_antcomb_conf *antconf) 1276 struct ath_hw_antcomb_conf *antconf)
1227{ 1277{
1228 u32 regval; 1278 u32 regval;
1229 1279
1230 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 1280 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1231 antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >> 1281 antconf->main_lna_conf = (regval & AR_PHY_ANT_DIV_MAIN_LNACONF) >>
1232 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S; 1282 AR_PHY_ANT_DIV_MAIN_LNACONF_S;
1233 antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >> 1283 antconf->alt_lna_conf = (regval & AR_PHY_ANT_DIV_ALT_LNACONF) >>
1234 AR_PHY_9485_ANT_DIV_ALT_LNACONF_S; 1284 AR_PHY_ANT_DIV_ALT_LNACONF_S;
1235 antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >> 1285 antconf->fast_div_bias = (regval & AR_PHY_ANT_FAST_DIV_BIAS) >>
1236 AR_PHY_9485_ANT_FAST_DIV_BIAS_S; 1286 AR_PHY_ANT_FAST_DIV_BIAS_S;
1237 1287
1238 if (AR_SREV_9330_11(ah)) { 1288 if (AR_SREV_9330_11(ah)) {
1239 antconf->lna1_lna2_delta = -9; 1289 antconf->lna1_lna2_delta = -9;
@@ -1241,6 +1291,9 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
1241 } else if (AR_SREV_9485(ah)) { 1291 } else if (AR_SREV_9485(ah)) {
1242 antconf->lna1_lna2_delta = -9; 1292 antconf->lna1_lna2_delta = -9;
1243 antconf->div_group = 2; 1293 antconf->div_group = 2;
1294 } else if (AR_SREV_9565(ah)) {
1295 antconf->lna1_lna2_delta = -3;
1296 antconf->div_group = 3;
1244 } else { 1297 } else {
1245 antconf->lna1_lna2_delta = -3; 1298 antconf->lna1_lna2_delta = -3;
1246 antconf->div_group = 0; 1299 antconf->div_group = 0;
@@ -1253,26 +1306,84 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
1253 u32 regval; 1306 u32 regval;
1254 1307
1255 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 1308 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1256 regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF | 1309 regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
1257 AR_PHY_9485_ANT_DIV_ALT_LNACONF | 1310 AR_PHY_ANT_DIV_ALT_LNACONF |
1258 AR_PHY_9485_ANT_FAST_DIV_BIAS | 1311 AR_PHY_ANT_FAST_DIV_BIAS |
1259 AR_PHY_9485_ANT_DIV_MAIN_GAINTB | 1312 AR_PHY_ANT_DIV_MAIN_GAINTB |
1260 AR_PHY_9485_ANT_DIV_ALT_GAINTB); 1313 AR_PHY_ANT_DIV_ALT_GAINTB);
1261 regval |= ((antconf->main_lna_conf << 1314 regval |= ((antconf->main_lna_conf << AR_PHY_ANT_DIV_MAIN_LNACONF_S)
1262 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S) 1315 & AR_PHY_ANT_DIV_MAIN_LNACONF);
1263 & AR_PHY_9485_ANT_DIV_MAIN_LNACONF); 1316 regval |= ((antconf->alt_lna_conf << AR_PHY_ANT_DIV_ALT_LNACONF_S)
1264 regval |= ((antconf->alt_lna_conf << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S) 1317 & AR_PHY_ANT_DIV_ALT_LNACONF);
1265 & AR_PHY_9485_ANT_DIV_ALT_LNACONF); 1318 regval |= ((antconf->fast_div_bias << AR_PHY_ANT_FAST_DIV_BIAS_S)
1266 regval |= ((antconf->fast_div_bias << AR_PHY_9485_ANT_FAST_DIV_BIAS_S) 1319 & AR_PHY_ANT_FAST_DIV_BIAS);
1267 & AR_PHY_9485_ANT_FAST_DIV_BIAS); 1320 regval |= ((antconf->main_gaintb << AR_PHY_ANT_DIV_MAIN_GAINTB_S)
1268 regval |= ((antconf->main_gaintb << AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S) 1321 & AR_PHY_ANT_DIV_MAIN_GAINTB);
1269 & AR_PHY_9485_ANT_DIV_MAIN_GAINTB); 1322 regval |= ((antconf->alt_gaintb << AR_PHY_ANT_DIV_ALT_GAINTB_S)
1270 regval |= ((antconf->alt_gaintb << AR_PHY_9485_ANT_DIV_ALT_GAINTB_S) 1323 & AR_PHY_ANT_DIV_ALT_GAINTB);
1271 & AR_PHY_9485_ANT_DIV_ALT_GAINTB);
1272 1324
1273 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 1325 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1274} 1326}
1275 1327
1328static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
1329 bool enable)
1330{
1331 u8 ant_div_ctl1;
1332 u32 regval;
1333
1334 if (!AR_SREV_9565(ah))
1335 return;
1336
1337 ah->shared_chain_lnadiv = enable;
1338 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
1339
1340 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1341 regval &= (~AR_ANT_DIV_CTRL_ALL);
1342 regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
1343 regval &= ~AR_PHY_ANT_DIV_LNADIV;
1344 regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
1345
1346 if (enable)
1347 regval |= AR_ANT_DIV_ENABLE;
1348
1349 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1350
1351 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
1352 regval &= ~AR_FAST_DIV_ENABLE;
1353 regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
1354
1355 if (enable)
1356 regval |= AR_FAST_DIV_ENABLE;
1357
1358 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
1359
1360 if (enable) {
1361 REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1362 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1363 if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
1364 REG_SET_BIT(ah, AR_PHY_RESTART,
1365 AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
1366 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
1367 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1368 } else {
1369 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
1370 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1371 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1372 REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
1373 REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
1374 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1375
1376 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1377 regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
1378 AR_PHY_ANT_DIV_ALT_LNACONF |
1379 AR_PHY_ANT_DIV_MAIN_GAINTB |
1380 AR_PHY_ANT_DIV_ALT_GAINTB);
1381 regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
1382 regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
1383 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1384 }
1385}
1386
1276static int ar9003_hw_fast_chan_change(struct ath_hw *ah, 1387static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1277 struct ath9k_channel *chan, 1388 struct ath9k_channel *chan,
1278 u8 *ini_reloaded) 1389 u8 *ini_reloaded)
@@ -1312,10 +1423,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1312 ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex); 1423 ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
1313 ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex); 1424 ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
1314 ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex); 1425 ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
1426
1315 if (AR_SREV_9462_20(ah)) 1427 if (AR_SREV_9462_20(ah))
1316 ar9003_hw_prog_ini(ah, 1428 ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
1317 &ah->ini_radio_post_sys2ant, 1429 modesIndex);
1318 modesIndex);
1319 1430
1320 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); 1431 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
1321 1432
@@ -1326,6 +1437,9 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1326 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1437 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1327 REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites); 1438 REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
1328 1439
1440 if (AR_SREV_9565(ah))
1441 REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
1442
1329 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); 1443 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
1330 1444
1331 ah->modes_index = modesIndex; 1445 ah->modes_index = modesIndex;
@@ -1368,6 +1482,7 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1368 1482
1369 ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get; 1483 ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
1370 ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set; 1484 ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
1485 ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
1371 1486
1372 ar9003_hw_set_nf_limits(ah); 1487 ar9003_hw_set_nf_limits(ah);
1373 ar9003_hw_set_radar_conf(ah); 1488 ar9003_hw_set_radar_conf(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 84d3d4956861..9a48e3d2f231 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -223,15 +223,24 @@
223#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c) 223#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
224#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20) 224#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
225 225
226#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0 226#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
227#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5 227#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
228#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F 228#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
229#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0 229#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
230#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B 0x00FE0000
231#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_S 17
232#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B 0x0001F000
233#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B_S 12
230 234
231#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0 235#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
232#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5 236#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
233#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F 237#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
234#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0 238#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
239#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B 0x00FE0000
240#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_S 17
241#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B 0x0001F000
242#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B_S 12
243
235 244
236/* 245/*
237 * MRC Feild Definitions 246 * MRC Feild Definitions
@@ -271,23 +280,25 @@
271#define AR_ANT_DIV_ENABLE_S 24 280#define AR_ANT_DIV_ENABLE_S 24
272 281
273 282
274#define AR_PHY_9485_ANT_FAST_DIV_BIAS 0x00007e00 283#define AR_PHY_ANT_FAST_DIV_BIAS 0x00007e00
275#define AR_PHY_9485_ANT_FAST_DIV_BIAS_S 9 284#define AR_PHY_ANT_FAST_DIV_BIAS_S 9
276#define AR_PHY_9485_ANT_DIV_LNADIV 0x01000000 285#define AR_PHY_ANT_SW_RX_PROT 0x00800000
277#define AR_PHY_9485_ANT_DIV_LNADIV_S 24 286#define AR_PHY_ANT_SW_RX_PROT_S 23
278#define AR_PHY_9485_ANT_DIV_ALT_LNACONF 0x06000000 287#define AR_PHY_ANT_DIV_LNADIV 0x01000000
279#define AR_PHY_9485_ANT_DIV_ALT_LNACONF_S 25 288#define AR_PHY_ANT_DIV_LNADIV_S 24
280#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF 0x18000000 289#define AR_PHY_ANT_DIV_ALT_LNACONF 0x06000000
281#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S 27 290#define AR_PHY_ANT_DIV_ALT_LNACONF_S 25
282#define AR_PHY_9485_ANT_DIV_ALT_GAINTB 0x20000000 291#define AR_PHY_ANT_DIV_MAIN_LNACONF 0x18000000
283#define AR_PHY_9485_ANT_DIV_ALT_GAINTB_S 29 292#define AR_PHY_ANT_DIV_MAIN_LNACONF_S 27
284#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB 0x40000000 293#define AR_PHY_ANT_DIV_ALT_GAINTB 0x20000000
285#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S 30 294#define AR_PHY_ANT_DIV_ALT_GAINTB_S 29
286 295#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
287#define AR_PHY_9485_ANT_DIV_LNA1_MINUS_LNA2 0x0 296#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
288#define AR_PHY_9485_ANT_DIV_LNA2 0x1 297
289#define AR_PHY_9485_ANT_DIV_LNA1 0x2 298#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0
290#define AR_PHY_9485_ANT_DIV_LNA1_PLUS_LNA2 0x3 299#define AR_PHY_ANT_DIV_LNA2 0x1
300#define AR_PHY_ANT_DIV_LNA1 0x2
301#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3
291 302
292#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c) 303#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
293#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30) 304#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
@@ -413,6 +424,8 @@
413#define AR_PHY_FIND_SIG_RELSTEP 0x1f 424#define AR_PHY_FIND_SIG_RELSTEP 0x1f
414#define AR_PHY_FIND_SIG_RELSTEP_S 0 425#define AR_PHY_FIND_SIG_RELSTEP_S 0
415#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5 426#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
427#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG 0x00200000
428#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG_S 21
416#define AR_PHY_RESTART_DIV_GC 0x001C0000 429#define AR_PHY_RESTART_DIV_GC 0x001C0000
417#define AR_PHY_RESTART_DIV_GC_S 18 430#define AR_PHY_RESTART_DIV_GC_S 18
418#define AR_PHY_RESTART_ENA 0x01 431#define AR_PHY_RESTART_ENA 0x01
@@ -609,6 +622,12 @@
609#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff 622#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
610#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0 623#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
611 624
625#define AR_PHY_BB_THERM_ADC_3 (AR_SM_BASE + 0x250)
626#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN 0x0001ff00
627#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN_S 8
628#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET 0x000000ff
629#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_S 0
630
612#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254) 631#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254)
613#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff 632#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff
614#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0 633#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0
@@ -630,8 +649,8 @@
630#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1 649#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1
631 650
632#define AR_PHY_65NM_CH0_SYNTH4 0x1608c 651#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
633#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002) 652#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
634#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 0 : 1) 653#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
635#define AR_PHY_65NM_CH0_SYNTH7 0x16098 654#define AR_PHY_65NM_CH0_SYNTH7 0x16098
636#define AR_PHY_65NM_CH0_BIAS1 0x160c0 655#define AR_PHY_65NM_CH0_BIAS1 0x160c0
637#define AR_PHY_65NM_CH0_BIAS2 0x160c4 656#define AR_PHY_65NM_CH0_BIAS2 0x160c4
@@ -641,7 +660,7 @@
641#define AR_PHY_65NM_CH2_RXTX4 0x1690c 660#define AR_PHY_65NM_CH2_RXTX4 0x1690c
642 661
643#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \ 662#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
644 ((AR_SREV_9462(ah) ? 0x1628c : 0x16280))) 663 (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
645#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300) 664#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
646#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8) 665#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
647 666
@@ -669,7 +688,7 @@
669#define AR_SWITCH_TABLE_ALL_S (0) 688#define AR_SWITCH_TABLE_ALL_S (0)
670 689
671#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ 690#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
672 (AR_SREV_9462(ah) ? 0x16294 : 0x1628c)) 691 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
673 692
674#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 693#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
675#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 694#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -691,17 +710,17 @@
691#define AR_CH0_TOP2_XPABIASLVL_S 12 710#define AR_CH0_TOP2_XPABIASLVL_S 12
692 711
693#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ 712#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
694 (AR_SREV_9462(ah) ? 0x16298 : 0x16290)) 713 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : 0x16290))
695#define AR_CH0_XTAL_CAPINDAC 0x7f000000 714#define AR_CH0_XTAL_CAPINDAC 0x7f000000
696#define AR_CH0_XTAL_CAPINDAC_S 24 715#define AR_CH0_XTAL_CAPINDAC_S 24
697#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 716#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
698#define AR_CH0_XTAL_CAPOUTDAC_S 17 717#define AR_CH0_XTAL_CAPOUTDAC_S 17
699 718
700#define AR_PHY_PMU1 (AR_SREV_9462(ah) ? 0x16340 : 0x16c40) 719#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : 0x16c40)
701#define AR_PHY_PMU1_PWD 0x1 720#define AR_PHY_PMU1_PWD 0x1
702#define AR_PHY_PMU1_PWD_S 0 721#define AR_PHY_PMU1_PWD_S 0
703 722
704#define AR_PHY_PMU2 (AR_SREV_9462(ah) ? 0x16344 : 0x16c44) 723#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : 0x16c44)
705#define AR_PHY_PMU2_PGM 0x00200000 724#define AR_PHY_PMU2_PGM 0x00200000
706#define AR_PHY_PMU2_PGM_S 21 725#define AR_PHY_PMU2_PGM_S 21
707 726
@@ -881,6 +900,8 @@
881 900
882#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000 901#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
883#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28 902#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
903#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR 0x20000000
904#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR_S 29
884 905
885#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000 906#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000
886#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30 907#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30
@@ -1244,4 +1265,24 @@
1244#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f 1265#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f
1245#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0 1266#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0
1246 1267
1268#define AR_BTCOEX_WL_LNADIV 0x1a64
1269#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD 0x00003FFF
1270#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD_S 0
1271#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY 0x00004000
1272#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY_S 14
1273#define AR_BTCOEX_WL_LNADIV_FORCE_ON 0x00008000
1274#define AR_BTCOEX_WL_LNADIV_FORCE_ON_S 15
1275#define AR_BTCOEX_WL_LNADIV_MODE_OPTION 0x00030000
1276#define AR_BTCOEX_WL_LNADIV_MODE_OPTION_S 16
1277#define AR_BTCOEX_WL_LNADIV_MODE 0x007c0000
1278#define AR_BTCOEX_WL_LNADIV_MODE_S 18
1279#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ 0x00800000
1280#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ_S 23
1281#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE 0x01000000
1282#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE_S 24
1283#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT 0x02000000
1284#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT_S 25
1285#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD 0xFC000000
1286#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S 26
1287
1247#endif /* AR9003_PHY_H */ 1288#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 4ef7dcccaa2f..58f30f65c6b6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -58,7 +58,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
61 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, 61 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282},
62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
new file mode 100644
index 000000000000..843e79f67ff2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -0,0 +1,1231 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef INITVALS_9565_1P0_H
19#define INITVALS_9565_1P0_H
20
21/* AR9565 1.0 */
22
23static const u32 ar9565_1p0_mac_core[][2] = {
24 /* Addr allmodes */
25 {0x00000008, 0x00000000},
26 {0x00000030, 0x000a0085},
27 {0x00000034, 0x00000005},
28 {0x00000040, 0x00000000},
29 {0x00000044, 0x00000000},
30 {0x00000048, 0x00000008},
31 {0x0000004c, 0x00000010},
32 {0x00000050, 0x00000000},
33 {0x00001040, 0x002ffc0f},
34 {0x00001044, 0x002ffc0f},
35 {0x00001048, 0x002ffc0f},
36 {0x0000104c, 0x002ffc0f},
37 {0x00001050, 0x002ffc0f},
38 {0x00001054, 0x002ffc0f},
39 {0x00001058, 0x002ffc0f},
40 {0x0000105c, 0x002ffc0f},
41 {0x00001060, 0x002ffc0f},
42 {0x00001064, 0x002ffc0f},
43 {0x000010f0, 0x00000100},
44 {0x00001270, 0x00000000},
45 {0x000012b0, 0x00000000},
46 {0x000012f0, 0x00000000},
47 {0x0000143c, 0x00000000},
48 {0x0000147c, 0x00000000},
49 {0x00001810, 0x0f000003},
50 {0x00008000, 0x00000000},
51 {0x00008004, 0x00000000},
52 {0x00008008, 0x00000000},
53 {0x0000800c, 0x00000000},
54 {0x00008018, 0x00000000},
55 {0x00008020, 0x00000000},
56 {0x00008038, 0x00000000},
57 {0x0000803c, 0x00000000},
58 {0x00008040, 0x00000000},
59 {0x00008044, 0x00000000},
60 {0x00008048, 0x00000000},
61 {0x00008054, 0x00000000},
62 {0x00008058, 0x00000000},
63 {0x0000805c, 0x000fc78f},
64 {0x00008060, 0x0000000f},
65 {0x00008064, 0x00000000},
66 {0x00008070, 0x00000310},
67 {0x00008074, 0x00000020},
68 {0x00008078, 0x00000000},
69 {0x0000809c, 0x0000000f},
70 {0x000080a0, 0x00000000},
71 {0x000080a4, 0x02ff0000},
72 {0x000080a8, 0x0e070605},
73 {0x000080ac, 0x0000000d},
74 {0x000080b0, 0x00000000},
75 {0x000080b4, 0x00000000},
76 {0x000080b8, 0x00000000},
77 {0x000080bc, 0x00000000},
78 {0x000080c0, 0x2a800000},
79 {0x000080c4, 0x06900168},
80 {0x000080c8, 0x13881c20},
81 {0x000080cc, 0x01f40000},
82 {0x000080d0, 0x00252500},
83 {0x000080d4, 0x00b00005},
84 {0x000080d8, 0x00400002},
85 {0x000080dc, 0x00000000},
86 {0x000080e0, 0xffffffff},
87 {0x000080e4, 0x0000ffff},
88 {0x000080e8, 0x3f3f3f3f},
89 {0x000080ec, 0x00000000},
90 {0x000080f0, 0x00000000},
91 {0x000080f4, 0x00000000},
92 {0x000080fc, 0x00020000},
93 {0x00008100, 0x00000000},
94 {0x00008108, 0x00000052},
95 {0x0000810c, 0x00000000},
96 {0x00008110, 0x00000000},
97 {0x00008114, 0x000007ff},
98 {0x00008118, 0x000000aa},
99 {0x0000811c, 0x00003210},
100 {0x00008124, 0x00000000},
101 {0x00008128, 0x00000000},
102 {0x0000812c, 0x00000000},
103 {0x00008130, 0x00000000},
104 {0x00008134, 0x00000000},
105 {0x00008138, 0x00000000},
106 {0x0000813c, 0x0000ffff},
107 {0x00008144, 0xffffffff},
108 {0x00008168, 0x00000000},
109 {0x0000816c, 0x00000000},
110 {0x00008170, 0x18486200},
111 {0x00008174, 0x33332210},
112 {0x00008178, 0x00000000},
113 {0x0000817c, 0x00020000},
114 {0x000081c4, 0x33332210},
115 {0x000081c8, 0x00000000},
116 {0x000081cc, 0x00000000},
117 {0x000081d4, 0x00000000},
118 {0x000081ec, 0x00000000},
119 {0x000081f0, 0x00000000},
120 {0x000081f4, 0x00000000},
121 {0x000081f8, 0x00000000},
122 {0x000081fc, 0x00000000},
123 {0x00008240, 0x00100000},
124 {0x00008244, 0x0010f424},
125 {0x00008248, 0x00000800},
126 {0x0000824c, 0x0001e848},
127 {0x00008250, 0x00000000},
128 {0x00008254, 0x00000000},
129 {0x00008258, 0x00000000},
130 {0x0000825c, 0x40000000},
131 {0x00008260, 0x00080922},
132 {0x00008264, 0x9d400010},
133 {0x00008268, 0xffffffff},
134 {0x0000826c, 0x0000ffff},
135 {0x00008270, 0x00000000},
136 {0x00008274, 0x40000000},
137 {0x00008278, 0x003e4180},
138 {0x0000827c, 0x00000004},
139 {0x00008284, 0x0000002c},
140 {0x00008288, 0x0000002c},
141 {0x0000828c, 0x000000ff},
142 {0x00008294, 0x00000000},
143 {0x00008298, 0x00000000},
144 {0x0000829c, 0x00000000},
145 {0x00008300, 0x00000140},
146 {0x00008314, 0x00000000},
147 {0x0000831c, 0x0000010d},
148 {0x00008328, 0x00000000},
149 {0x0000832c, 0x0000001f},
150 {0x00008330, 0x00000302},
151 {0x00008334, 0x00000700},
152 {0x00008338, 0xffff0000},
153 {0x0000833c, 0x02400000},
154 {0x00008340, 0x000107ff},
155 {0x00008344, 0xaa48105b},
156 {0x00008348, 0x008f0000},
157 {0x0000835c, 0x00000000},
158 {0x00008360, 0xffffffff},
159 {0x00008364, 0xffffffff},
160 {0x00008368, 0x00000000},
161 {0x00008370, 0x00000000},
162 {0x00008374, 0x000000ff},
163 {0x00008378, 0x00000000},
164 {0x0000837c, 0x00000000},
165 {0x00008380, 0xffffffff},
166 {0x00008384, 0xffffffff},
167 {0x00008390, 0xffffffff},
168 {0x00008394, 0xffffffff},
169 {0x00008398, 0x00000000},
170 {0x0000839c, 0x00000000},
171 {0x000083a4, 0x0000fa14},
172 {0x000083a8, 0x000f0c00},
173 {0x000083ac, 0x33332210},
174 {0x000083b0, 0x33332210},
175 {0x000083b4, 0x33332210},
176 {0x000083b8, 0x33332210},
177 {0x000083bc, 0x00000000},
178 {0x000083c0, 0x00000000},
179 {0x000083c4, 0x00000000},
180 {0x000083c8, 0x00000000},
181 {0x000083cc, 0x00000200},
182 {0x000083d0, 0x800301ff},
183};
184
185static const u32 ar9565_1p0_mac_postamble[][5] = {
186 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
187 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
188 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
189 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
190 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
191 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
192 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
193 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
194 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
195};
196
/*
 * AR9565 1.0 baseband core registers, mode-independent ("allmodes"):
 * one { register address, value } pair per row, written once at chip init.
 * Values are vendor calibration data and must not be edited by hand.
 */
static const u32 ar9565_1p0_baseband_core[][2] = {
	/* Addr      allmodes  */
	{0x00009800, 0xafe68e30},
	{0x00009804, 0xfd14e000},
	{0x00009808, 0x9c0a8f6b},
	{0x0000980c, 0x04800000},
	{0x00009814, 0x9280c00a},
	{0x00009818, 0x00000000},
	{0x0000981c, 0x00020028},
	{0x00009834, 0x6400a290},
	{0x00009838, 0x0108ecff},
	{0x0000983c, 0x0d000600},
	{0x00009880, 0x201fff00},
	{0x00009884, 0x00001042},
	{0x000098a4, 0x00200400},
	{0x000098b0, 0x32840bbe},
	{0x000098d0, 0x004b6a8e},
	{0x000098d4, 0x00000820},
	{0x000098dc, 0x00000000},
	{0x000098e4, 0x01ffffff},
	{0x000098e8, 0x01ffffff},
	{0x000098ec, 0x01ffffff},
	{0x000098f0, 0x00000000},
	{0x000098f4, 0x00000000},
	{0x00009bf0, 0x80000000},
	{0x00009c04, 0xff55ff55},
	{0x00009c08, 0x0320ff55},
	{0x00009c0c, 0x00000000},
	{0x00009c10, 0x00000000},
	{0x00009c14, 0x00046384},
	{0x00009c18, 0x05b6b440},
	{0x00009c1c, 0x00b6b440},
	{0x00009d00, 0xc080a333},
	{0x00009d04, 0x40206c10},
	{0x00009d08, 0x009c4060},
	{0x00009d0c, 0x1883800a},
	{0x00009d10, 0x01834061},
	{0x00009d14, 0x00c00400},
	{0x00009d18, 0x00000000},
	{0x00009e08, 0x0078230c},
	{0x00009e24, 0x990bb515},
	{0x00009e28, 0x126f0000},
	{0x00009e30, 0x06336f77},
	{0x00009e34, 0x6af6532f},
	{0x00009e38, 0x0cc80c00},
	{0x00009e40, 0x0d261820},
	{0x00009e4c, 0x00001004},
	{0x00009e50, 0x00ff03f1},
	{0x00009e54, 0xe4c355c7},
	{0x00009e5c, 0xe9198724},
	{0x00009fc0, 0x823e4fc8},
	{0x00009fc4, 0x0001efb5},
	{0x00009fcc, 0x40000014},
	{0x0000a20c, 0x00000000},
	{0x0000a220, 0x00000000},
	{0x0000a224, 0x00000000},
	{0x0000a228, 0x10002310},
	{0x0000a23c, 0x00000000},
	{0x0000a244, 0x0c000000},
	{0x0000a2a0, 0x00000001},
	{0x0000a2c0, 0x00000001},
	{0x0000a2c8, 0x00000000},
	{0x0000a2cc, 0x18c43433},
	{0x0000a2d4, 0x00000000},
	{0x0000a2ec, 0x00000000},
	{0x0000a2f0, 0x00000000},
	{0x0000a2f4, 0x00000000},
	{0x0000a2f8, 0x00000000},
	{0x0000a344, 0x00000000},
	{0x0000a34c, 0x00000000},
	{0x0000a350, 0x0000a000},
	{0x0000a364, 0x00000000},
	{0x0000a370, 0x00000000},
	{0x0000a390, 0x00000001},
	{0x0000a394, 0x00000444},
	{0x0000a398, 0x001f0e0f},
	{0x0000a39c, 0x0075393f},
	{0x0000a3a0, 0xb79f6427},
	{0x0000a3a4, 0x00000000},
	{0x0000a3a8, 0xaaaaaaaa},
	{0x0000a3ac, 0x3c466478},
	{0x0000a3c0, 0x20202020},
	{0x0000a3c4, 0x22222220},
	{0x0000a3c8, 0x20200020},
	{0x0000a3cc, 0x20202020},
	{0x0000a3d0, 0x20202020},
	{0x0000a3d4, 0x20202020},
	{0x0000a3d8, 0x20202020},
	{0x0000a3dc, 0x20202020},
	{0x0000a3e0, 0x20202020},
	{0x0000a3e4, 0x20202020},
	{0x0000a3e8, 0x20202020},
	{0x0000a3ec, 0x20202020},
	{0x0000a3f0, 0x00000000},
	{0x0000a3f4, 0x00000006},
	{0x0000a3f8, 0x0c9bd380},
	{0x0000a3fc, 0x000f0f01},
	{0x0000a400, 0x8fa91f01},
	{0x0000a404, 0x00000000},
	{0x0000a408, 0x0e79e5c6},
	{0x0000a40c, 0x00820820},
	{0x0000a414, 0x1ce739ce},
	{0x0000a418, 0x2d001dce},
	{0x0000a41c, 0x1ce739ce},
	{0x0000a420, 0x000001ce},
	{0x0000a424, 0x1ce739ce},
	{0x0000a428, 0x000001ce},
	{0x0000a42c, 0x1ce739ce},
	{0x0000a430, 0x1ce739ce},
	{0x0000a434, 0x00000000},
	{0x0000a438, 0x00001801},
	{0x0000a43c, 0x00000000},
	{0x0000a440, 0x00000000},
	{0x0000a444, 0x00000000},
	{0x0000a448, 0x05000096},
	{0x0000a44c, 0x00000001},
	{0x0000a450, 0x00010000},
	{0x0000a454, 0x03000000},
	{0x0000a458, 0x00000000},
	{0x0000a644, 0xbfad9d74},
	{0x0000a648, 0x0048060a},
	{0x0000a64c, 0x00003c37},
	{0x0000a670, 0x03020100},
	{0x0000a674, 0x09080504},
	{0x0000a678, 0x0d0c0b0a},
	{0x0000a67c, 0x13121110},
	{0x0000a680, 0x31301514},
	{0x0000a684, 0x35343332},
	{0x0000a688, 0x00000036},
	{0x0000a690, 0x00000838},
	{0x0000a6b4, 0x00512c01},
	{0x0000a7c0, 0x00000000},
	{0x0000a7c4, 0xfffffffc},
	{0x0000a7c8, 0x00000000},
	{0x0000a7cc, 0x00000000},
	{0x0000a7d0, 0x00000000},
	{0x0000a7d4, 0x00000004},
	{0x0000a7dc, 0x00000001},
	{0x0000a7f0, 0x80000000},
};
337
/*
 * AR9565 1.0 baseband post-amble: per-channel-mode baseband registers
 * written after the mode-independent baseband core table.  Each row is
 * { register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 }.
 */
static const u32 ar9565_1p0_baseband_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
	{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
	{0x0000a204, 0x07318fc0, 0x07318fc4, 0x07318fc4, 0x07318fc0},
	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
	{0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
	{0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
	{0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
	{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
	{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
	{0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
	{0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
	{0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
	{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
	{0x0000a288, 0x00100510, 0x00100510, 0x00100510, 0x00100510},
	{0x0000a28c, 0x00021551, 0x00021551, 0x00021551, 0x00021551},
	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
	{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
383
/*
 * AR9565 1.0 radio (RF synthesizer/analog) core registers,
 * mode-independent ("allmodes"): { register address, value } pairs.
 */
static const u32 ar9565_1p0_radio_core[][2] = {
	/* Addr      allmodes  */
	{0x00016000, 0x36db6db6},
	{0x00016004, 0x6db6db40},
	{0x00016008, 0x73f00000},
	{0x0001600c, 0x00000000},
	{0x00016010, 0x6d823601},
	{0x00016040, 0x7f80fff8},
	{0x0001604c, 0x1c99e04f},
	{0x00016050, 0x6db6db6c},
	{0x00016058, 0x6c200000},
	{0x00016080, 0x000c0000},
	{0x00016084, 0x9a68048c},
	{0x00016088, 0x54214514},
	{0x0001608c, 0x1203040b},
	{0x00016090, 0x24926490},
	{0x00016098, 0xd28b3330},
	{0x000160a0, 0x0a108ffe},
	{0x000160a4, 0x812fc491},
	{0x000160a8, 0x423c8000},
	{0x000160b4, 0x92000000},
	{0x000160b8, 0x0285dddc},
	{0x000160bc, 0x02908888},
	{0x000160c0, 0x006db6d0},
	{0x000160c4, 0x6dd6db60},
	{0x000160c8, 0x6db6db6c},
	{0x000160cc, 0x6de6c1b0},
	{0x00016100, 0x3fffbe04},
	{0x00016104, 0xfff80000},
	{0x00016108, 0x00200400},
	{0x00016110, 0x00000000},
	{0x00016144, 0x02084080},
	{0x00016148, 0x000080c0},
	{0x00016280, 0x050a0001},
	{0x00016284, 0x3d841440},
	{0x00016288, 0x00000000},
	{0x0001628c, 0xe3000000},
	{0x00016290, 0xa1004080},
	{0x00016294, 0x40000028},
	{0x00016298, 0x55aa2900},
	{0x00016340, 0x131c827a},
	{0x00016344, 0x00300000},
};
427
/*
 * AR9565 1.0 radio post-amble: per-channel-mode radio registers
 * written after the radio core table.  Each row is
 * { register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 }.
 */
static const u32 ar9565_1p0_radio_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
	{0x000160ac, 0xa4646c08, 0xa4646c08, 0xa4646c08, 0xa4646c08},
	{0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
	{0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
436
/*
 * AR9565 1.0 SoC pre-amble: mode-independent SoC/host-interface registers
 * written before the per-mode tables.  { register address, value } pairs.
 */
static const u32 ar9565_1p0_soc_preamble[][2] = {
	/* Addr      allmodes  */
	{0x00004078, 0x00000002},
	{0x000040a4, 0x00a0c9c9},
	{0x00007020, 0x00000000},
	{0x00007034, 0x00000002},
	{0x00007038, 0x000004c2},
};
445
/*
 * AR9565 1.0 SoC post-amble: per-channel-mode SoC register, written last.
 * Row layout: { register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 }.
 */
static const u32 ar9565_1p0_soc_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
};
450
/*
 * AR9565 1.0 common RX gain table (default, with external LNA),
 * mode-independent ("allmodes"): { register address, value } pairs
 * programming the 0xa0xx/0xb0xx RX gain lookup registers.
 */
static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
	/* Addr      allmodes  */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x00830082},
	{0x0000a014, 0x01810180},
	{0x0000a018, 0x01830182},
	{0x0000a01c, 0x01850184},
	{0x0000a020, 0x01890188},
	{0x0000a024, 0x018b018a},
	{0x0000a028, 0x018d018c},
	{0x0000a02c, 0x01910190},
	{0x0000a030, 0x01930192},
	{0x0000a034, 0x01950194},
	{0x0000a038, 0x038a0196},
	{0x0000a03c, 0x038c038b},
	{0x0000a040, 0x0390038d},
	{0x0000a044, 0x03920391},
	{0x0000a048, 0x03940393},
	{0x0000a04c, 0x03960395},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x22222229},
	{0x0000a084, 0x1d1d1d1d},
	{0x0000a088, 0x1d1d1d1d},
	{0x0000a08c, 0x1d1d1d1d},
	{0x0000a090, 0x171d1d1d},
	{0x0000a094, 0x11111717},
	{0x0000a098, 0x00030311},
	{0x0000a09c, 0x00000000},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x001f0000},
	{0x0000a0c4, 0x01000101},
	{0x0000a0c8, 0x011e011f},
	{0x0000a0cc, 0x011c011d},
	{0x0000a0d0, 0x02030204},
	{0x0000a0d4, 0x02010202},
	{0x0000a0d8, 0x021f0200},
	{0x0000a0dc, 0x0302021e},
	{0x0000a0e0, 0x03000301},
	{0x0000a0e4, 0x031e031f},
	{0x0000a0e8, 0x0402031d},
	{0x0000a0ec, 0x04000401},
	{0x0000a0f0, 0x041e041f},
	{0x0000a0f4, 0x0502041d},
	{0x0000a0f8, 0x05000501},
	{0x0000a0fc, 0x051e051f},
	{0x0000a100, 0x06010602},
	{0x0000a104, 0x061f0600},
	{0x0000a108, 0x061d061e},
	{0x0000a10c, 0x07020703},
	{0x0000a110, 0x07000701},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x001f0000},
	{0x0000a144, 0x01000101},
	{0x0000a148, 0x011e011f},
	{0x0000a14c, 0x011c011d},
	{0x0000a150, 0x02030204},
	{0x0000a154, 0x02010202},
	{0x0000a158, 0x021f0200},
	{0x0000a15c, 0x0302021e},
	{0x0000a160, 0x03000301},
	{0x0000a164, 0x031e031f},
	{0x0000a168, 0x0402031d},
	{0x0000a16c, 0x04000401},
	{0x0000a170, 0x041e041f},
	{0x0000a174, 0x0502041d},
	{0x0000a178, 0x05000501},
	{0x0000a17c, 0x051e051f},
	{0x0000a180, 0x06010602},
	{0x0000a184, 0x061f0600},
	{0x0000a188, 0x061d061e},
	{0x0000a18c, 0x07020703},
	{0x0000a190, 0x07000701},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
710
/*
 * AR9565 1.0 TX gain table for the "lowest ob/db" PA bias setting.
 * Per-channel-mode rows: { register address, 5G_HT20, 5G_HT40,
 * 2G_HT40, 2G_HT20 }.  0xa5xx entries are the per-rate TX power
 * lookup; 0x160xx entries are the matching RF bias registers.
 */
static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
	{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
	{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
	{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
	{0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
770
/*
 * AR9565 1.0 PCIe PHY serdes settings: PLL kept on, CLKREQ disabled
 * while in L1 power state.  { register address, value } pairs.
 */
static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
	/* Addr      allmodes  */
	{0x00018c00, 0x18212ede},
	{0x00018c04, 0x000801d8},
	{0x00018c08, 0x0003780c},
};
777
/*
 * AR9565 1.0 fast-clock overrides: timing registers re-programmed when
 * the chip runs from the faster reference clock (5 GHz modes only).
 * Row layout: { register address, 5G_HT20, 5G_HT40 }.
 */
static const u32 ar9565_1p0_modes_fast_clock[][3] = {
	/* Addr      5G_HT20     5G_HT40   */
	{0x00001030, 0x00000268, 0x000004d0},
	{0x00001070, 0x0000018c, 0x00000318},
	{0x000010b0, 0x00000fd0, 0x00001fa0},
	{0x00008014, 0x044c044c, 0x08980898},
	{0x0000801c, 0x148ec02b, 0x148ec057},
	{0x00008318, 0x000044c0, 0x00008980},
	{0x00009e00, 0x03721821, 0x03721821},
	{0x0000a230, 0x0000400b, 0x00004016},
	{0x0000a254, 0x00000898, 0x00001130},
};
790
/*
 * AR9565 1.0 RX gain table for boards WITHOUT an external LNA
 * ("wo_xlna"): same register layout as the common RX gain table but
 * with gain steps recalibrated for the internal LNA only.
 * Mode-independent { register address, value } pairs.
 */
static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
	/* Addr      allmodes  */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x00830082},
	{0x0000a014, 0x01810180},
	{0x0000a018, 0x01830182},
	{0x0000a01c, 0x01850184},
	{0x0000a020, 0x01890188},
	{0x0000a024, 0x018b018a},
	{0x0000a028, 0x018d018c},
	{0x0000a02c, 0x03820190},
	{0x0000a030, 0x03840383},
	{0x0000a034, 0x03880385},
	{0x0000a038, 0x038a0389},
	{0x0000a03c, 0x038c038b},
	{0x0000a040, 0x0390038d},
	{0x0000a044, 0x03920391},
	{0x0000a048, 0x03940393},
	{0x0000a04c, 0x03960395},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x29292929},
	{0x0000a084, 0x29292929},
	{0x0000a088, 0x29292929},
	{0x0000a08c, 0x29292929},
	{0x0000a090, 0x22292929},
	{0x0000a094, 0x1d1d2222},
	{0x0000a098, 0x0c111117},
	{0x0000a09c, 0x00030303},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x00bf00a0},
	{0x0000a0c4, 0x11a011a1},
	{0x0000a0c8, 0x11be11bf},
	{0x0000a0cc, 0x11bc11bd},
	{0x0000a0d0, 0x22632264},
	{0x0000a0d4, 0x22612262},
	{0x0000a0d8, 0x227f2260},
	{0x0000a0dc, 0x4322227e},
	{0x0000a0e0, 0x43204321},
	{0x0000a0e4, 0x433e433f},
	{0x0000a0e8, 0x4462433d},
	{0x0000a0ec, 0x44604461},
	{0x0000a0f0, 0x447e447f},
	{0x0000a0f4, 0x5582447d},
	{0x0000a0f8, 0x55805581},
	{0x0000a0fc, 0x559e559f},
	{0x0000a100, 0x66816682},
	{0x0000a104, 0x669f6680},
	{0x0000a108, 0x669d669e},
	{0x0000a10c, 0x77627763},
	{0x0000a110, 0x77607761},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x00bf00a0},
	{0x0000a144, 0x11a011a1},
	{0x0000a148, 0x11be11bf},
	{0x0000a14c, 0x11bc11bd},
	{0x0000a150, 0x22632264},
	{0x0000a154, 0x22612262},
	{0x0000a158, 0x227f2260},
	{0x0000a15c, 0x4322227e},
	{0x0000a160, 0x43204321},
	{0x0000a164, 0x433e433f},
	{0x0000a168, 0x4462433d},
	{0x0000a16c, 0x44604461},
	{0x0000a170, 0x447e447f},
	{0x0000a174, 0x5582447d},
	{0x0000a178, 0x55805581},
	{0x0000a17c, 0x559e559f},
	{0x0000a180, 0x66816682},
	{0x0000a184, 0x669f6680},
	{0x0000a188, 0x669d669e},
	{0x0000a18c, 0x77627763},
	{0x0000a190, 0x77607761},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
1050
/*
 * AR9565 1.0 TX gain table for the "low ob/db" PA bias setting.
 * Identical register layout to the lowest-ob/db table: per-channel-mode
 * rows { register address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 }.
 */
static const u32 ar9565_1p0_modes_low_ob_db_tx_gain_table[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
	{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
	{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
	{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
	{0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
1110
1111static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
1112 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1113 {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
1114 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1115 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1116 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1117 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1118 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1119 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1120 {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
1121 {0x0000a50c, 0x10022223, 0x10022223, 0x0c000200, 0x0c000200},
1122 {0x0000a510, 0x15022620, 0x15022620, 0x10000202, 0x10000202},
1123 {0x0000a514, 0x19022622, 0x19022622, 0x13000400, 0x13000400},
1124 {0x0000a518, 0x1c022822, 0x1c022822, 0x17000402, 0x17000402},
1125 {0x0000a51c, 0x21022842, 0x21022842, 0x1b000404, 0x1b000404},
1126 {0x0000a520, 0x24022c41, 0x24022c41, 0x1e000603, 0x1e000603},
1127 {0x0000a524, 0x29023042, 0x29023042, 0x23000a02, 0x23000a02},
1128 {0x0000a528, 0x2d023044, 0x2d023044, 0x27000a04, 0x27000a04},
1129 {0x0000a52c, 0x31023644, 0x31023644, 0x2a000a20, 0x2a000a20},
1130 {0x0000a530, 0x36025643, 0x36025643, 0x2e000e20, 0x2e000e20},
1131 {0x0000a534, 0x3a025a44, 0x3a025a44, 0x32000e22, 0x32000e22},
1132 {0x0000a538, 0x3d025e45, 0x3d025e45, 0x36000e24, 0x36000e24},
1133 {0x0000a53c, 0x43025e4a, 0x43025e4a, 0x3a001640, 0x3a001640},
1134 {0x0000a540, 0x4a025e6c, 0x4a025e6c, 0x3e001660, 0x3e001660},
1135 {0x0000a544, 0x50025e8e, 0x50025e8e, 0x41001861, 0x41001861},
1136 {0x0000a548, 0x56025eb2, 0x56025eb2, 0x45001a81, 0x45001a81},
1137 {0x0000a54c, 0x5c025eb5, 0x5c025eb5, 0x49001a83, 0x49001a83},
1138 {0x0000a550, 0x62025ef6, 0x62025ef6, 0x4c001c84, 0x4c001c84},
1139 {0x0000a554, 0x65025f56, 0x65025f56, 0x4f001ce3, 0x4f001ce3},
1140 {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
1141 {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
1142 {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
1143 {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1144 {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1145 {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1146 {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1147 {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1148 {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1149 {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1150 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1151 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1152 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1153 {0x0000a60c, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
1154 {0x0000a610, 0x00804201, 0x00804201, 0x00000000, 0x00000000},
1155 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
1156 {0x0000a618, 0x00804201, 0x00804201, 0x01404501, 0x01404501},
1157 {0x0000a61c, 0x02008201, 0x02008201, 0x02008501, 0x02008501},
1158 {0x0000a620, 0x02c10a03, 0x02c10a03, 0x0280ca03, 0x0280ca03},
1159 {0x0000a624, 0x04815205, 0x04815205, 0x02c10b04, 0x02c10b04},
1160 {0x0000a628, 0x0581d406, 0x0581d406, 0x03814b04, 0x03814b04},
1161 {0x0000a62c, 0x0581d607, 0x0581d607, 0x05018e05, 0x05018e05},
1162 {0x0000a630, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1163 {0x0000a634, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1164 {0x0000a638, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1165 {0x0000a63c, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1166 {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
1167 {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
1168 {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
1169};
1170
1171static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
1172 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1173 {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
1174 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1175 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1176 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1177 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1178 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1179 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1180 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
1181 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
1182 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
1183 {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
1184 {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
1185 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
1186 {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
1187 {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
1188 {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
1189 {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
1190 {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
1191 {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
1192 {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
1193 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
1194 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
1195 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
1196 {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
1197 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
1198 {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
1199 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
1200 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
1201 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
1202 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
1203 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1204 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1205 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1206 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1207 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1208 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1209 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1210 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1211 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1212 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1213 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1214 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1215 {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1216 {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1217 {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1218 {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1219 {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1220 {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1221 {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1222 {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1223 {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1224 {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1225 {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1226 {0x00016044, 0x056d82e6, 0x056d82e6, 0x056d82e6, 0x056d82e6},
1227 {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1228 {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1229};
1230
1231#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b09285c36c4a..dfe6a4707fd2 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -173,6 +173,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
173 173
174#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)]) 174#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
175 175
176#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
177
176#define ATH_TX_COMPLETE_POLL_INT 1000 178#define ATH_TX_COMPLETE_POLL_INT 1000
177 179
178enum ATH_AGGR_STATUS { 180enum ATH_AGGR_STATUS {
@@ -280,6 +282,7 @@ struct ath_tx_control {
280 struct ath_txq *txq; 282 struct ath_txq *txq;
281 struct ath_node *an; 283 struct ath_node *an;
282 u8 paprd; 284 u8 paprd;
285 struct ieee80211_sta *sta;
283}; 286};
284 287
285#define ATH_TX_ERROR 0x01 288#define ATH_TX_ERROR 0x01
@@ -422,7 +425,6 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
422void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif); 425void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
423void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif); 426void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
424void ath9k_set_beacon(struct ath_softc *sc); 427void ath9k_set_beacon(struct ath_softc *sc);
425void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
426 428
427/*******************/ 429/*******************/
428/* Link Monitoring */ 430/* Link Monitoring */
@@ -472,7 +474,7 @@ struct ath_btcoex {
472 unsigned long op_flags; 474 unsigned long op_flags;
473 int bt_stomp_type; /* Types of BT stomping */ 475 int bt_stomp_type; /* Types of BT stomping */
474 u32 btcoex_no_stomp; /* in usec */ 476 u32 btcoex_no_stomp; /* in usec */
475 u32 btcoex_period; /* in usec */ 477 u32 btcoex_period; /* in msec */
476 u32 btscan_no_stomp; /* in usec */ 478 u32 btscan_no_stomp; /* in usec */
477 u32 duty_cycle; 479 u32 duty_cycle;
478 u32 bt_wait_time; 480 u32 bt_wait_time;
@@ -537,6 +539,7 @@ struct ath9k_wow_pattern {
537#ifdef CONFIG_MAC80211_LEDS 539#ifdef CONFIG_MAC80211_LEDS
538void ath_init_leds(struct ath_softc *sc); 540void ath_init_leds(struct ath_softc *sc);
539void ath_deinit_leds(struct ath_softc *sc); 541void ath_deinit_leds(struct ath_softc *sc);
542void ath_fill_led_pin(struct ath_softc *sc);
540#else 543#else
541static inline void ath_init_leds(struct ath_softc *sc) 544static inline void ath_init_leds(struct ath_softc *sc)
542{ 545{
@@ -545,6 +548,9 @@ static inline void ath_init_leds(struct ath_softc *sc)
545static inline void ath_deinit_leds(struct ath_softc *sc) 548static inline void ath_deinit_leds(struct ath_softc *sc)
546{ 549{
547} 550}
551static inline void ath_fill_led_pin(struct ath_softc *sc)
552{
553}
548#endif 554#endif
549 555
550/*******************************/ 556/*******************************/
@@ -596,8 +602,6 @@ struct ath_ant_comb {
596 int main_conf; 602 int main_conf;
597 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf; 603 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
598 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf; 604 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
599 int first_bias;
600 int second_bias;
601 bool first_ratio; 605 bool first_ratio;
602 bool second_ratio; 606 bool second_ratio;
603 unsigned long scan_start_time; 607 unsigned long scan_start_time;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index acd437384fe4..419e9a3f2fed 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -43,8 +43,8 @@ static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
43 { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */ 43 { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
44}; 44};
45 45
46static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX] 46static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
47 [AR9300_NUM_WLAN_WEIGHTS] = { 47 [AR9300_NUM_WLAN_WEIGHTS] = {
48 { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */ 48 { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
49 { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */ 49 { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
50 { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */ 50 { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
@@ -208,14 +208,37 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
208 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); 208 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
209} 209}
210 210
211/*
212 * For AR9002, bt_weight/wlan_weight are used.
213 * For AR9003 and above, stomp_type is used.
214 */
211void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, 215void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
212 u32 bt_weight, 216 u32 bt_weight,
213 u32 wlan_weight) 217 u32 wlan_weight,
218 enum ath_stomp_type stomp_type)
214{ 219{
215 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 220 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
216 221
217 btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) | 222 if (AR_SREV_9300_20_OR_LATER(ah)) {
218 SM(wlan_weight, AR_BTCOEX_WL_WGHT); 223 const u32 *weight = ar9003_wlan_weights[stomp_type];
224 int i;
225
226 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
227 if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
228 btcoex_hw->mci.stomp_ftp)
229 stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
230 weight = mci_wlan_weights[stomp_type];
231 }
232
233 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
234 btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
235 btcoex_hw->wlan_weight[i] = weight[i];
236 }
237 } else {
238 btcoex_hw->bt_coex_weights =
239 SM(bt_weight, AR_BTCOEX_BT_WGHT) |
240 SM(wlan_weight, AR_BTCOEX_WL_WGHT);
241 }
219} 242}
220EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); 243EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
221 244
@@ -282,7 +305,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
282 ath9k_hw_btcoex_enable_2wire(ah); 305 ath9k_hw_btcoex_enable_2wire(ah);
283 break; 306 break;
284 case ATH_BTCOEX_CFG_3WIRE: 307 case ATH_BTCOEX_CFG_3WIRE:
285 if (AR_SREV_9462(ah)) { 308 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
286 ath9k_hw_btcoex_enable_mci(ah); 309 ath9k_hw_btcoex_enable_mci(ah);
287 return; 310 return;
288 } 311 }
@@ -304,7 +327,7 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
304 int i; 327 int i;
305 328
306 btcoex_hw->enabled = false; 329 btcoex_hw->enabled = false;
307 if (AR_SREV_9462(ah)) { 330 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
308 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 331 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
309 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) 332 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
310 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), 333 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
@@ -332,26 +355,6 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
332} 355}
333EXPORT_SYMBOL(ath9k_hw_btcoex_disable); 356EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
334 357
335static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
336 enum ath_stomp_type stomp_type)
337{
338 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
339 const u32 *weight = ar9003_wlan_weights[stomp_type];
340 int i;
341
342 if (AR_SREV_9462(ah)) {
343 if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
344 btcoex->mci.stomp_ftp)
345 stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
346 weight = ar9462_wlan_weights[stomp_type];
347 }
348
349 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
350 btcoex->bt_weight[i] = AR9300_BT_WGHT;
351 btcoex->wlan_weight[i] = weight[i];
352 }
353}
354
355/* 358/*
356 * Configures appropriate weight based on stomp type. 359 * Configures appropriate weight based on stomp type.
357 */ 360 */
@@ -359,22 +362,22 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
359 enum ath_stomp_type stomp_type) 362 enum ath_stomp_type stomp_type)
360{ 363{
361 if (AR_SREV_9300_20_OR_LATER(ah)) { 364 if (AR_SREV_9300_20_OR_LATER(ah)) {
362 ar9003_btcoex_bt_stomp(ah, stomp_type); 365 ath9k_hw_btcoex_set_weight(ah, 0, 0, stomp_type);
363 return; 366 return;
364 } 367 }
365 368
366 switch (stomp_type) { 369 switch (stomp_type) {
367 case ATH_BTCOEX_STOMP_ALL: 370 case ATH_BTCOEX_STOMP_ALL:
368 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 371 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
369 AR_STOMP_ALL_WLAN_WGHT); 372 AR_STOMP_ALL_WLAN_WGHT, 0);
370 break; 373 break;
371 case ATH_BTCOEX_STOMP_LOW: 374 case ATH_BTCOEX_STOMP_LOW:
372 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 375 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
373 AR_STOMP_LOW_WLAN_WGHT); 376 AR_STOMP_LOW_WLAN_WGHT, 0);
374 break; 377 break;
375 case ATH_BTCOEX_STOMP_NONE: 378 case ATH_BTCOEX_STOMP_NONE:
376 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 379 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
377 AR_STOMP_NONE_WLAN_WGHT); 380 AR_STOMP_NONE_WLAN_WGHT, 0);
378 break; 381 break;
379 default: 382 default:
380 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n"); 383 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 20092f98658f..385197ad79b0 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -107,7 +107,8 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
107void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum); 107void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
108void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, 108void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
109 u32 bt_weight, 109 u32 bt_weight,
110 u32 wlan_weight); 110 u32 wlan_weight,
111 enum ath_stomp_type stomp_type);
111void ath9k_hw_btcoex_disable(struct ath_hw *ah); 112void ath9k_hw_btcoex_disable(struct ath_hw *ah);
112void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah, 113void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
113 enum ath_stomp_type stomp_type); 114 enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c8ef30127adb..6727b566d294 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -222,6 +222,57 @@ static const struct file_operations fops_disable_ani = {
222 .llseek = default_llseek, 222 .llseek = default_llseek,
223}; 223};
224 224
225static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
226 size_t count, loff_t *ppos)
227{
228 struct ath_softc *sc = file->private_data;
229 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
230 char buf[32];
231 unsigned int len;
232
233 len = sprintf(buf, "%d\n", common->antenna_diversity);
234 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
235}
236
237static ssize_t write_file_ant_diversity(struct file *file,
238 const char __user *user_buf,
239 size_t count, loff_t *ppos)
240{
241 struct ath_softc *sc = file->private_data;
242 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
243 unsigned long antenna_diversity;
244 char buf[32];
245 ssize_t len;
246
247 len = min(count, sizeof(buf) - 1);
248 if (copy_from_user(buf, user_buf, len))
249 return -EFAULT;
250
251 if (!AR_SREV_9565(sc->sc_ah))
252 goto exit;
253
254 buf[len] = '\0';
255 if (strict_strtoul(buf, 0, &antenna_diversity))
256 return -EINVAL;
257
258 common->antenna_diversity = !!antenna_diversity;
259 ath9k_ps_wakeup(sc);
260 ath_ant_comb_update(sc);
261 ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
262 common->antenna_diversity);
263 ath9k_ps_restore(sc);
264exit:
265 return count;
266}
267
268static const struct file_operations fops_ant_diversity = {
269 .read = read_file_ant_diversity,
270 .write = write_file_ant_diversity,
271 .open = simple_open,
272 .owner = THIS_MODULE,
273 .llseek = default_llseek,
274};
275
225static ssize_t read_file_dma(struct file *file, char __user *user_buf, 276static ssize_t read_file_dma(struct file *file, char __user *user_buf,
226 size_t count, loff_t *ppos) 277 size_t count, loff_t *ppos)
227{ 278{
@@ -373,6 +424,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
373 sc->debug.stats.istats.tsfoor++; 424 sc->debug.stats.istats.tsfoor++;
374 if (status & ATH9K_INT_MCI) 425 if (status & ATH9K_INT_MCI)
375 sc->debug.stats.istats.mci++; 426 sc->debug.stats.istats.mci++;
427 if (status & ATH9K_INT_GENTIMER)
428 sc->debug.stats.istats.gen_timer++;
376} 429}
377 430
378static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, 431static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +471,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
418 PR_IS("DTIM", dtim); 471 PR_IS("DTIM", dtim);
419 PR_IS("TSFOOR", tsfoor); 472 PR_IS("TSFOOR", tsfoor);
420 PR_IS("MCI", mci); 473 PR_IS("MCI", mci);
474 PR_IS("GENTIMER", gen_timer);
421 PR_IS("TOTAL", total); 475 PR_IS("TOTAL", total);
422 476
423 len += snprintf(buf + len, mxlen - len, 477 len += snprintf(buf + len, mxlen - len,
@@ -1598,12 +1652,12 @@ int ath9k_init_debug(struct ath_hw *ah)
1598 debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc, 1652 debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
1599 &fops_samps); 1653 &fops_samps);
1600#endif 1654#endif
1601
1602 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, 1655 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
1603 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); 1656 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
1604
1605 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, 1657 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
1606 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); 1658 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
1659 debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
1660 sc->debug.debugfs_phy, sc, &fops_ant_diversity);
1607 1661
1608 return 0; 1662 return 0;
1609} 1663}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 8b9d080d89da..2ed9785a38fa 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -41,7 +41,6 @@ enum ath_reset_type {
41 RESET_TYPE_PLL_HANG, 41 RESET_TYPE_PLL_HANG,
42 RESET_TYPE_MAC_HANG, 42 RESET_TYPE_MAC_HANG,
43 RESET_TYPE_BEACON_STUCK, 43 RESET_TYPE_BEACON_STUCK,
44 RESET_TYPE_MCI,
45 __RESET_TYPE_MAX 44 __RESET_TYPE_MAX
46}; 45};
47 46
@@ -74,6 +73,8 @@ enum ath_reset_type {
74 * from a beacon differs from the PCU's internal TSF by more than a 73 * from a beacon differs from the PCU's internal TSF by more than a
75 * (programmable) threshold 74 * (programmable) threshold
76 * @local_timeout: Internal bus timeout. 75 * @local_timeout: Internal bus timeout.
76 * @mci: MCI interrupt, specific to MCI based BTCOEX chipsets
77 * @gen_timer: Generic hardware timer interrupt
77 */ 78 */
78struct ath_interrupt_stats { 79struct ath_interrupt_stats {
79 u32 total; 80 u32 total;
@@ -100,6 +101,7 @@ struct ath_interrupt_stats {
100 u32 bb_watchdog; 101 u32 bb_watchdog;
101 u32 tsfoor; 102 u32 tsfoor;
102 u32 mci; 103 u32 mci;
104 u32 gen_timer;
103 105
104 /* Sync-cause stats */ 106 /* Sync-cause stats */
105 u32 sync_cause_all; 107 u32 sync_cause_all;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 484b31305906..319c651fa6c5 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -96,6 +96,7 @@
96 96
97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) 97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
99#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
99#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) 100#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
100 101
101#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) 102#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
@@ -108,7 +109,7 @@
108#define EEP_RFSILENT_ENABLED_S 0 109#define EEP_RFSILENT_ENABLED_S 0
109#define EEP_RFSILENT_POLARITY 0x0002 110#define EEP_RFSILENT_POLARITY 0x0002
110#define EEP_RFSILENT_POLARITY_S 1 111#define EEP_RFSILENT_POLARITY_S 1
111#define EEP_RFSILENT_GPIO_SEL (AR_SREV_9462(ah) ? 0x00fc : 0x001c) 112#define EEP_RFSILENT_GPIO_SEL ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00fc : 0x001c)
112#define EEP_RFSILENT_GPIO_SEL_S 2 113#define EEP_RFSILENT_GPIO_SEL_S 2
113 114
114#define AR5416_OPFLAGS_11A 0x01 115#define AR5416_OPFLAGS_11A 0x01
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 9f83f71742a5..d9ed141a053e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -44,25 +44,6 @@ void ath_init_leds(struct ath_softc *sc)
44 if (AR_SREV_9100(sc->sc_ah)) 44 if (AR_SREV_9100(sc->sc_ah))
45 return; 45 return;
46 46
47 if (sc->sc_ah->led_pin < 0) {
48 if (AR_SREV_9287(sc->sc_ah))
49 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
50 else if (AR_SREV_9485(sc->sc_ah))
51 sc->sc_ah->led_pin = ATH_LED_PIN_9485;
52 else if (AR_SREV_9300(sc->sc_ah))
53 sc->sc_ah->led_pin = ATH_LED_PIN_9300;
54 else if (AR_SREV_9462(sc->sc_ah))
55 sc->sc_ah->led_pin = ATH_LED_PIN_9462;
56 else
57 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
58 }
59
60 /* Configure gpio 1 for output */
61 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
62 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
63 /* LED off, active low */
64 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
65
66 if (!led_blink) 47 if (!led_blink)
67 sc->led_cdev.default_trigger = 48 sc->led_cdev.default_trigger =
68 ieee80211_get_radio_led_name(sc->hw); 49 ieee80211_get_radio_led_name(sc->hw);
@@ -78,6 +59,31 @@ void ath_init_leds(struct ath_softc *sc)
78 59
79 sc->led_registered = true; 60 sc->led_registered = true;
80} 61}
62
63void ath_fill_led_pin(struct ath_softc *sc)
64{
65 struct ath_hw *ah = sc->sc_ah;
66
67 if (AR_SREV_9100(ah) || (ah->led_pin >= 0))
68 return;
69
70 if (AR_SREV_9287(ah))
71 ah->led_pin = ATH_LED_PIN_9287;
72 else if (AR_SREV_9485(sc->sc_ah))
73 ah->led_pin = ATH_LED_PIN_9485;
74 else if (AR_SREV_9300(sc->sc_ah))
75 ah->led_pin = ATH_LED_PIN_9300;
76 else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
77 ah->led_pin = ATH_LED_PIN_9462;
78 else
79 ah->led_pin = ATH_LED_PIN_DEF;
80
81 /* Configure gpio 1 for output */
82 ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
83
84 /* LED off, active low */
85 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
86}
81#endif 87#endif
82 88
83/*******************/ 89/*******************/
@@ -228,7 +234,12 @@ static void ath_btcoex_period_timer(unsigned long data)
228 ath9k_hw_btcoex_enable(ah); 234 ath9k_hw_btcoex_enable(ah);
229 spin_unlock_bh(&btcoex->btcoex_lock); 235 spin_unlock_bh(&btcoex->btcoex_lock);
230 236
231 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) { 237 /*
238 * btcoex_period is in msec while (btocex/btscan_)no_stomp are in usec,
239 * ensure that we properly convert btcoex_period to usec
240 * for any comparision with (btcoex/btscan_)no_stomp.
241 */
242 if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
232 if (btcoex->hw_timer_enabled) 243 if (btcoex->hw_timer_enabled)
233 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer); 244 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
234 245
@@ -309,8 +320,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
309 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); 320 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
310 321
311 /* make sure duty cycle timer is also stopped when resuming */ 322 /* make sure duty cycle timer is also stopped when resuming */
312 if (btcoex->hw_timer_enabled) 323 if (btcoex->hw_timer_enabled) {
313 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer); 324 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
325 btcoex->hw_timer_enabled = false;
326 }
314 327
315 btcoex->bt_priority_cnt = 0; 328 btcoex->bt_priority_cnt = 0;
316 btcoex->bt_priority_time = jiffies; 329 btcoex->bt_priority_time = jiffies;
@@ -331,18 +344,20 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
331 344
332 del_timer_sync(&btcoex->period_timer); 345 del_timer_sync(&btcoex->period_timer);
333 346
334 if (btcoex->hw_timer_enabled) 347 if (btcoex->hw_timer_enabled) {
335 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer); 348 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
336 349 btcoex->hw_timer_enabled = false;
337 btcoex->hw_timer_enabled = false; 350 }
338} 351}
339 352
340void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc) 353void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
341{ 354{
342 struct ath_btcoex *btcoex = &sc->btcoex; 355 struct ath_btcoex *btcoex = &sc->btcoex;
343 356
344 if (btcoex->hw_timer_enabled) 357 if (btcoex->hw_timer_enabled) {
345 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer); 358 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
359 btcoex->hw_timer_enabled = false;
360 }
346} 361}
347 362
348u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen) 363u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -380,7 +395,10 @@ void ath9k_start_btcoex(struct ath_softc *sc)
380 !ah->btcoex_hw.enabled) { 395 !ah->btcoex_hw.enabled) {
381 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) 396 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
382 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 397 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
383 AR_STOMP_LOW_WLAN_WGHT); 398 AR_STOMP_LOW_WLAN_WGHT, 0);
399 else
400 ath9k_hw_btcoex_set_weight(ah, 0, 0,
401 ATH_BTCOEX_STOMP_NONE);
384 ath9k_hw_btcoex_enable(ah); 402 ath9k_hw_btcoex_enable(ah);
385 403
386 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) 404 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
@@ -397,7 +415,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
397 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) 415 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
398 ath9k_btcoex_timer_pause(sc); 416 ath9k_btcoex_timer_pause(sc);
399 ath9k_hw_btcoex_disable(ah); 417 ath9k_hw_btcoex_disable(ah);
400 if (AR_SREV_9462(ah)) 418 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
401 ath_mci_flush_profile(&sc->btcoex.mci); 419 ath_mci_flush_profile(&sc->btcoex.mci);
402 } 420 }
403} 421}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index aa327adcc3d8..924c4616c3d9 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -973,8 +973,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
973static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) 973static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
974{ 974{
975 int transfer, err; 975 int transfer, err;
976 const void *data = hif_dev->firmware->data; 976 const void *data = hif_dev->fw_data;
977 size_t len = hif_dev->firmware->size; 977 size_t len = hif_dev->fw_size;
978 u32 addr = AR9271_FIRMWARE; 978 u32 addr = AR9271_FIRMWARE;
979 u8 *buf = kzalloc(4096, GFP_KERNEL); 979 u8 *buf = kzalloc(4096, GFP_KERNEL);
980 u32 firm_offset; 980 u32 firm_offset;
@@ -1017,7 +1017,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
1017 return -EIO; 1017 return -EIO;
1018 1018
1019 dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n", 1019 dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
1020 hif_dev->fw_name, (unsigned long) hif_dev->firmware->size); 1020 hif_dev->fw_name, (unsigned long) hif_dev->fw_size);
1021 1021
1022 return 0; 1022 return 0;
1023} 1023}
@@ -1072,14 +1072,15 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
1072 */ 1072 */
1073static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev) 1073static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
1074{ 1074{
1075 struct device *parent = hif_dev->udev->dev.parent; 1075 struct device *dev = &hif_dev->udev->dev;
1076 struct device *parent = dev->parent;
1076 1077
1077 complete(&hif_dev->fw_done); 1078 complete(&hif_dev->fw_done);
1078 1079
1079 if (parent) 1080 if (parent)
1080 device_lock(parent); 1081 device_lock(parent);
1081 1082
1082 device_release_driver(&hif_dev->udev->dev); 1083 device_release_driver(dev);
1083 1084
1084 if (parent) 1085 if (parent)
1085 device_unlock(parent); 1086 device_unlock(parent);
@@ -1099,11 +1100,11 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
1099 1100
1100 hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb, 1101 hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
1101 &hif_dev->udev->dev); 1102 &hif_dev->udev->dev);
1102 if (hif_dev->htc_handle == NULL) { 1103 if (hif_dev->htc_handle == NULL)
1103 goto err_fw; 1104 goto err_dev_alloc;
1104 }
1105 1105
1106 hif_dev->firmware = fw; 1106 hif_dev->fw_data = fw->data;
1107 hif_dev->fw_size = fw->size;
1107 1108
1108 /* Proceed with initialization */ 1109 /* Proceed with initialization */
1109 1110
@@ -1121,6 +1122,8 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
1121 goto err_htc_hw_init; 1122 goto err_htc_hw_init;
1122 } 1123 }
1123 1124
1125 release_firmware(fw);
1126 hif_dev->flags |= HIF_USB_READY;
1124 complete(&hif_dev->fw_done); 1127 complete(&hif_dev->fw_done);
1125 1128
1126 return; 1129 return;
@@ -1129,8 +1132,8 @@ err_htc_hw_init:
1129 ath9k_hif_usb_dev_deinit(hif_dev); 1132 ath9k_hif_usb_dev_deinit(hif_dev);
1130err_dev_init: 1133err_dev_init:
1131 ath9k_htc_hw_free(hif_dev->htc_handle); 1134 ath9k_htc_hw_free(hif_dev->htc_handle);
1135err_dev_alloc:
1132 release_firmware(fw); 1136 release_firmware(fw);
1133 hif_dev->firmware = NULL;
1134err_fw: 1137err_fw:
1135 ath9k_hif_usb_firmware_fail(hif_dev); 1138 ath9k_hif_usb_firmware_fail(hif_dev);
1136} 1139}
@@ -1277,11 +1280,10 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
1277 1280
1278 wait_for_completion(&hif_dev->fw_done); 1281 wait_for_completion(&hif_dev->fw_done);
1279 1282
1280 if (hif_dev->firmware) { 1283 if (hif_dev->flags & HIF_USB_READY) {
1281 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); 1284 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
1282 ath9k_htc_hw_free(hif_dev->htc_handle); 1285 ath9k_htc_hw_free(hif_dev->htc_handle);
1283 ath9k_hif_usb_dev_deinit(hif_dev); 1286 ath9k_hif_usb_dev_deinit(hif_dev);
1284 release_firmware(hif_dev->firmware);
1285 } 1287 }
1286 1288
1287 usb_set_intfdata(interface, NULL); 1289 usb_set_intfdata(interface, NULL);
@@ -1317,13 +1319,23 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
1317 struct hif_device_usb *hif_dev = usb_get_intfdata(interface); 1319 struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
1318 struct htc_target *htc_handle = hif_dev->htc_handle; 1320 struct htc_target *htc_handle = hif_dev->htc_handle;
1319 int ret; 1321 int ret;
1322 const struct firmware *fw;
1320 1323
1321 ret = ath9k_hif_usb_alloc_urbs(hif_dev); 1324 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
1322 if (ret) 1325 if (ret)
1323 return ret; 1326 return ret;
1324 1327
1325 if (hif_dev->firmware) { 1328 if (hif_dev->flags & HIF_USB_READY) {
1329 /* request cached firmware during suspend/resume cycle */
1330 ret = request_firmware(&fw, hif_dev->fw_name,
1331 &hif_dev->udev->dev);
1332 if (ret)
1333 goto fail_resume;
1334
1335 hif_dev->fw_data = fw->data;
1336 hif_dev->fw_size = fw->size;
1326 ret = ath9k_hif_usb_download_fw(hif_dev); 1337 ret = ath9k_hif_usb_download_fw(hif_dev);
1338 release_firmware(fw);
1327 if (ret) 1339 if (ret)
1328 goto fail_resume; 1340 goto fail_resume;
1329 } else { 1341 } else {
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 487ff658b4c1..51496e74b83e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -85,12 +85,14 @@ struct cmd_buf {
85}; 85};
86 86
87#define HIF_USB_START BIT(0) 87#define HIF_USB_START BIT(0)
88#define HIF_USB_READY BIT(1)
88 89
89struct hif_device_usb { 90struct hif_device_usb {
90 struct usb_device *udev; 91 struct usb_device *udev;
91 struct usb_interface *interface; 92 struct usb_interface *interface;
92 const struct usb_device_id *usb_device_id; 93 const struct usb_device_id *usb_device_id;
93 const struct firmware *firmware; 94 const void *fw_data;
95 size_t fw_size;
94 struct completion fw_done; 96 struct completion fw_done;
95 struct htc_target *htc_handle; 97 struct htc_target *htc_handle;
96 struct hif_usb_tx tx; 98 struct hif_usb_tx tx;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 936e920fb88e..b30596fcf73a 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
542 542
543int ath9k_tx_init(struct ath9k_htc_priv *priv); 543int ath9k_tx_init(struct ath9k_htc_priv *priv);
544int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, 544int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
545 struct ieee80211_sta *sta,
545 struct sk_buff *skb, u8 slot, bool is_cab); 546 struct sk_buff *skb, u8 slot, bool is_cab);
546void ath9k_tx_cleanup(struct ath9k_htc_priv *priv); 547void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
547bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype); 548bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 77d541feb910..f42d2eb6af99 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
326 goto next; 326 goto next;
327 } 327 }
328 328
329 ret = ath9k_htc_tx_start(priv, skb, tx_slot, true); 329 ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
330 if (ret != 0) { 330 if (ret != 0) {
331 ath9k_htc_tx_clear_slot(priv, tx_slot); 331 ath9k_htc_tx_clear_slot(priv, tx_slot);
332 dev_kfree_skb_any(skb); 332 dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 07df279c8d46..0eacfc13c915 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -161,7 +161,7 @@ void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
161 161
162 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) { 162 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
163 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 163 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
164 AR_STOMP_LOW_WLAN_WGHT); 164 AR_STOMP_LOW_WLAN_WGHT, 0);
165 ath9k_hw_btcoex_enable(ah); 165 ath9k_hw_btcoex_enable(ah);
166 ath_htc_resume_btcoex_work(priv); 166 ath_htc_resume_btcoex_work(priv);
167 } 167 }
@@ -173,17 +173,26 @@ void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
173 173
174 if (ah->btcoex_hw.enabled && 174 if (ah->btcoex_hw.enabled &&
175 ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { 175 ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
176 ath9k_hw_btcoex_disable(ah);
177 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 176 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
178 ath_htc_cancel_btcoex_work(priv); 177 ath_htc_cancel_btcoex_work(priv);
178 ath9k_hw_btcoex_disable(ah);
179 } 179 }
180} 180}
181 181
182void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product) 182void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
183{ 183{
184 struct ath_hw *ah = priv->ah; 184 struct ath_hw *ah = priv->ah;
185 struct ath_common *common = ath9k_hw_common(ah);
185 int qnum; 186 int qnum;
186 187
188 /*
189 * Check if BTCOEX is globally disabled.
190 */
191 if (!common->btcoex_enabled) {
192 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_NONE;
193 return;
194 }
195
187 if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) { 196 if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
188 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE; 197 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
189 } 198 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a035a380d669..d98255eb1b9a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -30,6 +30,10 @@ int htc_modparam_nohwcrypt;
30module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444); 30module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
31MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 31MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
32 32
33static int ath9k_htc_btcoex_enable;
34module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
35MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
36
33#define CHAN2G(_freq, _idx) { \ 37#define CHAN2G(_freq, _idx) { \
34 .center_freq = (_freq), \ 38 .center_freq = (_freq), \
35 .hw_value = (_idx), \ 39 .hw_value = (_idx), \
@@ -635,6 +639,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
635 common->hw = priv->hw; 639 common->hw = priv->hw;
636 common->priv = priv; 640 common->priv = priv;
637 common->debug_mask = ath9k_debug; 641 common->debug_mask = ath9k_debug;
642 common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
638 643
639 spin_lock_init(&priv->beacon_lock); 644 spin_lock_init(&priv->beacon_lock);
640 spin_lock_init(&priv->tx.tx_lock); 645 spin_lock_init(&priv->tx.tx_lock);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c785129692ff..ca78e33ca23e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -489,24 +489,20 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
489 ista = (struct ath9k_htc_sta *) sta->drv_priv; 489 ista = (struct ath9k_htc_sta *) sta->drv_priv;
490 memcpy(&tsta.macaddr, sta->addr, ETH_ALEN); 490 memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
491 memcpy(&tsta.bssid, common->curbssid, ETH_ALEN); 491 memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
492 tsta.is_vif_sta = 0;
493 ista->index = sta_idx; 492 ista->index = sta_idx;
493 tsta.is_vif_sta = 0;
494 maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
495 sta->ht_cap.ampdu_factor);
496 tsta.maxampdu = cpu_to_be16(maxampdu);
494 } else { 497 } else {
495 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); 498 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
496 tsta.is_vif_sta = 1; 499 tsta.is_vif_sta = 1;
500 tsta.maxampdu = cpu_to_be16(0xffff);
497 } 501 }
498 502
499 tsta.sta_index = sta_idx; 503 tsta.sta_index = sta_idx;
500 tsta.vif_index = avp->index; 504 tsta.vif_index = avp->index;
501 505
502 if (!sta) {
503 tsta.maxampdu = cpu_to_be16(0xffff);
504 } else {
505 maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
506 sta->ht_cap.ampdu_factor);
507 tsta.maxampdu = cpu_to_be16(maxampdu);
508 }
509
510 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); 506 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
511 if (ret) { 507 if (ret) {
512 if (sta) 508 if (sta)
@@ -856,7 +852,9 @@ set_timer:
856/* mac80211 Callbacks */ 852/* mac80211 Callbacks */
857/**********************/ 853/**********************/
858 854
859static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 855static void ath9k_htc_tx(struct ieee80211_hw *hw,
856 struct ieee80211_tx_control *control,
857 struct sk_buff *skb)
860{ 858{
861 struct ieee80211_hdr *hdr; 859 struct ieee80211_hdr *hdr;
862 struct ath9k_htc_priv *priv = hw->priv; 860 struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +881,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
883 goto fail_tx; 881 goto fail_tx;
884 } 882 }
885 883
886 ret = ath9k_htc_tx_start(priv, skb, slot, false); 884 ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
887 if (ret != 0) { 885 if (ret != 0) {
888 ath_dbg(common, XMIT, "Tx failed\n"); 886 ath_dbg(common, XMIT, "Tx failed\n");
889 goto clear_slot; 887 goto clear_slot;
@@ -1331,6 +1329,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
1331 return ret; 1329 return ret;
1332} 1330}
1333 1331
1332static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
1333 struct ieee80211_vif *vif,
1334 struct ieee80211_sta *sta, u32 changed)
1335{
1336 struct ath9k_htc_priv *priv = hw->priv;
1337 struct ath_common *common = ath9k_hw_common(priv->ah);
1338 struct ath9k_htc_target_rate trate;
1339
1340 mutex_lock(&priv->mutex);
1341 ath9k_htc_ps_wakeup(priv);
1342
1343 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
1344 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
1345 ath9k_htc_setup_rate(priv, sta, &trate);
1346 if (!ath9k_htc_send_rate_cmd(priv, &trate))
1347 ath_dbg(common, CONFIG,
1348 "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
1349 sta->addr, be32_to_cpu(trate.capflags));
1350 else
1351 ath_dbg(common, CONFIG,
1352 "Unable to update supported rates for sta: %pM\n",
1353 sta->addr);
1354 }
1355
1356 ath9k_htc_ps_restore(priv);
1357 mutex_unlock(&priv->mutex);
1358}
1359
1334static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, 1360static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
1335 struct ieee80211_vif *vif, u16 queue, 1361 struct ieee80211_vif *vif, u16 queue,
1336 const struct ieee80211_tx_queue_params *params) 1362 const struct ieee80211_tx_queue_params *params)
@@ -1419,7 +1445,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1419 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1445 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1420 if (priv->ah->sw_mgmt_crypto && 1446 if (priv->ah->sw_mgmt_crypto &&
1421 key->cipher == WLAN_CIPHER_SUITE_CCMP) 1447 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1422 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1448 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1423 ret = 0; 1449 ret = 0;
1424 } 1450 }
1425 break; 1451 break;
@@ -1758,6 +1784,7 @@ struct ieee80211_ops ath9k_htc_ops = {
1758 .sta_add = ath9k_htc_sta_add, 1784 .sta_add = ath9k_htc_sta_add,
1759 .sta_remove = ath9k_htc_sta_remove, 1785 .sta_remove = ath9k_htc_sta_remove,
1760 .conf_tx = ath9k_htc_conf_tx, 1786 .conf_tx = ath9k_htc_conf_tx,
1787 .sta_rc_update = ath9k_htc_sta_rc_update,
1761 .bss_info_changed = ath9k_htc_bss_info_changed, 1788 .bss_info_changed = ath9k_htc_bss_info_changed,
1762 .set_key = ath9k_htc_set_key, 1789 .set_key = ath9k_htc_set_key,
1763 .get_tsf = ath9k_htc_get_tsf, 1790 .get_tsf = ath9k_htc_get_tsf,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 47e61d0da33b..06cdcb772d78 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
333} 333}
334 334
335int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, 335int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
336 struct ieee80211_sta *sta,
336 struct sk_buff *skb, 337 struct sk_buff *skb,
337 u8 slot, bool is_cab) 338 u8 slot, bool is_cab)
338{ 339{
339 struct ieee80211_hdr *hdr; 340 struct ieee80211_hdr *hdr;
340 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 341 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
341 struct ieee80211_sta *sta = tx_info->control.sta;
342 struct ieee80211_vif *vif = tx_info->control.vif; 342 struct ieee80211_vif *vif = tx_info->control.vif;
343 struct ath9k_htc_sta *ista; 343 struct ath9k_htc_sta *ista;
344 struct ath9k_htc_vif *avp = NULL; 344 struct ath9k_htc_vif *avp = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 265bf77598a2..0f2b97f6b739 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,13 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf); 78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
79} 79}
80 80
81static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
82 bool enable)
83{
84 if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
85 ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
86}
87
81/* Private hardware call ops */ 88/* Private hardware call ops */
82 89
83/* PHY ops */ 90/* PHY ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4faf0a395876..f9a6ec5cf470 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -24,6 +24,7 @@
24#include "rc.h" 24#include "rc.h"
25#include "ar9003_mac.h" 25#include "ar9003_mac.h"
26#include "ar9003_mci.h" 26#include "ar9003_mci.h"
27#include "ar9003_phy.h"
27#include "debug.h" 28#include "debug.h"
28#include "ath9k.h" 29#include "ath9k.h"
29 30
@@ -355,7 +356,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
355 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; 356 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
356 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); 357 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
357 358
358 if (AR_SREV_9462(ah)) 359 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
359 ah->is_pciexpress = true; 360 ah->is_pciexpress = true;
360 else 361 else
361 ah->is_pciexpress = (val & 362 ah->is_pciexpress = (val &
@@ -602,6 +603,11 @@ static int __ath9k_hw_init(struct ath_hw *ah)
602 if (AR_SREV_9462(ah)) 603 if (AR_SREV_9462(ah))
603 ah->WARegVal &= ~AR_WA_D3_L1_DISABLE; 604 ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
604 605
606 if (AR_SREV_9565(ah)) {
607 ah->WARegVal |= AR_WA_BIT22;
608 REG_WRITE(ah, AR_WA, ah->WARegVal);
609 }
610
605 ath9k_hw_init_defaults(ah); 611 ath9k_hw_init_defaults(ah);
606 ath9k_hw_init_config(ah); 612 ath9k_hw_init_config(ah);
607 613
@@ -647,6 +653,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
647 case AR_SREV_VERSION_9340: 653 case AR_SREV_VERSION_9340:
648 case AR_SREV_VERSION_9462: 654 case AR_SREV_VERSION_9462:
649 case AR_SREV_VERSION_9550: 655 case AR_SREV_VERSION_9550:
656 case AR_SREV_VERSION_9565:
650 break; 657 break;
651 default: 658 default:
652 ath_err(common, 659 ath_err(common,
@@ -708,7 +715,7 @@ int ath9k_hw_init(struct ath_hw *ah)
708 int ret; 715 int ret;
709 struct ath_common *common = ath9k_hw_common(ah); 716 struct ath_common *common = ath9k_hw_common(ah);
710 717
711 /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */ 718 /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */
712 switch (ah->hw_version.devid) { 719 switch (ah->hw_version.devid) {
713 case AR5416_DEVID_PCI: 720 case AR5416_DEVID_PCI:
714 case AR5416_DEVID_PCIE: 721 case AR5416_DEVID_PCIE:
@@ -728,6 +735,7 @@ int ath9k_hw_init(struct ath_hw *ah)
728 case AR9300_DEVID_AR9580: 735 case AR9300_DEVID_AR9580:
729 case AR9300_DEVID_AR9462: 736 case AR9300_DEVID_AR9462:
730 case AR9485_DEVID_AR1111: 737 case AR9485_DEVID_AR1111:
738 case AR9300_DEVID_AR9565:
731 break; 739 break;
732 default: 740 default:
733 if (common->bus_ops->ath_bus_type == ATH_USB) 741 if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -800,8 +808,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
800{ 808{
801 u32 pll; 809 u32 pll;
802 810
803 if (AR_SREV_9485(ah)) { 811 if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
804
805 /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */ 812 /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
806 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 813 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
807 AR_CH0_BB_DPLL2_PLL_PWD, 0x1); 814 AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
@@ -912,7 +919,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
912 } 919 }
913 920
914 pll = ath9k_hw_compute_pll_control(ah, chan); 921 pll = ath9k_hw_compute_pll_control(ah, chan);
915 922 if (AR_SREV_9565(ah))
923 pll |= 0x40000;
916 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 924 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
917 925
918 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || 926 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
@@ -1726,12 +1734,12 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1726 if (!ret) 1734 if (!ret)
1727 goto fail; 1735 goto fail;
1728 1736
1729 ath9k_hw_loadnf(ah, ah->curchan);
1730 ath9k_hw_start_nfcal(ah, true);
1731
1732 if (ath9k_hw_mci_is_enabled(ah)) 1737 if (ath9k_hw_mci_is_enabled(ah))
1733 ar9003_mci_2g5g_switch(ah, false); 1738 ar9003_mci_2g5g_switch(ah, false);
1734 1739
1740 ath9k_hw_loadnf(ah, ah->curchan);
1741 ath9k_hw_start_nfcal(ah, true);
1742
1735 if (AR_SREV_9271(ah)) 1743 if (AR_SREV_9271(ah))
1736 ar9002_hw_load_ani_reg(ah, chan); 1744 ar9002_hw_load_ani_reg(ah, chan);
1737 1745
@@ -2018,6 +2026,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2018 2026
2019 ath9k_hw_apply_gpio_override(ah); 2027 ath9k_hw_apply_gpio_override(ah);
2020 2028
2029 if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
2030 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
2031
2021 return 0; 2032 return 0;
2022} 2033}
2023EXPORT_SYMBOL(ath9k_hw_reset); 2034EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2034,7 +2045,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
2034{ 2045{
2035 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2046 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2036 2047
2037 if (AR_SREV_9462(ah)) { 2048 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
2038 REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff); 2049 REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
2039 REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff); 2050 REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
2040 REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff); 2051 REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
@@ -2401,7 +2412,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2401 if (eeval & AR5416_OPFLAGS_11G) 2412 if (eeval & AR5416_OPFLAGS_11G)
2402 pCap->hw_caps |= ATH9K_HW_CAP_2GHZ; 2413 pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
2403 2414
2404 if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah)) 2415 if (AR_SREV_9485(ah) ||
2416 AR_SREV_9285(ah) ||
2417 AR_SREV_9330(ah) ||
2418 AR_SREV_9565(ah))
2405 chip_chainmask = 1; 2419 chip_chainmask = 1;
2406 else if (AR_SREV_9462(ah)) 2420 else if (AR_SREV_9462(ah))
2407 chip_chainmask = 3; 2421 chip_chainmask = 3;
@@ -2489,7 +2503,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2489 2503
2490 if (AR_SREV_9300_20_OR_LATER(ah)) { 2504 if (AR_SREV_9300_20_OR_LATER(ah)) {
2491 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK; 2505 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
2492 if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah)) 2506 if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah))
2493 pCap->hw_caps |= ATH9K_HW_CAP_LDPC; 2507 pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
2494 2508
2495 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH; 2509 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -2525,7 +2539,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2525 } 2539 }
2526 2540
2527 2541
2528 if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { 2542 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
2529 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 2543 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
2530 /* 2544 /*
2531 * enable the diversity-combining algorithm only when 2545 * enable the diversity-combining algorithm only when
@@ -2568,14 +2582,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2568 ah->enabled_cals |= TX_IQ_ON_AGC_CAL; 2582 ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
2569 } 2583 }
2570 2584
2571 if (AR_SREV_9462(ah)) { 2585 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
2572
2573 if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) 2586 if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
2574 pCap->hw_caps |= ATH9K_HW_CAP_MCI; 2587 pCap->hw_caps |= ATH9K_HW_CAP_MCI;
2575 2588
2576 if (AR_SREV_9462_20(ah)) 2589 if (AR_SREV_9462_20(ah))
2577 pCap->hw_caps |= ATH9K_HW_CAP_RTT; 2590 pCap->hw_caps |= ATH9K_HW_CAP_RTT;
2578
2579 } 2591 }
2580 2592
2581 2593
@@ -2741,7 +2753,7 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
2741 2753
2742 ENABLE_REGWRITE_BUFFER(ah); 2754 ENABLE_REGWRITE_BUFFER(ah);
2743 2755
2744 if (AR_SREV_9462(ah)) 2756 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
2745 bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER; 2757 bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
2746 2758
2747 REG_WRITE(ah, AR_RX_FILTER, bits); 2759 REG_WRITE(ah, AR_RX_FILTER, bits);
@@ -3038,7 +3050,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
3038 REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, 3050 REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
3039 gen_tmr_configuration[timer->index].mode_mask); 3051 gen_tmr_configuration[timer->index].mode_mask);
3040 3052
3041 if (AR_SREV_9462(ah)) { 3053 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3042 /* 3054 /*
3043 * Starting from AR9462, each generic timer can select which tsf 3055 * Starting from AR9462, each generic timer can select which tsf
3044 * to use. But we still follow the old rule, 0 - 7 use tsf and 3056 * to use. But we still follow the old rule, 0 - 7 use tsf and
@@ -3072,6 +3084,16 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
3072 REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, 3084 REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
3073 gen_tmr_configuration[timer->index].mode_mask); 3085 gen_tmr_configuration[timer->index].mode_mask);
3074 3086
3087 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3088 /*
3089 * Need to switch back to TSF if it was using TSF2.
3090 */
3091 if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) {
3092 REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
3093 (1 << timer->index));
3094 }
3095 }
3096
3075 /* Disable both trigger and thresh interrupt masks */ 3097 /* Disable both trigger and thresh interrupt masks */
3076 REG_CLR_BIT(ah, AR_IMR_S5, 3098 REG_CLR_BIT(ah, AR_IMR_S5,
3077 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | 3099 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
@@ -3153,6 +3175,7 @@ static struct {
3153 { AR_SREV_VERSION_9485, "9485" }, 3175 { AR_SREV_VERSION_9485, "9485" },
3154 { AR_SREV_VERSION_9462, "9462" }, 3176 { AR_SREV_VERSION_9462, "9462" },
3155 { AR_SREV_VERSION_9550, "9550" }, 3177 { AR_SREV_VERSION_9550, "9550" },
3178 { AR_SREV_VERSION_9565, "9565" },
3156}; 3179};
3157 3180
3158/* For devices with external radios */ 3181/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index de6968fc64f4..566a4ce4f156 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -50,6 +50,7 @@
50#define AR9300_DEVID_AR9330 0x0035 50#define AR9300_DEVID_AR9330 0x0035
51#define AR9300_DEVID_QCA955X 0x0038 51#define AR9300_DEVID_QCA955X 0x0038
52#define AR9485_DEVID_AR1111 0x0037 52#define AR9485_DEVID_AR1111 0x0037
53#define AR9300_DEVID_AR9565 0x0036
53 54
54#define AR5416_AR9100_DEVID 0x000b 55#define AR5416_AR9100_DEVID 0x000b
55 56
@@ -685,7 +686,7 @@ struct ath_hw_ops {
685 struct ath_hw_antcomb_conf *antconf); 686 struct ath_hw_antcomb_conf *antconf);
686 void (*antdiv_comb_conf_set)(struct ath_hw *ah, 687 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
687 struct ath_hw_antcomb_conf *antconf); 688 struct ath_hw_antcomb_conf *antconf);
688 689 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
689}; 690};
690 691
691struct ath_nf_limits { 692struct ath_nf_limits {
@@ -729,6 +730,7 @@ struct ath_hw {
729 bool aspm_enabled; 730 bool aspm_enabled;
730 bool is_monitoring; 731 bool is_monitoring;
731 bool need_an_top2_fixup; 732 bool need_an_top2_fixup;
733 bool shared_chain_lnadiv;
732 u16 tx_trig_level; 734 u16 tx_trig_level;
733 735
734 u32 nf_regs[6]; 736 u32 nf_regs[6];
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f33712140fa5..fad3ccd5cd91 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -46,6 +46,10 @@ static int ath9k_btcoex_enable;
46module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); 46module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
47MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); 47MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
48 48
49static int ath9k_enable_diversity;
50module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
51MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
52
49bool is_ath9k_unloaded; 53bool is_ath9k_unloaded;
50/* We use the hw_value as an index into our private channel structure */ 54/* We use the hw_value as an index into our private channel structure */
51 55
@@ -258,7 +262,7 @@ static void setup_ht_cap(struct ath_softc *sc,
258 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 262 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
259 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 263 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
260 264
261 if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) 265 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
262 max_streams = 1; 266 max_streams = 1;
263 else if (AR_SREV_9462(ah)) 267 else if (AR_SREV_9462(ah))
264 max_streams = 2; 268 max_streams = 2;
@@ -546,6 +550,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
546 common->debug_mask = ath9k_debug; 550 common->debug_mask = ath9k_debug;
547 common->btcoex_enabled = ath9k_btcoex_enable == 1; 551 common->btcoex_enabled = ath9k_btcoex_enable == 1;
548 common->disable_ani = false; 552 common->disable_ani = false;
553
554 /*
555 * Enable Antenna diversity only when BTCOEX is disabled
556 * and the user manually requests the feature.
557 */
558 if (!common->btcoex_enabled && ath9k_enable_diversity)
559 common->antenna_diversity = 1;
560
549 spin_lock_init(&common->cc_lock); 561 spin_lock_init(&common->cc_lock);
550 562
551 spin_lock_init(&sc->sc_serial_rw); 563 spin_lock_init(&sc->sc_serial_rw);
@@ -597,6 +609,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
597 609
598 ath9k_cmn_init_crypto(sc->sc_ah); 610 ath9k_cmn_init_crypto(sc->sc_ah);
599 ath9k_init_misc(sc); 611 ath9k_init_misc(sc);
612 ath_fill_led_pin(sc);
600 613
601 if (common->bus_ops->aspm_init) 614 if (common->bus_ops->aspm_init)
602 common->bus_ops->aspm_init(common); 615 common->bus_ops->aspm_init(common);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a22df749b8db..31ab82e3ba85 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -696,7 +696,9 @@ mutex_unlock:
696 return r; 696 return r;
697} 697}
698 698
699static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 699static void ath9k_tx(struct ieee80211_hw *hw,
700 struct ieee80211_tx_control *control,
701 struct sk_buff *skb)
700{ 702{
701 struct ath_softc *sc = hw->priv; 703 struct ath_softc *sc = hw->priv;
702 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 704 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
756 758
757 memset(&txctl, 0, sizeof(struct ath_tx_control)); 759 memset(&txctl, 0, sizeof(struct ath_tx_control));
758 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; 760 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
761 txctl.sta = control->sta;
759 762
760 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb); 763 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
761 764
@@ -983,47 +986,21 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
983 struct ath_softc *sc = hw->priv; 986 struct ath_softc *sc = hw->priv;
984 struct ath_hw *ah = sc->sc_ah; 987 struct ath_hw *ah = sc->sc_ah;
985 struct ath_common *common = ath9k_hw_common(ah); 988 struct ath_common *common = ath9k_hw_common(ah);
986 int ret = 0;
987 989
988 ath9k_ps_wakeup(sc);
989 mutex_lock(&sc->mutex); 990 mutex_lock(&sc->mutex);
990 991
991 switch (vif->type) {
992 case NL80211_IFTYPE_STATION:
993 case NL80211_IFTYPE_WDS:
994 case NL80211_IFTYPE_ADHOC:
995 case NL80211_IFTYPE_AP:
996 case NL80211_IFTYPE_MESH_POINT:
997 break;
998 default:
999 ath_err(common, "Interface type %d not yet supported\n",
1000 vif->type);
1001 ret = -EOPNOTSUPP;
1002 goto out;
1003 }
1004
1005 if (ath9k_uses_beacons(vif->type)) {
1006 if (sc->nbcnvifs >= ATH_BCBUF) {
1007 ath_err(common, "Not enough beacon buffers when adding"
1008 " new interface of type: %i\n",
1009 vif->type);
1010 ret = -ENOBUFS;
1011 goto out;
1012 }
1013 }
1014
1015 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 992 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1016
1017 sc->nvifs++; 993 sc->nvifs++;
1018 994
995 ath9k_ps_wakeup(sc);
1019 ath9k_calculate_summary_state(hw, vif); 996 ath9k_calculate_summary_state(hw, vif);
997 ath9k_ps_restore(sc);
998
1020 if (ath9k_uses_beacons(vif->type)) 999 if (ath9k_uses_beacons(vif->type))
1021 ath9k_beacon_assign_slot(sc, vif); 1000 ath9k_beacon_assign_slot(sc, vif);
1022 1001
1023out:
1024 mutex_unlock(&sc->mutex); 1002 mutex_unlock(&sc->mutex);
1025 ath9k_ps_restore(sc); 1003 return 0;
1026 return ret;
1027} 1004}
1028 1005
1029static int ath9k_change_interface(struct ieee80211_hw *hw, 1006static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1033,21 +1010,9 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1033{ 1010{
1034 struct ath_softc *sc = hw->priv; 1011 struct ath_softc *sc = hw->priv;
1035 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1012 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1036 int ret = 0;
1037 1013
1038 ath_dbg(common, CONFIG, "Change Interface\n"); 1014 ath_dbg(common, CONFIG, "Change Interface\n");
1039
1040 mutex_lock(&sc->mutex); 1015 mutex_lock(&sc->mutex);
1041 ath9k_ps_wakeup(sc);
1042
1043 if (ath9k_uses_beacons(new_type) &&
1044 !ath9k_uses_beacons(vif->type)) {
1045 if (sc->nbcnvifs >= ATH_BCBUF) {
1046 ath_err(common, "No beacon slot available\n");
1047 ret = -ENOBUFS;
1048 goto out;
1049 }
1050 }
1051 1016
1052 if (ath9k_uses_beacons(vif->type)) 1017 if (ath9k_uses_beacons(vif->type))
1053 ath9k_beacon_remove_slot(sc, vif); 1018 ath9k_beacon_remove_slot(sc, vif);
@@ -1055,14 +1020,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1055 vif->type = new_type; 1020 vif->type = new_type;
1056 vif->p2p = p2p; 1021 vif->p2p = p2p;
1057 1022
1023 ath9k_ps_wakeup(sc);
1058 ath9k_calculate_summary_state(hw, vif); 1024 ath9k_calculate_summary_state(hw, vif);
1025 ath9k_ps_restore(sc);
1026
1059 if (ath9k_uses_beacons(vif->type)) 1027 if (ath9k_uses_beacons(vif->type))
1060 ath9k_beacon_assign_slot(sc, vif); 1028 ath9k_beacon_assign_slot(sc, vif);
1061 1029
1062out:
1063 ath9k_ps_restore(sc);
1064 mutex_unlock(&sc->mutex); 1030 mutex_unlock(&sc->mutex);
1065 return ret; 1031 return 0;
1066} 1032}
1067 1033
1068static void ath9k_remove_interface(struct ieee80211_hw *hw, 1034static void ath9k_remove_interface(struct ieee80211_hw *hw,
@@ -1073,7 +1039,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1073 1039
1074 ath_dbg(common, CONFIG, "Detach Interface\n"); 1040 ath_dbg(common, CONFIG, "Detach Interface\n");
1075 1041
1076 ath9k_ps_wakeup(sc);
1077 mutex_lock(&sc->mutex); 1042 mutex_lock(&sc->mutex);
1078 1043
1079 sc->nvifs--; 1044 sc->nvifs--;
@@ -1081,10 +1046,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1081 if (ath9k_uses_beacons(vif->type)) 1046 if (ath9k_uses_beacons(vif->type))
1082 ath9k_beacon_remove_slot(sc, vif); 1047 ath9k_beacon_remove_slot(sc, vif);
1083 1048
1049 ath9k_ps_wakeup(sc);
1084 ath9k_calculate_summary_state(hw, NULL); 1050 ath9k_calculate_summary_state(hw, NULL);
1051 ath9k_ps_restore(sc);
1085 1052
1086 mutex_unlock(&sc->mutex); 1053 mutex_unlock(&sc->mutex);
1087 ath9k_ps_restore(sc);
1088} 1054}
1089 1055
1090static void ath9k_enable_ps(struct ath_softc *sc) 1056static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1440,7 +1406,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1440 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1406 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1441 if (sc->sc_ah->sw_mgmt_crypto && 1407 if (sc->sc_ah->sw_mgmt_crypto &&
1442 key->cipher == WLAN_CIPHER_SUITE_CCMP) 1408 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1443 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1409 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1444 ret = 0; 1410 ret = 0;
1445 } 1411 }
1446 break; 1412 break;
@@ -2257,7 +2223,7 @@ static int ath9k_suspend(struct ieee80211_hw *hw,
2257 mutex_lock(&sc->mutex); 2223 mutex_lock(&sc->mutex);
2258 2224
2259 ath_cancel_work(sc); 2225 ath_cancel_work(sc);
2260 del_timer_sync(&common->ani.timer); 2226 ath_stop_ani(sc);
2261 del_timer_sync(&sc->rx_poll_timer); 2227 del_timer_sync(&sc->rx_poll_timer);
2262 2228
2263 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) { 2229 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index fb536e7e661b..ec2d7c807567 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -80,6 +80,7 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
80 struct ath_mci_profile_info *info, *tinfo; 80 struct ath_mci_profile_info *info, *tinfo;
81 81
82 mci->aggr_limit = 0; 82 mci->aggr_limit = 0;
83 mci->num_mgmt = 0;
83 84
84 if (list_empty(&mci->info)) 85 if (list_empty(&mci->info))
85 return; 86 return;
@@ -120,7 +121,14 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
120 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING) 121 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
121 goto skip_tuning; 122 goto skip_tuning;
122 123
124 mci->aggr_limit = 0;
123 btcoex->duty_cycle = ath_mci_duty_cycle[num_profile]; 125 btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
126 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
127 if (NUM_PROF(mci))
128 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
129 else
130 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
131 ATH_BTCOEX_STOMP_LOW;
124 132
125 if (num_profile == 1) { 133 if (num_profile == 1) {
126 info = list_first_entry(&mci->info, 134 info = list_first_entry(&mci->info,
@@ -132,7 +140,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
132 else if (info->T == 6) { 140 else if (info->T == 6) {
133 mci->aggr_limit = 6; 141 mci->aggr_limit = 6;
134 btcoex->duty_cycle = 30; 142 btcoex->duty_cycle = 30;
135 } 143 } else
144 mci->aggr_limit = 6;
136 ath_dbg(common, MCI, 145 ath_dbg(common, MCI,
137 "Single SCO, aggregation limit %d 1/4 ms\n", 146 "Single SCO, aggregation limit %d 1/4 ms\n",
138 mci->aggr_limit); 147 mci->aggr_limit);
@@ -191,6 +200,23 @@ skip_tuning:
191 ath9k_btcoex_timer_resume(sc); 200 ath9k_btcoex_timer_resume(sc);
192} 201}
193 202
203static void ath_mci_wait_btcal_done(struct ath_softc *sc)
204{
205 struct ath_hw *ah = sc->sc_ah;
206
207 /* Stop tx & rx */
208 ieee80211_stop_queues(sc->hw);
209 ath_stoprecv(sc);
210 ath_drain_all_txq(sc, false);
211
212 /* Wait for cal done */
213 ar9003_mci_start_reset(ah, ah->curchan);
214
215 /* Resume tx & rx */
216 ath_startrecv(sc);
217 ieee80211_wake_queues(sc->hw);
218}
219
194static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 220static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
195{ 221{
196 struct ath_hw *ah = sc->sc_ah; 222 struct ath_hw *ah = sc->sc_ah;
@@ -201,8 +227,8 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
201 switch (opcode) { 227 switch (opcode) {
202 case MCI_GPM_BT_CAL_REQ: 228 case MCI_GPM_BT_CAL_REQ:
203 if (mci_hw->bt_state == MCI_BT_AWAKE) { 229 if (mci_hw->bt_state == MCI_BT_AWAKE) {
204 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START); 230 mci_hw->bt_state = MCI_BT_CAL_START;
205 ath9k_queue_reset(sc, RESET_TYPE_MCI); 231 ath_mci_wait_btcal_done(sc);
206 } 232 }
207 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state); 233 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
208 break; 234 break;
@@ -224,8 +250,8 @@ static void ath9k_mci_work(struct work_struct *work)
224 ath_mci_update_scheme(sc); 250 ath_mci_update_scheme(sc);
225} 251}
226 252
227static void ath_mci_process_profile(struct ath_softc *sc, 253static u8 ath_mci_process_profile(struct ath_softc *sc,
228 struct ath_mci_profile_info *info) 254 struct ath_mci_profile_info *info)
229{ 255{
230 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 256 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
231 struct ath_btcoex *btcoex = &sc->btcoex; 257 struct ath_btcoex *btcoex = &sc->btcoex;
@@ -251,25 +277,15 @@ static void ath_mci_process_profile(struct ath_softc *sc,
251 277
252 if (info->start) { 278 if (info->start) {
253 if (!entry && !ath_mci_add_profile(common, mci, info)) 279 if (!entry && !ath_mci_add_profile(common, mci, info))
254 return; 280 return 0;
255 } else 281 } else
256 ath_mci_del_profile(common, mci, entry); 282 ath_mci_del_profile(common, mci, entry);
257 283
258 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; 284 return 1;
259 mci->aggr_limit = mci->num_sco ? 6 : 0;
260
261 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
262 if (NUM_PROF(mci))
263 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
264 else
265 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
266 ATH_BTCOEX_STOMP_LOW;
267
268 ieee80211_queue_work(sc->hw, &sc->mci_work);
269} 285}
270 286
271static void ath_mci_process_status(struct ath_softc *sc, 287static u8 ath_mci_process_status(struct ath_softc *sc,
272 struct ath_mci_profile_status *status) 288 struct ath_mci_profile_status *status)
273{ 289{
274 struct ath_btcoex *btcoex = &sc->btcoex; 290 struct ath_btcoex *btcoex = &sc->btcoex;
275 struct ath_mci_profile *mci = &btcoex->mci; 291 struct ath_mci_profile *mci = &btcoex->mci;
@@ -278,14 +294,14 @@ static void ath_mci_process_status(struct ath_softc *sc,
278 294
279 /* Link status type are not handled */ 295 /* Link status type are not handled */
280 if (status->is_link) 296 if (status->is_link)
281 return; 297 return 0;
282 298
283 info.conn_handle = status->conn_handle; 299 info.conn_handle = status->conn_handle;
284 if (ath_mci_find_profile(mci, &info)) 300 if (ath_mci_find_profile(mci, &info))
285 return; 301 return 0;
286 302
287 if (status->conn_handle >= ATH_MCI_MAX_PROFILE) 303 if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
288 return; 304 return 0;
289 305
290 if (status->is_critical) 306 if (status->is_critical)
291 __set_bit(status->conn_handle, mci->status); 307 __set_bit(status->conn_handle, mci->status);
@@ -299,7 +315,9 @@ static void ath_mci_process_status(struct ath_softc *sc,
299 } while (++i < ATH_MCI_MAX_PROFILE); 315 } while (++i < ATH_MCI_MAX_PROFILE);
300 316
301 if (old_num_mgmt != mci->num_mgmt) 317 if (old_num_mgmt != mci->num_mgmt)
302 ieee80211_queue_work(sc->hw, &sc->mci_work); 318 return 1;
319
320 return 0;
303} 321}
304 322
305static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 323static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -308,9 +326,16 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
308 struct ath_mci_profile_info profile_info; 326 struct ath_mci_profile_info profile_info;
309 struct ath_mci_profile_status profile_status; 327 struct ath_mci_profile_status profile_status;
310 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 328 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
311 u8 major, minor; 329 u8 major, minor, update_scheme = 0;
312 u32 seq_num; 330 u32 seq_num;
313 331
332 if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
333 ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
334 ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
335 ath_mci_flush_profile(&sc->btcoex.mci);
336 ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
337 }
338
314 switch (opcode) { 339 switch (opcode) {
315 case MCI_GPM_COEX_VERSION_QUERY: 340 case MCI_GPM_COEX_VERSION_QUERY:
316 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION); 341 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
@@ -336,7 +361,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
336 break; 361 break;
337 } 362 }
338 363
339 ath_mci_process_profile(sc, &profile_info); 364 update_scheme += ath_mci_process_profile(sc, &profile_info);
340 break; 365 break;
341 case MCI_GPM_COEX_BT_STATUS_UPDATE: 366 case MCI_GPM_COEX_BT_STATUS_UPDATE:
342 profile_status.is_link = *(rx_payload + 367 profile_status.is_link = *(rx_payload +
@@ -352,12 +377,14 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
352 profile_status.is_link, profile_status.conn_handle, 377 profile_status.is_link, profile_status.conn_handle,
353 profile_status.is_critical, seq_num); 378 profile_status.is_critical, seq_num);
354 379
355 ath_mci_process_status(sc, &profile_status); 380 update_scheme += ath_mci_process_status(sc, &profile_status);
356 break; 381 break;
357 default: 382 default:
358 ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode); 383 ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
359 break; 384 break;
360 } 385 }
386 if (update_scheme)
387 ieee80211_queue_work(sc->hw, &sc->mci_work);
361} 388}
362 389
363int ath_mci_setup(struct ath_softc *sc) 390int ath_mci_setup(struct ath_softc *sc)
@@ -365,6 +392,7 @@ int ath_mci_setup(struct ath_softc *sc)
365 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 392 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
366 struct ath_mci_coex *mci = &sc->mci_coex; 393 struct ath_mci_coex *mci = &sc->mci_coex;
367 struct ath_mci_buf *buf = &mci->sched_buf; 394 struct ath_mci_buf *buf = &mci->sched_buf;
395 int ret;
368 396
369 buf->bf_addr = dma_alloc_coherent(sc->dev, 397 buf->bf_addr = dma_alloc_coherent(sc->dev,
370 ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE, 398 ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
@@ -384,9 +412,13 @@ int ath_mci_setup(struct ath_softc *sc)
384 mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len; 412 mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
385 mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len; 413 mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
386 414
387 ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr, 415 ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
388 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), 416 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
389 mci->sched_buf.bf_paddr); 417 mci->sched_buf.bf_paddr);
418 if (ret) {
419 ath_err(common, "Failed to initialize MCI\n");
420 return ret;
421 }
390 422
391 INIT_WORK(&sc->mci_work, ath9k_mci_work); 423 INIT_WORK(&sc->mci_work, ath9k_mci_work);
392 ath_dbg(common, MCI, "MCI Initialized\n"); 424 ath_dbg(common, MCI, "MCI Initialized\n");
@@ -551,9 +583,11 @@ void ath_mci_intr(struct ath_softc *sc)
551 } 583 }
552 584
553 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || 585 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
554 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) 586 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
555 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR | 587 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
556 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT); 588 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
589 ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
590 }
557} 591}
558 592
559void ath_mci_enable(struct ath_softc *sc) 593void ath_mci_enable(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index ef11dc639461..0e630a99b68b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -38,6 +38,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ 38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ 39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
40 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ 40 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
41 { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
41 { 0 } 42 { 0 }
42}; 43};
43 44
@@ -122,7 +123,8 @@ static void ath_pci_aspm_init(struct ath_common *common)
122 if (!parent) 123 if (!parent)
123 return; 124 return;
124 125
125 if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { 126 if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
127 (AR_SREV_9285(ah))) {
126 /* Bluetooth coexistance requires disabling ASPM. */ 128 /* Bluetooth coexistance requires disabling ASPM. */
127 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, 129 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
128 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 130 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e034add9cd5a..27ed80b54881 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -25,141 +25,141 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
25 8, /* MCS start */ 25 8, /* MCS start */
26 { 26 {
27 [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, 27 [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
28 5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */ 28 5400, 0, 12 }, /* 6 Mb */
29 [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, 29 [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
30 7800, 1, 18, 0, 1, 1, 1 }, /* 9 Mb */ 30 7800, 1, 18 }, /* 9 Mb */
31 [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, 31 [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
32 10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */ 32 10000, 2, 24 }, /* 12 Mb */
33 [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, 33 [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
34 13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */ 34 13900, 3, 36 }, /* 18 Mb */
35 [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, 35 [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
36 17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */ 36 17300, 4, 48 }, /* 24 Mb */
37 [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, 37 [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
38 23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */ 38 23000, 5, 72 }, /* 36 Mb */
39 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, 39 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
40 27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */ 40 27400, 6, 96 }, /* 48 Mb */
41 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, 41 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
42 29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */ 42 29300, 7, 108 }, /* 54 Mb */
43 [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500, 43 [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
44 6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */ 44 6400, 0, 0 }, /* 6.5 Mb */
45 [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, 45 [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
46 12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */ 46 12700, 1, 1 }, /* 13 Mb */
47 [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, 47 [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
48 18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */ 48 18800, 2, 2 }, /* 19.5 Mb */
49 [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, 49 [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
50 25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */ 50 25000, 3, 3 }, /* 26 Mb */
51 [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, 51 [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
52 36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */ 52 36700, 4, 4 }, /* 39 Mb */
53 [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, 53 [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
54 48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */ 54 48100, 5, 5 }, /* 52 Mb */
55 [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, 55 [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
56 53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */ 56 53500, 6, 6 }, /* 58.5 Mb */
57 [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, 57 [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
58 59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */ 58 59000, 7, 7 }, /* 65 Mb */
59 [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, 59 [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
60 65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */ 60 65400, 7, 7 }, /* 75 Mb */
61 [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, 61 [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
62 12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */ 62 12700, 8, 8 }, /* 13 Mb */
63 [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, 63 [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
64 24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */ 64 24800, 9, 9 }, /* 26 Mb */
65 [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, 65 [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
66 36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */ 66 36600, 10, 10 }, /* 39 Mb */
67 [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, 67 [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
68 48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */ 68 48100, 11, 11 }, /* 52 Mb */
69 [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, 69 [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
70 69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */ 70 69500, 12, 12 }, /* 78 Mb */
71 [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, 71 [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
72 89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */ 72 89500, 13, 13 }, /* 104 Mb */
73 [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, 73 [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
74 98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */ 74 98900, 14, 14 }, /* 117 Mb */
75 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, 75 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
76 108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */ 76 108300, 15, 15 }, /* 130 Mb */
77 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, 77 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
78 120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */ 78 120000, 15, 15 }, /* 144.4 Mb */
79 [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, 79 [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
80 17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */ 80 17400, 16, 16 }, /* 19.5 Mb */
81 [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, 81 [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
82 35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */ 82 35100, 17, 17 }, /* 39 Mb */
83 [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, 83 [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
84 52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */ 84 52600, 18, 18 }, /* 58.5 Mb */
85 [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, 85 [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
86 70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */ 86 70400, 19, 19 }, /* 78 Mb */
87 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, 87 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
88 104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */ 88 104900, 20, 20 }, /* 117 Mb */
89 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, 89 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
90 115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/ 90 115800, 20, 20 }, /* 130 Mb*/
91 [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, 91 [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
92 137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */ 92 137200, 21, 21 }, /* 156 Mb */
93 [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, 93 [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
94 151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */ 94 151100, 21, 21 }, /* 173.3 Mb */
95 [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, 95 [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
96 152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */ 96 152800, 22, 22 }, /* 175.5 Mb */
97 [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, 97 [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
98 168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/ 98 168400, 22, 22 }, /* 195 Mb*/
99 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, 99 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
100 168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */ 100 168400, 23, 23 }, /* 195 Mb */
101 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, 101 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
102 185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */ 102 185000, 23, 23 }, /* 216.7 Mb */
103 [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, 103 [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
104 13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/ 104 13200, 0, 0 }, /* 13.5 Mb*/
105 [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, 105 [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
106 25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/ 106 25900, 1, 1 }, /* 27.0 Mb*/
107 [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, 107 [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
108 38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/ 108 38600, 2, 2 }, /* 40.5 Mb*/
109 [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, 109 [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
110 49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */ 110 49800, 3, 3 }, /* 54 Mb */
111 [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, 111 [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
112 72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */ 112 72200, 4, 4 }, /* 81 Mb */
113 [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000, 113 [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
114 92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */ 114 92900, 5, 5 }, /* 108 Mb */
115 [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, 115 [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
116 102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/ 116 102700, 6, 6 }, /* 121.5 Mb*/
117 [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, 117 [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
118 112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */ 118 112000, 7, 7 }, /* 135 Mb */
119 [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, 119 [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
120 122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */ 120 122000, 7, 7 }, /* 150 Mb */
121 [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, 121 [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
122 25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */ 122 25800, 8, 8 }, /* 27 Mb */
123 [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, 123 [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
124 49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */ 124 49800, 9, 9 }, /* 54 Mb */
125 [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, 125 [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
126 71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */ 126 71900, 10, 10 }, /* 81 Mb */
127 [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, 127 [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
128 92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */ 128 92500, 11, 11 }, /* 108 Mb */
129 [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, 129 [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
130 130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */ 130 130300, 12, 12 }, /* 162 Mb */
131 [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, 131 [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
132 162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */ 132 162800, 13, 13 }, /* 216 Mb */
133 [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, 133 [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
134 178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */ 134 178200, 14, 14 }, /* 243 Mb */
135 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, 135 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
136 192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */ 136 192100, 15, 15 }, /* 270 Mb */
137 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, 137 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
138 207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */ 138 207000, 15, 15 }, /* 300 Mb */
139 [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, 139 [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
140 36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */ 140 36100, 16, 16 }, /* 40.5 Mb */
141 [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, 141 [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
142 72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */ 142 72900, 17, 17 }, /* 81 Mb */
143 [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, 143 [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
144 108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */ 144 108300, 18, 18 }, /* 121.5 Mb */
145 [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, 145 [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
146 142000, 19, 19, 4, 59, 59, 59 }, /* 162 Mb */ 146 142000, 19, 19 }, /* 162 Mb */
147 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, 147 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
148 205100, 20, 20, 4, 60, 61, 61 }, /* 243 Mb */ 148 205100, 20, 20 }, /* 243 Mb */
149 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, 149 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
150 224700, 20, 20, 4, 60, 61, 61 }, /* 270 Mb */ 150 224700, 20, 20 }, /* 270 Mb */
151 [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, 151 [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
152 263100, 21, 21, 4, 62, 63, 63 }, /* 324 Mb */ 152 263100, 21, 21 }, /* 324 Mb */
153 [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, 153 [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
154 288000, 21, 21, 4, 62, 63, 63 }, /* 360 Mb */ 154 288000, 21, 21 }, /* 360 Mb */
155 [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, 155 [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
156 290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */ 156 290700, 22, 22 }, /* 364.5 Mb */
157 [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, 157 [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
158 317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */ 158 317200, 22, 22 }, /* 405 Mb */
159 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, 159 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
160 317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */ 160 317200, 23, 23 }, /* 405 Mb */
161 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, 161 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
162 346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */ 162 346400, 23, 23 }, /* 450 Mb */
163 }, 163 },
164 50, /* probe interval */ 164 50, /* probe interval */
165 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ 165 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -173,149 +173,149 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
173 12, /* MCS start */ 173 12, /* MCS start */
174 { 174 {
175 [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000, 175 [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
176 900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */ 176 900, 0, 2 }, /* 1 Mb */
177 [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000, 177 [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
178 1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */ 178 1900, 1, 4 }, /* 2 Mb */
179 [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500, 179 [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
180 4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */ 180 4900, 2, 11 }, /* 5.5 Mb */
181 [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000, 181 [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
182 8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */ 182 8100, 3, 22 }, /* 11 Mb */
183 [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, 183 [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
184 5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */ 184 5400, 4, 12 }, /* 6 Mb */
185 [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, 185 [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
186 7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */ 186 7800, 5, 18 }, /* 9 Mb */
187 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, 187 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
188 10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */ 188 10100, 6, 24 }, /* 12 Mb */
189 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, 189 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
190 14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */ 190 14100, 7, 36 }, /* 18 Mb */
191 [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, 191 [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
192 17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */ 192 17700, 8, 48 }, /* 24 Mb */
193 [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, 193 [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
194 23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */ 194 23700, 9, 72 }, /* 36 Mb */
195 [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, 195 [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
196 27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */ 196 27400, 10, 96 }, /* 48 Mb */
197 [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, 197 [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
198 30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */ 198 30900, 11, 108 }, /* 54 Mb */
199 [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500, 199 [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
200 6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */ 200 6400, 0, 0 }, /* 6.5 Mb */
201 [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, 201 [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
202 12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */ 202 12700, 1, 1 }, /* 13 Mb */
203 [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, 203 [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
204 18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/ 204 18800, 2, 2 }, /* 19.5 Mb*/
205 [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, 205 [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
206 25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */ 206 25000, 3, 3 }, /* 26 Mb */
207 [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, 207 [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
208 36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */ 208 36700, 4, 4 }, /* 39 Mb */
209 [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, 209 [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
210 48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */ 210 48100, 5, 5 }, /* 52 Mb */
211 [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, 211 [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
212 53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */ 212 53500, 6, 6 }, /* 58.5 Mb */
213 [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, 213 [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
214 59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */ 214 59000, 7, 7 }, /* 65 Mb */
215 [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, 215 [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
216 65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/ 216 65400, 7, 7 }, /* 65 Mb*/
217 [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, 217 [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
218 12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */ 218 12700, 8, 8 }, /* 13 Mb */
219 [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, 219 [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
220 24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */ 220 24800, 9, 9 }, /* 26 Mb */
221 [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, 221 [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
222 36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */ 222 36600, 10, 10 }, /* 39 Mb */
223 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, 223 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
224 48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */ 224 48100, 11, 11 }, /* 52 Mb */
225 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, 225 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
226 69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */ 226 69500, 12, 12 }, /* 78 Mb */
227 [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, 227 [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
228 89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */ 228 89500, 13, 13 }, /* 104 Mb */
229 [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, 229 [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
230 98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */ 230 98900, 14, 14 }, /* 117 Mb */
231 [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, 231 [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
232 108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */ 232 108300, 15, 15 }, /* 130 Mb */
233 [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, 233 [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
234 120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */ 234 120000, 15, 15 }, /* 144.4 Mb */
235 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, 235 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
236 17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */ 236 17400, 16, 16 }, /* 19.5 Mb */
237 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, 237 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
238 35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */ 238 35100, 17, 17 }, /* 39 Mb */
239 [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, 239 [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
240 52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */ 240 52600, 18, 18 }, /* 58.5 Mb */
241 [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, 241 [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
242 70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */ 242 70400, 19, 19 }, /* 78 Mb */
243 [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, 243 [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
244 104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */ 244 104900, 20, 20 }, /* 117 Mb */
245 [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, 245 [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
246 115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */ 246 115800, 20, 20 }, /* 130 Mb */
247 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, 247 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
248 137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */ 248 137200, 21, 21 }, /* 156 Mb */
249 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, 249 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
250 151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */ 250 151100, 21, 21 }, /* 173.3 Mb */
251 [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, 251 [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
252 152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */ 252 152800, 22, 22 }, /* 175.5 Mb */
253 [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, 253 [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
254 168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */ 254 168400, 22, 22 }, /* 195 Mb */
255 [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, 255 [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
256 168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */ 256 168400, 23, 23 }, /* 195 Mb */
257 [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, 257 [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
258 185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */ 258 185000, 23, 23 }, /* 216.7 Mb */
259 [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, 259 [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
260 13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */ 260 13200, 0, 0 }, /* 13.5 Mb */
261 [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, 261 [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
262 25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */ 262 25900, 1, 1 }, /* 27.0 Mb */
263 [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, 263 [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
264 38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */ 264 38600, 2, 2 }, /* 40.5 Mb */
265 [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, 265 [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
266 49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */ 266 49800, 3, 3 }, /* 54 Mb */
267 [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, 267 [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
268 72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */ 268 72200, 4, 4 }, /* 81 Mb */
269 [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000, 269 [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
270 92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */ 270 92900, 5, 5 }, /* 108 Mb */
271 [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, 271 [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
272 102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */ 272 102700, 6, 6 }, /* 121.5 Mb */
273 [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, 273 [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
274 112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */ 274 112000, 7, 7 }, /* 135 Mb */
275 [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, 275 [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
276 122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */ 276 122000, 7, 7 }, /* 150 Mb */
277 [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, 277 [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
278 25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */ 278 25800, 8, 8 }, /* 27 Mb */
279 [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, 279 [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
280 49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */ 280 49800, 9, 9 }, /* 54 Mb */
281 [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, 281 [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
282 71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */ 282 71900, 10, 10 }, /* 81 Mb */
283 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, 283 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
284 92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */ 284 92500, 11, 11 }, /* 108 Mb */
285 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, 285 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
286 130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */ 286 130300, 12, 12 }, /* 162 Mb */
287 [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, 287 [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
288 162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */ 288 162800, 13, 13 }, /* 216 Mb */
289 [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, 289 [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
290 178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */ 290 178200, 14, 14 }, /* 243 Mb */
291 [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, 291 [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
292 192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */ 292 192100, 15, 15 }, /* 270 Mb */
293 [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, 293 [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
294 207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */ 294 207000, 15, 15 }, /* 300 Mb */
295 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, 295 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
296 36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */ 296 36100, 16, 16 }, /* 40.5 Mb */
297 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, 297 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
298 72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */ 298 72900, 17, 17 }, /* 81 Mb */
299 [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, 299 [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
300 108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */ 300 108300, 18, 18 }, /* 121.5 Mb */
301 [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, 301 [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
302 142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */ 302 142000, 19, 19 }, /* 162 Mb */
303 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, 303 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
304 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */ 304 205100, 20, 20 }, /* 243 Mb */
305 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, 305 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
306 224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */ 306 224700, 20, 20 }, /* 270 Mb */
307 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, 307 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
308 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */ 308 263100, 21, 21 }, /* 324 Mb */
309 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, 309 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
310 288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */ 310 288000, 21, 21 }, /* 360 Mb */
311 [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, 311 [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
312 290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */ 312 290700, 22, 22 }, /* 364.5 Mb */
313 [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, 313 [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
314 317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */ 314 317200, 22, 22 }, /* 405 Mb */
315 [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, 315 [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
316 317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */ 316 317200, 23, 23 }, /* 405 Mb */
317 [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, 317 [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
318 346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */ 318 346400, 23, 23 }, /* 450 Mb */
319 }, 319 },
320 50, /* probe interval */ 320 50, /* probe interval */
321 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ 321 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -326,21 +326,21 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
326 0, 326 0,
327 { 327 {
328 { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 328 { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
329 5400, 0, 12, 0}, 329 5400, 0, 12},
330 { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 330 { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
331 7800, 1, 18, 0}, 331 7800, 1, 18},
332 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 332 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
333 10000, 2, 24, 2}, 333 10000, 2, 24},
334 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 334 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
335 13900, 3, 36, 2}, 335 13900, 3, 36},
336 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 336 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
337 17300, 4, 48, 4}, 337 17300, 4, 48},
338 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 338 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
339 23000, 5, 72, 4}, 339 23000, 5, 72},
340 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 340 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
341 27400, 6, 96, 4}, 341 27400, 6, 96},
342 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 342 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
343 29300, 7, 108, 4}, 343 29300, 7, 108},
344 }, 344 },
345 50, /* probe interval */ 345 50, /* probe interval */
346 0, /* Phy rates allowed initially */ 346 0, /* Phy rates allowed initially */
@@ -351,63 +351,62 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
351 0, 351 0,
352 { 352 {
353 { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ 353 { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
354 900, 0, 2, 0}, 354 900, 0, 2},
355 { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ 355 { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
356 1900, 1, 4, 1}, 356 1900, 1, 4},
357 { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ 357 { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
358 4900, 2, 11, 2}, 358 4900, 2, 11},
359 { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ 359 { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
360 8100, 3, 22, 3}, 360 8100, 3, 22},
361 { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 361 { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
362 5400, 4, 12, 4}, 362 5400, 4, 12},
363 { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 363 { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
364 7800, 5, 18, 4}, 364 7800, 5, 18},
365 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 365 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
366 10000, 6, 24, 6}, 366 10000, 6, 24},
367 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 367 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
368 13900, 7, 36, 6}, 368 13900, 7, 36},
369 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 369 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
370 17300, 8, 48, 8}, 370 17300, 8, 48},
371 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 371 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
372 23000, 9, 72, 8}, 372 23000, 9, 72},
373 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 373 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
374 27400, 10, 96, 8}, 374 27400, 10, 96},
375 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 375 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
376 29300, 11, 108, 8}, 376 29300, 11, 108},
377 }, 377 },
378 50, /* probe interval */ 378 50, /* probe interval */
379 0, /* Phy rates allowed initially */ 379 0, /* Phy rates allowed initially */
380}; 380};
381 381
382static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, 382static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
383 struct ieee80211_tx_rate *rate) 383 struct ieee80211_tx_rate *rate)
384{ 384{
385 int rix = 0, i = 0; 385 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
386 static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 }; 386 int rix, i, idx = 0;
387 387
388 if (!(rate->flags & IEEE80211_TX_RC_MCS)) 388 if (!(rate->flags & IEEE80211_TX_RC_MCS))
389 return rate->idx; 389 return rate->idx;
390 390
391 while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) { 391 for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
392 rix++; i++; 392 idx = ath_rc_priv->valid_rate_index[i];
393
394 if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
395 rate_table->info[idx].ratecode == rate->idx)
396 break;
393 } 397 }
394 398
395 rix += rate->idx + rate_table->mcs_start; 399 rix = idx;
396 400
397 if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && 401 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
398 (rate->flags & IEEE80211_TX_RC_SHORT_GI)) 402 rix++;
399 rix = rate_table->info[rix].ht_index;
400 else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
401 rix = rate_table->info[rix].sgi_index;
402 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
403 rix = rate_table->info[rix].cw40index;
404 403
405 return rix; 404 return rix;
406} 405}
407 406
408static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, 407static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
409 struct ath_rate_priv *ath_rc_priv)
410{ 408{
409 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
411 u8 i, j, idx, idx_next; 410 u8 i, j, idx, idx_next;
412 411
413 for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) { 412 for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
@@ -424,21 +423,6 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
424 } 423 }
425} 424}
426 425
427static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
428{
429 u8 i;
430
431 for (i = 0; i < ath_rc_priv->rate_table_size; i++)
432 ath_rc_priv->valid_rate_index[i] = 0;
433}
434
435static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
436 u8 index, int valid_tx_rate)
437{
438 BUG_ON(index > ath_rc_priv->rate_table_size);
439 ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
440}
441
442static inline 426static inline
443int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table, 427int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
444 struct ath_rate_priv *ath_rc_priv, 428 struct ath_rate_priv *ath_rc_priv,
@@ -479,8 +463,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
479} 463}
480 464
481static inline int 465static inline int
482ath_rc_get_lower_rix(const struct ath_rate_table *rate_table, 466ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
483 struct ath_rate_priv *ath_rc_priv,
484 u8 cur_valid_txrate, u8 *next_idx) 467 u8 cur_valid_txrate, u8 *next_idx)
485{ 468{
486 int8_t i; 469 int8_t i;
@@ -495,10 +478,9 @@ ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
495 return 0; 478 return 0;
496} 479}
497 480
498static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, 481static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
499 const struct ath_rate_table *rate_table,
500 u32 capflag)
501{ 482{
483 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
502 u8 i, hi = 0; 484 u8 i, hi = 0;
503 485
504 for (i = 0; i < rate_table->rate_cnt; i++) { 486 for (i = 0; i < rate_table->rate_cnt; i++) {
@@ -506,14 +488,14 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
506 u32 phy = rate_table->info[i].phy; 488 u32 phy = rate_table->info[i].phy;
507 u8 valid_rate_count = 0; 489 u8 valid_rate_count = 0;
508 490
509 if (!ath_rc_valid_phyrate(phy, capflag, 0)) 491 if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
510 continue; 492 continue;
511 493
512 valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy]; 494 valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
513 495
514 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i; 496 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
515 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 497 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
516 ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1); 498 ath_rc_priv->valid_rate_index[i] = true;
517 hi = i; 499 hi = i;
518 } 500 }
519 } 501 }
@@ -521,76 +503,73 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
521 return hi; 503 return hi;
522} 504}
523 505
524static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, 506static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
525 const struct ath_rate_table *rate_table, 507 u32 phy, u32 capflag)
526 struct ath_rateset *rateset,
527 u32 capflag)
528{ 508{
529 u8 i, j, hi = 0; 509 if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
510 return false;
530 511
531 /* Use intersection of working rates and valid rates */ 512 if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
532 for (i = 0; i < rateset->rs_nrates; i++) { 513 return false;
533 for (j = 0; j < rate_table->rate_cnt; j++) {
534 u32 phy = rate_table->info[j].phy;
535 u16 rate_flags = rate_table->info[j].rate_flags;
536 u8 rate = rateset->rs_rates[i];
537 u8 dot11rate = rate_table->info[j].dot11rate;
538
539 /* We allow a rate only if its valid and the
540 * capflag matches one of the validity
541 * (VALID/VALID_20/VALID_40) flags */
542
543 if ((rate == dot11rate) &&
544 (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
545 WLAN_RC_CAP_MODE(capflag) &&
546 (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
547 !WLAN_RC_PHY_HT(phy)) {
548 u8 valid_rate_count = 0;
549
550 if (!ath_rc_valid_phyrate(phy, capflag, 0))
551 continue;
552
553 valid_rate_count =
554 ath_rc_priv->valid_phy_ratecnt[phy];
555
556 ath_rc_priv->valid_phy_rateidx[phy]
557 [valid_rate_count] = j;
558 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
559 ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
560 hi = max(hi, j);
561 }
562 }
563 }
564 514
565 return hi; 515 if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
516 return false;
517
518 return true;
566} 519}
567 520
568static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv, 521static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
569 const struct ath_rate_table *rate_table, 522 u32 phy, u32 capflag)
570 struct ath_rateset *rateset, u32 capflag)
571{ 523{
572 u8 i, j, hi = 0; 524 if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
525 return false;
526
527 if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
528 return false;
529
530 if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
531 return false;
532
533 return true;
534}
535
536static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
537{
538 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
539 struct ath_rateset *rateset;
540 u32 phy, capflag = ath_rc_priv->ht_cap;
541 u16 rate_flags;
542 u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
543
544 if (legacy)
545 rateset = &ath_rc_priv->neg_rates;
546 else
547 rateset = &ath_rc_priv->neg_ht_rates;
573 548
574 /* Use intersection of working rates and valid rates */
575 for (i = 0; i < rateset->rs_nrates; i++) { 549 for (i = 0; i < rateset->rs_nrates; i++) {
576 for (j = 0; j < rate_table->rate_cnt; j++) { 550 for (j = 0; j < rate_table->rate_cnt; j++) {
577 u32 phy = rate_table->info[j].phy; 551 phy = rate_table->info[j].phy;
578 u16 rate_flags = rate_table->info[j].rate_flags; 552 rate_flags = rate_table->info[j].rate_flags;
579 u8 rate = rateset->rs_rates[i]; 553 rate = rateset->rs_rates[i];
580 u8 dot11rate = rate_table->info[j].dot11rate; 554 dot11rate = rate_table->info[j].dot11rate;
581 555
582 if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) || 556 if (legacy &&
583 !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) || 557 !ath_rc_check_legacy(rate, dot11rate,
584 !WLAN_RC_PHY_HT_VALID(rate_flags, capflag)) 558 rate_flags, phy, capflag))
559 continue;
560
561 if (!legacy &&
562 !ath_rc_check_ht(rate, dot11rate,
563 rate_flags, phy, capflag))
585 continue; 564 continue;
586 565
587 if (!ath_rc_valid_phyrate(phy, capflag, 0)) 566 if (!ath_rc_valid_phyrate(phy, capflag, 0))
588 continue; 567 continue;
589 568
590 ath_rc_priv->valid_phy_rateidx[phy] 569 valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
591 [ath_rc_priv->valid_phy_ratecnt[phy]] = j; 570 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
592 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 571 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
593 ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1); 572 ath_rc_priv->valid_rate_index[j] = true;
594 hi = max(hi, j); 573 hi = max(hi, j);
595 } 574 }
596 } 575 }
@@ -598,13 +577,10 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
598 return hi; 577 return hi;
599} 578}
600 579
601/* Finds the highest rate index we can use */ 580static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
602static u8 ath_rc_get_highest_rix(struct ath_softc *sc, 581 int *is_probing)
603 struct ath_rate_priv *ath_rc_priv,
604 const struct ath_rate_table *rate_table,
605 int *is_probing,
606 bool legacy)
607{ 582{
583 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
608 u32 best_thruput, this_thruput, now_msec; 584 u32 best_thruput, this_thruput, now_msec;
609 u8 rate, next_rate, best_rate, maxindex, minindex; 585 u8 rate, next_rate, best_rate, maxindex, minindex;
610 int8_t index = 0; 586 int8_t index = 0;
@@ -624,8 +600,6 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
624 u8 per_thres; 600 u8 per_thres;
625 601
626 rate = ath_rc_priv->valid_rate_index[index]; 602 rate = ath_rc_priv->valid_rate_index[index];
627 if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
628 continue;
629 if (rate > ath_rc_priv->rate_max_phy) 603 if (rate > ath_rc_priv->rate_max_phy)
630 continue; 604 continue;
631 605
@@ -707,8 +681,6 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
707 rate->count = tries; 681 rate->count = tries;
708 rate->idx = rate_table->info[rix].ratecode; 682 rate->idx = rate_table->info[rix].ratecode;
709 683
710 if (txrc->short_preamble)
711 rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
712 if (txrc->rts || rtsctsenable) 684 if (txrc->rts || rtsctsenable)
713 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; 685 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
714 686
@@ -726,37 +698,25 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
726 const struct ath_rate_table *rate_table, 698 const struct ath_rate_table *rate_table,
727 struct ieee80211_tx_info *tx_info) 699 struct ieee80211_tx_info *tx_info)
728{ 700{
729 struct ieee80211_tx_rate *rates = tx_info->control.rates; 701 struct ieee80211_bss_conf *bss_conf;
730 int i = 0, rix = 0, cix, enable_g_protection = 0;
731 702
732 /* get the cix for the lowest valid rix */ 703 if (!tx_info->control.vif)
733 for (i = 3; i >= 0; i--) { 704 return;
734 if (rates[i].count && (rates[i].idx >= 0)) { 705 /*
735 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 706 * For legacy frames, mac80211 takes care of CTS protection.
736 break; 707 */
737 } 708 if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
738 } 709 return;
739 cix = rate_table->info[rix].ctrl_rate;
740 710
741 /* All protection frames are transmited at 2Mb/s for 802.11g, 711 bss_conf = &tx_info->control.vif->bss_conf;
742 * otherwise we transmit them at 1Mb/s */ 712
743 if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ && 713 if (!bss_conf->basic_rates)
744 !conf_is_ht(&sc->hw->conf)) 714 return;
745 enable_g_protection = 1;
746 715
747 /* 716 /*
748 * If 802.11g protection is enabled, determine whether to use RTS/CTS or 717 * For now, use the lowest allowed basic rate for HT frames.
749 * just CTS. Note that this is only done for OFDM/HT unicast frames.
750 */ 718 */
751 if ((tx_info->control.vif && 719 tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
752 tx_info->control.vif->bss_conf.use_cts_prot) &&
753 (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
754 WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
755 rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
756 cix = rate_table->info[enable_g_protection].ctrl_rate;
757 }
758
759 tx_info->control.rts_cts_rate_idx = cix;
760} 720}
761 721
762static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, 722static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
@@ -789,14 +749,8 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
789 try_per_rate = 4; 749 try_per_rate = 4;
790 750
791 rate_table = ath_rc_priv->rate_table; 751 rate_table = ath_rc_priv->rate_table;
792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 752 rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
793 &is_probe, false);
794 753
795 /*
796 * If we're in HT mode and both us and our peer supports LDPC.
797 * We don't need to check our own device's capabilities as our own
798 * ht capabilities would have already been intersected with our peer's.
799 */
800 if (conf_is_ht(&sc->hw->conf) && 754 if (conf_is_ht(&sc->hw->conf) &&
801 (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) 755 (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
802 tx_info->flags |= IEEE80211_TX_CTL_LDPC; 756 tx_info->flags |= IEEE80211_TX_CTL_LDPC;
@@ -806,52 +760,45 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
806 tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT); 760 tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
807 761
808 if (is_probe) { 762 if (is_probe) {
809 /* set one try for probe rates. For the 763 /*
810 * probes don't enable rts */ 764 * Set one try for probe rates. For the
765 * probes don't enable RTS.
766 */
811 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 767 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
812 1, rix, 0); 768 1, rix, 0);
813 769 /*
814 /* Get the next tried/allowed rate. No RTS for the next series 770 * Get the next tried/allowed rate.
815 * after the probe rate 771 * No RTS for the next series after the probe rate.
816 */ 772 */
817 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); 773 ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
818 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 774 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
819 try_per_rate, rix, 0); 775 try_per_rate, rix, 0);
820 776
821 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 777 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
822 } else { 778 } else {
823 /* Set the chosen rate. No RTS for first series entry. */ 779 /*
780 * Set the chosen rate. No RTS for first series entry.
781 */
824 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 782 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
825 try_per_rate, rix, 0); 783 try_per_rate, rix, 0);
826 } 784 }
827 785
828 /* Fill in the other rates for multirate retry */ 786 for ( ; i < 4; i++) {
829 for ( ; i < 3; i++) { 787 /*
788 * Use twice the number of tries for the last MRR segment.
789 */
790 if (i + 1 == 4)
791 try_per_rate = 8;
792
793 ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
830 794
831 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); 795 /*
832 /* All other rates in the series have RTS enabled */ 796 * All other rates in the series have RTS enabled.
797 */
833 ath_rc_rate_set_series(rate_table, &rates[i], txrc, 798 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
834 try_per_rate, rix, 1); 799 try_per_rate, rix, 1);
835 } 800 }
836 801
837 /* Use twice the number of tries for the last MRR segment. */
838 try_per_rate = 8;
839
840 /*
841 * If the last rate in the rate series is MCS and has
842 * more than 80% of per thresh, then use a legacy rate
843 * as last retry to ensure that the frame is tried in both
844 * MCS and legacy rate.
845 */
846 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
847 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
848 (ath_rc_priv->per[rix] > 45))
849 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
850 &is_probe, true);
851
852 /* All other rates in the series have RTS enabled */
853 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
854 try_per_rate, rix, 1);
855 /* 802 /*
856 * NB:Change rate series to enable aggregation when operating 803 * NB:Change rate series to enable aggregation when operating
857 * at lower MCS rates. When first rate in series is MCS2 804 * at lower MCS rates. When first rate in series is MCS2
@@ -893,7 +840,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
893 rates[0].count = ATH_TXMAXTRY; 840 rates[0].count = ATH_TXMAXTRY;
894 } 841 }
895 842
896 /* Setup RTS/CTS */
897 ath_rc_rate_set_rtscts(sc, rate_table, tx_info); 843 ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
898} 844}
899 845
@@ -1046,9 +992,6 @@ static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
1046 stats->per = per; 992 stats->per = per;
1047} 993}
1048 994
1049/* Update PER, RSSI and whatever else that the code thinks it is doing.
1050 If you can make sense of all this, you really need to go out more. */
1051
1052static void ath_rc_update_ht(struct ath_softc *sc, 995static void ath_rc_update_ht(struct ath_softc *sc,
1053 struct ath_rate_priv *ath_rc_priv, 996 struct ath_rate_priv *ath_rc_priv,
1054 struct ieee80211_tx_info *tx_info, 997 struct ieee80211_tx_info *tx_info,
@@ -1077,8 +1020,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1077 if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 && 1020 if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
1078 rate_table->info[tx_rate].ratekbps <= 1021 rate_table->info[tx_rate].ratekbps <=
1079 rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) { 1022 rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
1080 ath_rc_get_lower_rix(rate_table, ath_rc_priv, 1023 ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
1081 (u8)tx_rate, &ath_rc_priv->rate_max_phy); 1024 &ath_rc_priv->rate_max_phy);
1082 1025
1083 /* Don't probe for a little while. */ 1026 /* Don't probe for a little while. */
1084 ath_rc_priv->probe_time = now_msec; 1027 ath_rc_priv->probe_time = now_msec;
@@ -1122,25 +1065,42 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1122 1065
1123} 1066}
1124 1067
1068static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1069{
1070 struct ath_rc_stats *stats;
1071
1072 stats = &rc->rcstats[final_rate];
1073 stats->success++;
1074}
1125 1075
1126static void ath_rc_tx_status(struct ath_softc *sc, 1076static void ath_rc_tx_status(struct ath_softc *sc,
1127 struct ath_rate_priv *ath_rc_priv, 1077 struct ath_rate_priv *ath_rc_priv,
1128 struct ieee80211_tx_info *tx_info, 1078 struct sk_buff *skb)
1129 int final_ts_idx, int xretries, int long_retry)
1130{ 1079{
1131 const struct ath_rate_table *rate_table; 1080 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1132 struct ieee80211_tx_rate *rates = tx_info->status.rates; 1081 struct ieee80211_tx_rate *rates = tx_info->status.rates;
1082 struct ieee80211_tx_rate *rate;
1083 int final_ts_idx = 0, xretries = 0, long_retry = 0;
1133 u8 flags; 1084 u8 flags;
1134 u32 i = 0, rix; 1085 u32 i = 0, rix;
1135 1086
1136 rate_table = ath_rc_priv->rate_table; 1087 for (i = 0; i < sc->hw->max_rates; i++) {
1088 rate = &tx_info->status.rates[i];
1089 if (rate->idx < 0 || !rate->count)
1090 break;
1091
1092 final_ts_idx = i;
1093 long_retry = rate->count - 1;
1094 }
1095
1096 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
1097 xretries = 1;
1137 1098
1138 /* 1099 /*
1139 * If the first rate is not the final index, there 1100 * If the first rate is not the final index, there
1140 * are intermediate rate failures to be processed. 1101 * are intermediate rate failures to be processed.
1141 */ 1102 */
1142 if (final_ts_idx != 0) { 1103 if (final_ts_idx != 0) {
1143 /* Process intermediate rates that failed.*/
1144 for (i = 0; i < final_ts_idx ; i++) { 1104 for (i = 0; i < final_ts_idx ; i++) {
1145 if (rates[i].count != 0 && (rates[i].idx >= 0)) { 1105 if (rates[i].count != 0 && (rates[i].idx >= 0)) {
1146 flags = rates[i].flags; 1106 flags = rates[i].flags;
@@ -1152,32 +1112,24 @@ static void ath_rc_tx_status(struct ath_softc *sc,
1152 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) 1112 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
1153 return; 1113 return;
1154 1114
1155 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 1115 rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
1156 ath_rc_update_ht(sc, ath_rc_priv, tx_info, 1116 ath_rc_update_ht(sc, ath_rc_priv, tx_info,
1157 rix, xretries ? 1 : 2, 1117 rix, xretries ? 1 : 2,
1158 rates[i].count); 1118 rates[i].count);
1159 } 1119 }
1160 } 1120 }
1161 } else {
1162 /*
1163 * Handle the special case of MIMO PS burst, where the second
1164 * aggregate is sent out with only one rate and one try.
1165 * Treating it as an excessive retry penalizes the rate
1166 * inordinately.
1167 */
1168 if (rates[0].count == 1 && xretries == 1)
1169 xretries = 2;
1170 } 1121 }
1171 1122
1172 flags = rates[i].flags; 1123 flags = rates[final_ts_idx].flags;
1173 1124
1174 /* If HT40 and we have switched mode from 40 to 20 => don't update */ 1125 /* If HT40 and we have switched mode from 40 to 20 => don't update */
1175 if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && 1126 if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
1176 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) 1127 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
1177 return; 1128 return;
1178 1129
1179 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 1130 rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
1180 ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry); 1131 ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
1132 ath_debug_stat_rc(ath_rc_priv, rix);
1181} 1133}
1182 1134
1183static const 1135static const
@@ -1185,8 +1137,6 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1185 enum ieee80211_band band, 1137 enum ieee80211_band band,
1186 bool is_ht) 1138 bool is_ht)
1187{ 1139{
1188 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1189
1190 switch(band) { 1140 switch(band) {
1191 case IEEE80211_BAND_2GHZ: 1141 case IEEE80211_BAND_2GHZ:
1192 if (is_ht) 1142 if (is_ht)
@@ -1197,34 +1147,25 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1197 return &ar5416_11na_ratetable; 1147 return &ar5416_11na_ratetable;
1198 return &ar5416_11a_ratetable; 1148 return &ar5416_11a_ratetable;
1199 default: 1149 default:
1200 ath_dbg(common, CONFIG, "Invalid band\n");
1201 return NULL; 1150 return NULL;
1202 } 1151 }
1203} 1152}
1204 1153
1205static void ath_rc_init(struct ath_softc *sc, 1154static void ath_rc_init(struct ath_softc *sc,
1206 struct ath_rate_priv *ath_rc_priv, 1155 struct ath_rate_priv *ath_rc_priv)
1207 struct ieee80211_supported_band *sband,
1208 struct ieee80211_sta *sta,
1209 const struct ath_rate_table *rate_table)
1210{ 1156{
1157 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
1211 struct ath_rateset *rateset = &ath_rc_priv->neg_rates; 1158 struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
1212 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1159 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1213 struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
1214 u8 i, j, k, hi = 0, hthi = 0; 1160 u8 i, j, k, hi = 0, hthi = 0;
1215 1161
1216 /* Initial rate table size. Will change depending
1217 * on the working rate set */
1218 ath_rc_priv->rate_table_size = RATE_TABLE_SIZE; 1162 ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
1219 1163
1220 /* Initialize thresholds according to the global rate table */
1221 for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) { 1164 for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
1222 ath_rc_priv->per[i] = 0; 1165 ath_rc_priv->per[i] = 0;
1166 ath_rc_priv->valid_rate_index[i] = 0;
1223 } 1167 }
1224 1168
1225 /* Determine the valid rates */
1226 ath_rc_init_valid_rate_idx(ath_rc_priv);
1227
1228 for (i = 0; i < WLAN_RC_PHY_MAX; i++) { 1169 for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
1229 for (j = 0; j < RATE_TABLE_SIZE; j++) 1170 for (j = 0; j < RATE_TABLE_SIZE; j++)
1230 ath_rc_priv->valid_phy_rateidx[i][j] = 0; 1171 ath_rc_priv->valid_phy_rateidx[i][j] = 0;
@@ -1232,25 +1173,19 @@ static void ath_rc_init(struct ath_softc *sc,
1232 } 1173 }
1233 1174
1234 if (!rateset->rs_nrates) { 1175 if (!rateset->rs_nrates) {
1235 /* No working rate, just initialize valid rates */ 1176 hi = ath_rc_init_validrates(ath_rc_priv);
1236 hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
1237 ath_rc_priv->ht_cap);
1238 } else { 1177 } else {
1239 /* Use intersection of working rates and valid rates */ 1178 hi = ath_rc_setvalid_rates(ath_rc_priv, true);
1240 hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table, 1179
1241 rateset, ath_rc_priv->ht_cap); 1180 if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
1242 if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) { 1181 hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
1243 hthi = ath_rc_setvalid_htrates(ath_rc_priv, 1182
1244 rate_table,
1245 ht_mcs,
1246 ath_rc_priv->ht_cap);
1247 }
1248 hi = max(hi, hthi); 1183 hi = max(hi, hthi);
1249 } 1184 }
1250 1185
1251 ath_rc_priv->rate_table_size = hi + 1; 1186 ath_rc_priv->rate_table_size = hi + 1;
1252 ath_rc_priv->rate_max_phy = 0; 1187 ath_rc_priv->rate_max_phy = 0;
1253 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); 1188 WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1254 1189
1255 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { 1190 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
1256 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) { 1191 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1258,28 +1193,26 @@ static void ath_rc_init(struct ath_softc *sc,
1258 ath_rc_priv->valid_phy_rateidx[i][j]; 1193 ath_rc_priv->valid_phy_rateidx[i][j];
1259 } 1194 }
1260 1195
1261 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) 1196 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
1262 || !ath_rc_priv->valid_phy_ratecnt[i]) 1197 !ath_rc_priv->valid_phy_ratecnt[i])
1263 continue; 1198 continue;
1264 1199
1265 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1]; 1200 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
1266 } 1201 }
1267 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); 1202 WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1268 BUG_ON(k > RATE_TABLE_SIZE); 1203 WARN_ON(k > RATE_TABLE_SIZE);
1269 1204
1270 ath_rc_priv->max_valid_rate = k; 1205 ath_rc_priv->max_valid_rate = k;
1271 ath_rc_sort_validrates(rate_table, ath_rc_priv); 1206 ath_rc_sort_validrates(ath_rc_priv);
1272 ath_rc_priv->rate_max_phy = (k > 4) ? 1207 ath_rc_priv->rate_max_phy = (k > 4) ?
1273 ath_rc_priv->valid_rate_index[k-4] : 1208 ath_rc_priv->valid_rate_index[k-4] :
1274 ath_rc_priv->valid_rate_index[k-1]; 1209 ath_rc_priv->valid_rate_index[k-1];
1275 ath_rc_priv->rate_table = rate_table;
1276 1210
1277 ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n", 1211 ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
1278 ath_rc_priv->ht_cap); 1212 ath_rc_priv->ht_cap);
1279} 1213}
1280 1214
1281static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, 1215static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
1282 bool is_cw40, bool is_sgi)
1283{ 1216{
1284 u8 caps = 0; 1217 u8 caps = 0;
1285 1218
@@ -1289,10 +1222,14 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
1289 caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG; 1222 caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
1290 else if (sta->ht_cap.mcs.rx_mask[1]) 1223 else if (sta->ht_cap.mcs.rx_mask[1])
1291 caps |= WLAN_RC_DS_FLAG; 1224 caps |= WLAN_RC_DS_FLAG;
1292 if (is_cw40) 1225 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
1293 caps |= WLAN_RC_40_FLAG; 1226 caps |= WLAN_RC_40_FLAG;
1294 if (is_sgi) 1227 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
1295 caps |= WLAN_RC_SGI_FLAG; 1228 caps |= WLAN_RC_SGI_FLAG;
1229 } else {
1230 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
1231 caps |= WLAN_RC_SGI_FLAG;
1232 }
1296 } 1233 }
1297 1234
1298 return caps; 1235 return caps;
@@ -1319,15 +1256,6 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
1319/* mac80211 Rate Control callbacks */ 1256/* mac80211 Rate Control callbacks */
1320/***********************************/ 1257/***********************************/
1321 1258
1322static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1323{
1324 struct ath_rc_stats *stats;
1325
1326 stats = &rc->rcstats[final_rate];
1327 stats->success++;
1328}
1329
1330
1331static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, 1259static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1332 struct ieee80211_sta *sta, void *priv_sta, 1260 struct ieee80211_sta *sta, void *priv_sta,
1333 struct sk_buff *skb) 1261 struct sk_buff *skb)
@@ -1335,22 +1263,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1335 struct ath_softc *sc = priv; 1263 struct ath_softc *sc = priv;
1336 struct ath_rate_priv *ath_rc_priv = priv_sta; 1264 struct ath_rate_priv *ath_rc_priv = priv_sta;
1337 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1265 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1338 struct ieee80211_hdr *hdr; 1266 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1339 int final_ts_idx = 0, tx_status = 0; 1267 __le16 fc = hdr->frame_control;
1340 int long_retry = 0;
1341 __le16 fc;
1342 int i;
1343
1344 hdr = (struct ieee80211_hdr *)skb->data;
1345 fc = hdr->frame_control;
1346 for (i = 0; i < sc->hw->max_rates; i++) {
1347 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
1348 if (rate->idx < 0 || !rate->count)
1349 break;
1350
1351 final_ts_idx = i;
1352 long_retry = rate->count - 1;
1353 }
1354 1268
1355 if (!priv_sta || !ieee80211_is_data(fc)) 1269 if (!priv_sta || !ieee80211_is_data(fc))
1356 return; 1270 return;
@@ -1363,11 +1277,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1363 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) 1277 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
1364 return; 1278 return;
1365 1279
1366 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) 1280 ath_rc_tx_status(sc, ath_rc_priv, skb);
1367 tx_status = 1;
1368
1369 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
1370 long_retry);
1371 1281
1372 /* Check if aggregation has to be enabled for this tid */ 1282 /* Check if aggregation has to be enabled for this tid */
1373 if (conf_is_ht(&sc->hw->conf) && 1283 if (conf_is_ht(&sc->hw->conf) &&
@@ -1383,19 +1293,14 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1383 ieee80211_start_tx_ba_session(sta, tid, 0); 1293 ieee80211_start_tx_ba_session(sta, tid, 0);
1384 } 1294 }
1385 } 1295 }
1386
1387 ath_debug_stat_rc(ath_rc_priv,
1388 ath_rc_get_rateindex(ath_rc_priv->rate_table,
1389 &tx_info->status.rates[final_ts_idx]));
1390} 1296}
1391 1297
1392static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, 1298static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1393 struct ieee80211_sta *sta, void *priv_sta) 1299 struct ieee80211_sta *sta, void *priv_sta)
1394{ 1300{
1395 struct ath_softc *sc = priv; 1301 struct ath_softc *sc = priv;
1302 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1396 struct ath_rate_priv *ath_rc_priv = priv_sta; 1303 struct ath_rate_priv *ath_rc_priv = priv_sta;
1397 const struct ath_rate_table *rate_table;
1398 bool is_cw40, is_sgi = false;
1399 int i, j = 0; 1304 int i, j = 0;
1400 1305
1401 for (i = 0; i < sband->n_bitrates; i++) { 1306 for (i = 0; i < sband->n_bitrates; i++) {
@@ -1417,20 +1322,15 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1417 ath_rc_priv->neg_ht_rates.rs_nrates = j; 1322 ath_rc_priv->neg_ht_rates.rs_nrates = j;
1418 } 1323 }
1419 1324
1420 is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40); 1325 ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
1421 1326 sta->ht_cap.ht_supported);
1422 if (is_cw40) 1327 if (!ath_rc_priv->rate_table) {
1423 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); 1328 ath_err(common, "No rate table chosen\n");
1424 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) 1329 return;
1425 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); 1330 }
1426
1427 /* Choose rate table first */
1428
1429 rate_table = ath_choose_rate_table(sc, sband->band,
1430 sta->ht_cap.ht_supported);
1431 1331
1432 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi); 1332 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
1433 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1333 ath_rc_init(sc, priv_sta);
1434} 1334}
1435 1335
1436static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, 1336static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
@@ -1439,40 +1339,14 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1439{ 1339{
1440 struct ath_softc *sc = priv; 1340 struct ath_softc *sc = priv;
1441 struct ath_rate_priv *ath_rc_priv = priv_sta; 1341 struct ath_rate_priv *ath_rc_priv = priv_sta;
1442 const struct ath_rate_table *rate_table = NULL;
1443 bool oper_cw40 = false, oper_sgi;
1444 bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
1445 bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
1446
1447 /* FIXME: Handle AP mode later when we support CWM */
1448 1342
1449 if (changed & IEEE80211_RC_BW_CHANGED) { 1343 if (changed & IEEE80211_RC_BW_CHANGED) {
1450 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) 1344 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
1451 return; 1345 ath_rc_init(sc, priv_sta);
1452 1346
1453 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) 1347 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
1454 oper_cw40 = true; 1348 "Operating HT Bandwidth changed to: %d\n",
1455 1349 sc->hw->conf.channel_type);
1456 if (oper_cw40)
1457 oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1458 true : false;
1459 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
1460 oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1461 true : false;
1462 else
1463 oper_sgi = false;
1464
1465 if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
1466 rate_table = ath_choose_rate_table(sc, sband->band,
1467 sta->ht_cap.ht_supported);
1468 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
1469 oper_cw40, oper_sgi);
1470 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1471
1472 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
1473 "Operating HT Bandwidth changed to: %d\n",
1474 sc->hw->conf.channel_type);
1475 }
1476 } 1350 }
1477} 1351}
1478 1352
@@ -1484,7 +1358,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1484 struct ath_rate_priv *rc = file->private_data; 1358 struct ath_rate_priv *rc = file->private_data;
1485 char *buf; 1359 char *buf;
1486 unsigned int len = 0, max; 1360 unsigned int len = 0, max;
1487 int i = 0; 1361 int rix;
1488 ssize_t retval; 1362 ssize_t retval;
1489 1363
1490 if (rc->rate_table == NULL) 1364 if (rc->rate_table == NULL)
@@ -1500,7 +1374,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1500 "HT", "MCS", "Rate", 1374 "HT", "MCS", "Rate",
1501 "Success", "Retries", "XRetries", "PER"); 1375 "Success", "Retries", "XRetries", "PER");
1502 1376
1503 for (i = 0; i < rc->rate_table_size; i++) { 1377 for (rix = 0; rix < rc->max_valid_rate; rix++) {
1378 u8 i = rc->valid_rate_index[rix];
1504 u32 ratekbps = rc->rate_table->info[i].ratekbps; 1379 u32 ratekbps = rc->rate_table->info[i].ratekbps;
1505 struct ath_rc_stats *stats = &rc->rcstats[i]; 1380 struct ath_rc_stats *stats = &rc->rcstats[i];
1506 char mcs[5]; 1381 char mcs[5];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 75f8e9b06b28..268e67dc5fb2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -160,10 +160,6 @@ struct ath_rate_table {
160 u32 user_ratekbps; 160 u32 user_ratekbps;
161 u8 ratecode; 161 u8 ratecode;
162 u8 dot11rate; 162 u8 dot11rate;
163 u8 ctrl_rate;
164 u8 cw40index;
165 u8 sgi_index;
166 u8 ht_index;
167 } info[RATE_TABLE_SIZE]; 163 } info[RATE_TABLE_SIZE];
168 u32 probe_interval; 164 u32 probe_interval;
169 u8 initial_ratemax; 165 u8 initial_ratemax;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4480c0cc655f..83d16e7ed272 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -424,8 +424,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
424 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 424 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
425 425
426 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 426 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
427 /* The following may also be needed for other older chips */ 427 /* This is needed for older chips */
428 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 428 if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
429 rfilt |= ATH9K_RX_FILTER_PROM; 429 rfilt |= ATH9K_RX_FILTER_PROM;
430 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 430 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
431 } 431 }
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 87cac8eb7834..4e6760f8596d 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -801,6 +801,8 @@
801#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */ 801#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
802#define AR_SREV_VERSION_9462 0x280 802#define AR_SREV_VERSION_9462 0x280
803#define AR_SREV_REVISION_9462_20 2 803#define AR_SREV_REVISION_9462_20 2
804#define AR_SREV_VERSION_9565 0x2C0
805#define AR_SREV_REVISION_9565_10 0
804#define AR_SREV_VERSION_9550 0x400 806#define AR_SREV_VERSION_9550 0x400
805 807
806#define AR_SREV_5416(_ah) \ 808#define AR_SREV_5416(_ah) \
@@ -909,6 +911,13 @@
909 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ 911 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
910 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20)) 912 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
911 913
914#define AR_SREV_9565(_ah) \
915 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
916
917#define AR_SREV_9565_10(_ah) \
918 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
919 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
920
912#define AR_SREV_9550(_ah) \ 921#define AR_SREV_9550(_ah) \
913 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550)) 922 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
914 923
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 44a08eb53c62..a483d518758c 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -497,7 +497,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
497 497
498 REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr); 498 REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
499 499
500 if (AR_SREV_9462(ah)) { 500 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
501 /* 501 /*
502 * this is needed to prevent the chip waking up 502 * this is needed to prevent the chip waking up
503 * the host within 3-4 seconds with certain 503 * the host within 3-4 seconds with certain
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0d4155aec48d..36618e3a5e60 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -568,7 +568,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
568 if (!an->sleeping) { 568 if (!an->sleeping) {
569 ath_tx_queue_tid(txq, tid); 569 ath_tx_queue_tid(txq, tid);
570 570
571 if (ts->ts_status & ATH9K_TXERR_FILT) 571 if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
572 tid->ac->clear_ps_filter = true; 572 tid->ac->clear_ps_filter = true;
573 } 573 }
574 } 574 }
@@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1773 TX_STAT_INC(txq->axq_qnum, queued); 1773 TX_STAT_INC(txq->axq_qnum, queued);
1774} 1774}
1775 1775
1776static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, 1776static void setup_frame_info(struct ieee80211_hw *hw,
1777 struct ieee80211_sta *sta,
1778 struct sk_buff *skb,
1777 int framelen) 1779 int framelen)
1778{ 1780{
1779 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1781 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1780 struct ieee80211_sta *sta = tx_info->control.sta;
1781 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1782 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1782 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1783 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1783 const struct ieee80211_rate *rate; 1784 const struct ieee80211_rate *rate;
@@ -1819,10 +1820,14 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1819{ 1820{
1820 struct ath_hw *ah = sc->sc_ah; 1821 struct ath_hw *ah = sc->sc_ah;
1821 struct ath9k_channel *curchan = ah->curchan; 1822 struct ath9k_channel *curchan = ah->curchan;
1823
1822 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && 1824 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1823 (curchan->channelFlags & CHANNEL_5GHZ) && 1825 (curchan->channelFlags & CHANNEL_5GHZ) &&
1824 (chainmask == 0x7) && (rate < 0x90)) 1826 (chainmask == 0x7) && (rate < 0x90))
1825 return 0x3; 1827 return 0x3;
1828 else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
1829 IS_CCK_RATE(rate))
1830 return 0x2;
1826 else 1831 else
1827 return chainmask; 1832 return chainmask;
1828} 1833}
@@ -1935,7 +1940,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1935{ 1940{
1936 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1941 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1937 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1942 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1938 struct ieee80211_sta *sta = info->control.sta; 1943 struct ieee80211_sta *sta = txctl->sta;
1939 struct ieee80211_vif *vif = info->control.vif; 1944 struct ieee80211_vif *vif = info->control.vif;
1940 struct ath_softc *sc = hw->priv; 1945 struct ath_softc *sc = hw->priv;
1941 struct ath_txq *txq = txctl->txq; 1946 struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1984,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1979 !ieee80211_is_data(hdr->frame_control)) 1984 !ieee80211_is_data(hdr->frame_control))
1980 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1985 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1981 1986
1982 setup_frame_info(hw, skb, frmlen); 1987 setup_frame_info(hw, sta, skb, frmlen);
1983 1988
1984 /* 1989 /*
1985 * At this point, the vif, hw_key and sta pointers in the tx control 1990 * At this point, the vif, hw_key and sta pointers in the tx control
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 376be11161c0..2aa4a59c72c8 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -425,6 +425,7 @@ struct ar9170 {
425 bool rx_has_plcp; 425 bool rx_has_plcp;
426 struct sk_buff *rx_failover; 426 struct sk_buff *rx_failover;
427 int rx_failover_missing; 427 int rx_failover_missing;
428 u32 ampdu_ref;
428 429
429 /* FIFO for collecting outstanding BlockAckRequest */ 430 /* FIFO for collecting outstanding BlockAckRequest */
430 struct list_head bar_list[__AR9170_NUM_TXQ]; 431 struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
577void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); 578void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
578 579
579/* TX */ 580/* TX */
580void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 581void carl9170_op_tx(struct ieee80211_hw *hw,
582 struct ieee80211_tx_control *control,
583 struct sk_buff *skb);
581void carl9170_tx_janitor(struct work_struct *work); 584void carl9170_tx_janitor(struct work_struct *work);
582void carl9170_tx_process_status(struct ar9170 *ar, 585void carl9170_tx_process_status(struct ar9170 *ar,
583 const struct carl9170_rsp *cmd); 586 const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index c5ca6f1f5836..24ac2876a733 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -341,6 +341,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
341 if (SUPP(CARL9170FW_WLANTX_CAB)) { 341 if (SUPP(CARL9170FW_WLANTX_CAB)) {
342 if_comb_types |= 342 if_comb_types |=
343 BIT(NL80211_IFTYPE_AP) | 343 BIT(NL80211_IFTYPE_AP) |
344 BIT(NL80211_IFTYPE_MESH_POINT) |
344 BIT(NL80211_IFTYPE_P2P_GO); 345 BIT(NL80211_IFTYPE_P2P_GO);
345 } 346 }
346 } 347 }
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 53415bfd8bef..e3b1b6e87760 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -304,7 +304,8 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
304 struct ath_common *common = &ar->common; 304 struct ath_common *common = &ar->common;
305 u8 *mac_addr, *bssid; 305 u8 *mac_addr, *bssid;
306 u32 cam_mode = AR9170_MAC_CAM_DEFAULTS; 306 u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
307 u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS; 307 u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS |
308 AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE;
308 u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG | 309 u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
309 AR9170_MAC_RX_CTRL_SHORT_FILTER; 310 AR9170_MAC_RX_CTRL_SHORT_FILTER;
310 u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS; 311 u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
@@ -318,10 +319,10 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
318 bssid = common->curbssid; 319 bssid = common->curbssid;
319 320
320 switch (vif->type) { 321 switch (vif->type) {
321 case NL80211_IFTYPE_MESH_POINT:
322 case NL80211_IFTYPE_ADHOC: 322 case NL80211_IFTYPE_ADHOC:
323 cam_mode |= AR9170_MAC_CAM_IBSS; 323 cam_mode |= AR9170_MAC_CAM_IBSS;
324 break; 324 break;
325 case NL80211_IFTYPE_MESH_POINT:
325 case NL80211_IFTYPE_AP: 326 case NL80211_IFTYPE_AP:
326 cam_mode |= AR9170_MAC_CAM_AP; 327 cam_mode |= AR9170_MAC_CAM_AP;
327 328
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 858e58dfc4dc..67997b39aba7 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -616,10 +616,12 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
616 616
617 goto unlock; 617 goto unlock;
618 618
619 case NL80211_IFTYPE_MESH_POINT:
619 case NL80211_IFTYPE_AP: 620 case NL80211_IFTYPE_AP:
620 if ((vif->type == NL80211_IFTYPE_STATION) || 621 if ((vif->type == NL80211_IFTYPE_STATION) ||
621 (vif->type == NL80211_IFTYPE_WDS) || 622 (vif->type == NL80211_IFTYPE_WDS) ||
622 (vif->type == NL80211_IFTYPE_AP)) 623 (vif->type == NL80211_IFTYPE_AP) ||
624 (vif->type == NL80211_IFTYPE_MESH_POINT))
623 break; 625 break;
624 626
625 err = -EBUSY; 627 err = -EBUSY;
@@ -1147,6 +1149,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1147 break; 1149 break;
1148 case WLAN_CIPHER_SUITE_CCMP: 1150 case WLAN_CIPHER_SUITE_CCMP:
1149 ktype = AR9170_ENC_ALG_AESCCMP; 1151 ktype = AR9170_ENC_ALG_AESCCMP;
1152 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1150 break; 1153 break;
1151 default: 1154 default:
1152 return -EOPNOTSUPP; 1155 return -EOPNOTSUPP;
@@ -1778,6 +1781,7 @@ void *carl9170_alloc(size_t priv_size)
1778 hw->wiphy->interface_modes = 0; 1781 hw->wiphy->interface_modes = 0;
1779 1782
1780 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 1783 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1784 IEEE80211_HW_MFP_CAPABLE |
1781 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 1785 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1782 IEEE80211_HW_SUPPORTS_PS | 1786 IEEE80211_HW_SUPPORTS_PS |
1783 IEEE80211_HW_PS_NULLFUNC_STACK | 1787 IEEE80211_HW_PS_NULLFUNC_STACK |
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 6f6a34155667..a0b723078547 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -206,6 +206,7 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
206 206
207 case NL80211_IFTYPE_AP: 207 case NL80211_IFTYPE_AP:
208 case NL80211_IFTYPE_ADHOC: 208 case NL80211_IFTYPE_ADHOC:
209 case NL80211_IFTYPE_MESH_POINT:
209 carl9170_update_beacon(ar, true); 210 carl9170_update_beacon(ar, true);
210 break; 211 break;
211 212
@@ -623,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
623#undef TID_CHECK 624#undef TID_CHECK
624} 625}
625 626
626static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms) 627static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
628 struct ieee80211_rx_status *rx_status)
627{ 629{
628 __le16 fc; 630 __le16 fc;
629 631
@@ -636,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
636 return true; 638 return true;
637 } 639 }
638 640
641 rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
642 rx_status->ampdu_reference = ar->ampdu_ref;
643
639 /* 644 /*
640 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts 645 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
641 * certain frame types can be part of an aMPDU. 646 * certain frame types can be part of an aMPDU.
@@ -684,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
684 if (unlikely(len < sizeof(*mac))) 689 if (unlikely(len < sizeof(*mac)))
685 goto drop; 690 goto drop;
686 691
692 memset(&status, 0, sizeof(status));
693
687 mpdu_len = len - sizeof(*mac); 694 mpdu_len = len - sizeof(*mac);
688 695
689 mac = (void *)(buf + mpdu_len); 696 mac = (void *)(buf + mpdu_len);
690 mac_status = mac->status; 697 mac_status = mac->status;
691 switch (mac_status & AR9170_RX_STATUS_MPDU) { 698 switch (mac_status & AR9170_RX_STATUS_MPDU) {
692 case AR9170_RX_STATUS_MPDU_FIRST: 699 case AR9170_RX_STATUS_MPDU_FIRST:
700 ar->ampdu_ref++;
693 /* Aggregated MPDUs start with an PLCP header */ 701 /* Aggregated MPDUs start with an PLCP header */
694 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) { 702 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
695 head = (void *) buf; 703 head = (void *) buf;
@@ -720,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
720 break; 728 break;
721 729
722 case AR9170_RX_STATUS_MPDU_LAST: 730 case AR9170_RX_STATUS_MPDU_LAST:
731 status.flag |= RX_FLAG_AMPDU_IS_LAST;
732
723 /* 733 /*
724 * The last frame of an A-MPDU has an extra tail 734 * The last frame of an A-MPDU has an extra tail
725 * which does contain the phy status of the whole 735 * which does contain the phy status of the whole
726 * aggregate. 736 * aggregate.
727 */ 737 */
728
729 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) { 738 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
730 mpdu_len -= sizeof(struct ar9170_rx_phystatus); 739 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
731 phy = (void *)(buf + mpdu_len); 740 phy = (void *)(buf + mpdu_len);
@@ -773,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
773 if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN))) 782 if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
774 goto drop; 783 goto drop;
775 784
776 memset(&status, 0, sizeof(status));
777 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) 785 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
778 goto drop; 786 goto drop;
779 787
780 if (!carl9170_ampdu_check(ar, buf, mac_status)) 788 if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
781 goto drop; 789 goto drop;
782 790
783 if (phy) 791 if (phy)
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6a8681407a1d..84377cf580e0 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
867 return false; 867 return false;
868} 868}
869 869
870static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) 870static int carl9170_tx_prepare(struct ar9170 *ar,
871 struct ieee80211_sta *sta,
872 struct sk_buff *skb)
871{ 873{
872 struct ieee80211_hdr *hdr; 874 struct ieee80211_hdr *hdr;
873 struct _carl9170_tx_superframe *txc; 875 struct _carl9170_tx_superframe *txc;
874 struct carl9170_vif_info *cvif; 876 struct carl9170_vif_info *cvif;
875 struct ieee80211_tx_info *info; 877 struct ieee80211_tx_info *info;
876 struct ieee80211_tx_rate *txrate; 878 struct ieee80211_tx_rate *txrate;
877 struct ieee80211_sta *sta;
878 struct carl9170_tx_info *arinfo; 879 struct carl9170_tx_info *arinfo;
879 unsigned int hw_queue; 880 unsigned int hw_queue;
880 int i; 881 int i;
@@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
910 else 911 else
911 cvif = NULL; 912 cvif = NULL;
912 913
913 sta = info->control.sta;
914
915 txc = (void *)skb_push(skb, sizeof(*txc)); 914 txc = (void *)skb_push(skb, sizeof(*txc));
916 memset(txc, 0, sizeof(*txc)); 915 memset(txc, 0, sizeof(*txc));
917 916
@@ -1457,20 +1456,21 @@ err_unlock_rcu:
1457 return false; 1456 return false;
1458} 1457}
1459 1458
1460void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1459void carl9170_op_tx(struct ieee80211_hw *hw,
1460 struct ieee80211_tx_control *control,
1461 struct sk_buff *skb)
1461{ 1462{
1462 struct ar9170 *ar = hw->priv; 1463 struct ar9170 *ar = hw->priv;
1463 struct ieee80211_tx_info *info; 1464 struct ieee80211_tx_info *info;
1464 struct ieee80211_sta *sta; 1465 struct ieee80211_sta *sta = control->sta;
1465 bool run; 1466 bool run;
1466 1467
1467 if (unlikely(!IS_STARTED(ar))) 1468 if (unlikely(!IS_STARTED(ar)))
1468 goto err_free; 1469 goto err_free;
1469 1470
1470 info = IEEE80211_SKB_CB(skb); 1471 info = IEEE80211_SKB_CB(skb);
1471 sta = info->control.sta;
1472 1472
1473 if (unlikely(carl9170_tx_prepare(ar, skb))) 1473 if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
1474 goto err_free; 1474 goto err_free;
1475 1475
1476 carl9170_tx_accounting(ar, skb); 1476 carl9170_tx_accounting(ar, skb);
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 4648bbf76abc..098fe9ee7096 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -4,6 +4,7 @@ b43-y += tables.o
4b43-$(CONFIG_B43_PHY_N) += tables_nphy.o 4b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
5b43-$(CONFIG_B43_PHY_N) += radio_2055.o 5b43-$(CONFIG_B43_PHY_N) += radio_2055.o
6b43-$(CONFIG_B43_PHY_N) += radio_2056.o 6b43-$(CONFIG_B43_PHY_N) += radio_2056.o
7b43-$(CONFIG_B43_PHY_N) += radio_2057.o
7b43-y += phy_common.o 8b43-y += phy_common.o
8b43-y += phy_g.o 9b43-y += phy_g.o
9b43-y += phy_a.o 10b43-y += phy_a.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 7c899fc7ddd0..b298e5d68be2 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -241,16 +241,18 @@ enum {
241#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ 241#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */
242#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ 242#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */
243#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ 243#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */
244#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ 244#define B43_SHM_SH_HOSTF1 0x005E /* Hostflags 1 for ucode options */
245#define B43_SHM_SH_HOSTFMI 0x0060 /* Hostflags for ucode options (middle) */ 245#define B43_SHM_SH_HOSTF2 0x0060 /* Hostflags 2 for ucode options */
246#define B43_SHM_SH_HOSTFHI 0x0062 /* Hostflags for ucode options (high) */ 246#define B43_SHM_SH_HOSTF3 0x0062 /* Hostflags 3 for ucode options */
247#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ 247#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */
248#define B43_SHM_SH_RADAR 0x0066 /* Radar register */ 248#define B43_SHM_SH_RADAR 0x0066 /* Radar register */
249#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ 249#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */
250#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ 250#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */
251#define B43_SHM_SH_HOSTF4 0x0078 /* Hostflags 4 for ucode options */
251#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ 252#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */
252#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */ 253#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */
253#define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */ 254#define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */
255#define B43_SHM_SH_HOSTF5 0x00D4 /* Hostflags 5 for ucode options */
254#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ 256#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */
255/* TSSI information */ 257/* TSSI information */
256#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */ 258#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */
@@ -415,6 +417,8 @@ enum {
415#define B43_PHYTYPE_HT 0x07 417#define B43_PHYTYPE_HT 0x07
416#define B43_PHYTYPE_LCN 0x08 418#define B43_PHYTYPE_LCN 0x08
417#define B43_PHYTYPE_LCNXN 0x09 419#define B43_PHYTYPE_LCNXN 0x09
420#define B43_PHYTYPE_LCN40 0x0a
421#define B43_PHYTYPE_AC 0x0b
418 422
419/* PHYRegisters */ 423/* PHYRegisters */
420#define B43_PHY_ILT_A_CTRL 0x0072 424#define B43_PHY_ILT_A_CTRL 0x0072
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a140165dfee0..73730e94e0ac 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -533,11 +533,11 @@ u64 b43_hf_read(struct b43_wldev *dev)
533{ 533{
534 u64 ret; 534 u64 ret;
535 535
536 ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); 536 ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3);
537 ret <<= 16; 537 ret <<= 16;
538 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI); 538 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2);
539 ret <<= 16; 539 ret <<= 16;
540 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); 540 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1);
541 541
542 return ret; 542 return ret;
543} 543}
@@ -550,9 +550,9 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
550 lo = (value & 0x00000000FFFFULL); 550 lo = (value & 0x00000000FFFFULL);
551 mi = (value & 0x0000FFFF0000ULL) >> 16; 551 mi = (value & 0x0000FFFF0000ULL) >> 16;
552 hi = (value & 0xFFFF00000000ULL) >> 32; 552 hi = (value & 0xFFFF00000000ULL) >> 32;
553 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo); 553 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo);
554 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi); 554 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi);
555 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); 555 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi);
556} 556}
557 557
558/* Read the firmware capabilities bitmask (Opensource firmware only) */ 558/* Read the firmware capabilities bitmask (Opensource firmware only) */
@@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work)
3412} 3412}
3413 3413
3414static void b43_op_tx(struct ieee80211_hw *hw, 3414static void b43_op_tx(struct ieee80211_hw *hw,
3415 struct sk_buff *skb) 3415 struct ieee80211_tx_control *control,
3416 struct sk_buff *skb)
3416{ 3417{
3417 struct b43_wl *wl = hw_to_b43_wl(hw); 3418 struct b43_wl *wl = hw_to_b43_wl(hw);
3418 3419
@@ -4282,6 +4283,35 @@ out:
4282 return err; 4283 return err;
4283} 4284}
4284 4285
4286static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
4287{
4288 switch (phy_type) {
4289 case B43_PHYTYPE_A:
4290 return "A";
4291 case B43_PHYTYPE_B:
4292 return "B";
4293 case B43_PHYTYPE_G:
4294 return "G";
4295 case B43_PHYTYPE_N:
4296 return "N";
4297 case B43_PHYTYPE_LP:
4298 return "LP";
4299 case B43_PHYTYPE_SSLPN:
4300 return "SSLPN";
4301 case B43_PHYTYPE_HT:
4302 return "HT";
4303 case B43_PHYTYPE_LCN:
4304 return "LCN";
4305 case B43_PHYTYPE_LCNXN:
4306 return "LCNXN";
4307 case B43_PHYTYPE_LCN40:
4308 return "LCN40";
4309 case B43_PHYTYPE_AC:
4310 return "AC";
4311 }
4312 return "UNKNOWN";
4313}
4314
4285/* Get PHY and RADIO versioning numbers */ 4315/* Get PHY and RADIO versioning numbers */
4286static int b43_phy_versioning(struct b43_wldev *dev) 4316static int b43_phy_versioning(struct b43_wldev *dev)
4287{ 4317{
@@ -4342,13 +4372,13 @@ static int b43_phy_versioning(struct b43_wldev *dev)
4342 unsupported = 1; 4372 unsupported = 1;
4343 } 4373 }
4344 if (unsupported) { 4374 if (unsupported) {
4345 b43err(dev->wl, "FOUND UNSUPPORTED PHY " 4375 b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n",
4346 "(Analog %u, Type %u, Revision %u)\n", 4376 analog_type, phy_type, b43_phy_name(dev, phy_type),
4347 analog_type, phy_type, phy_rev); 4377 phy_rev);
4348 return -EOPNOTSUPP; 4378 return -EOPNOTSUPP;
4349 } 4379 }
4350 b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", 4380 b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n",
4351 analog_type, phy_type, phy_rev); 4381 analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
4352 4382
4353 /* Get RADIO versioning */ 4383 /* Get RADIO versioning */
4354 if (dev->dev->core_rev >= 24) { 4384 if (dev->dev->core_rev >= 24) {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3f8883b14d9c..f01676ac481b 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -240,6 +240,21 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
240 (b43_radio_read16(dev, offset) & mask) | set); 240 (b43_radio_read16(dev, offset) & mask) | set);
241} 241}
242 242
243bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
244 u16 value, int delay, int timeout)
245{
246 u16 val;
247 int i;
248
249 for (i = 0; i < timeout; i += delay) {
250 val = b43_radio_read(dev, offset);
251 if ((val & mask) == value)
252 return true;
253 udelay(delay);
254 }
255 return false;
256}
257
243u16 b43_phy_read(struct b43_wldev *dev, u16 reg) 258u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
244{ 259{
245 assert_mac_suspended(dev); 260 assert_mac_suspended(dev);
@@ -428,7 +443,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
428 average = (a + b + c + d + 2) / 4; 443 average = (a + b + c + d + 2) / 4;
429 if (is_ofdm) { 444 if (is_ofdm) {
430 /* Adjust for CCK-boost */ 445 /* Adjust for CCK-boost */
431 if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO) 446 if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1)
432 & B43_HF_CCKBOOST) 447 & B43_HF_CCKBOOST)
433 average = (average >= 13) ? (average - 13) : 0; 448 average = (average >= 13) ? (average - 13) : 0;
434 } 449 }
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9233b13fc16d..f1b999349876 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -365,6 +365,12 @@ void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
365void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set); 365void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
366 366
367/** 367/**
368 * b43_radio_wait_value - Waits for a given value in masked register read
369 */
370bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
371 u16 value, int delay, int timeout);
372
373/**
368 * b43_radio_lock - Lock firmware radio register access 374 * b43_radio_lock - Lock firmware radio register access
369 */ 375 */
370void b43_radio_lock(struct b43_wldev *dev); 376void b43_radio_lock(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b92bb9c92ad1..3c35382ee6c2 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -32,6 +32,7 @@
32#include "tables_nphy.h" 32#include "tables_nphy.h"
33#include "radio_2055.h" 33#include "radio_2055.h"
34#include "radio_2056.h" 34#include "radio_2056.h"
35#include "radio_2057.h"
35#include "main.h" 36#include "main.h"
36 37
37struct nphy_txgains { 38struct nphy_txgains {
@@ -126,6 +127,46 @@ ok:
126 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); 127 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
127} 128}
128 129
130/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
131static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
132 u16 value, u8 core, bool off,
133 u8 override)
134{
135 const struct nphy_rf_control_override_rev7 *e;
136 u16 en_addrs[3][2] = {
137 { 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
138 };
139 u16 en_addr;
140 u16 en_mask = field;
141 u16 val_addr;
142 u8 i;
143
144 /* Remember: we can get NULL! */
145 e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
146
147 for (i = 0; i < 2; i++) {
148 if (override >= ARRAY_SIZE(en_addrs)) {
149 b43err(dev->wl, "Invalid override value %d\n", override);
150 return;
151 }
152 en_addr = en_addrs[override][i];
153
154 val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
155
156 if (off) {
157 b43_phy_mask(dev, en_addr, ~en_mask);
158 if (e) /* Do it safer, better than wl */
159 b43_phy_mask(dev, val_addr, ~e->val_mask);
160 } else {
161 if (!core || (core & (1 << i))) {
162 b43_phy_set(dev, en_addr, en_mask);
163 if (e)
164 b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
165 }
166 }
167 }
168}
169
129/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ 170/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
130static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, 171static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
131 u16 value, u8 core, bool off) 172 u16 value, u8 core, bool off)
@@ -459,6 +500,137 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
459} 500}
460 501
461/************************************************** 502/**************************************************
503 * Radio 0x2057
504 **************************************************/
505
506/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
507static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
508{
509 struct b43_phy *phy = &dev->phy;
510 u16 tmp;
511
512 if (phy->radio_rev == 5) {
513 b43_phy_mask(dev, 0x342, ~0x2);
514 udelay(10);
515 b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
516 b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
517 }
518
519 b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
520 udelay(10);
521 b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
522 if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
523 b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
524 return 0;
525 }
526 b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
527 tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
528 b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
529
530 if (phy->radio_rev == 5) {
531 b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
532 b43_radio_mask(dev, 0x1ca, ~0x2);
533 }
534 if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
535 b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
536 b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
537 tmp << 2);
538 }
539
540 return tmp & 0x3e;
541}
542
543/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
544static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
545{
546 struct b43_phy *phy = &dev->phy;
547 bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 ||
548 phy->radio_rev == 6);
549 u16 tmp;
550
551 if (special) {
552 b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
553 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
554 } else {
555 b43_radio_write(dev, 0x1AE, 0x61);
556 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
557 }
558 b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
559 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
560 if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
561 5000000))
562 b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
563 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
564 if (special) {
565 b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
566 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
567 } else {
568 b43_radio_write(dev, 0x1AE, 0x69);
569 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
570 }
571 b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
572 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
573 if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
574 5000000))
575 b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
576 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
577 if (special) {
578 b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
579 b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
580 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
581 } else {
582 b43_radio_write(dev, 0x1AE, 0x73);
583 b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
584 b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
585 }
586 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
587 if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
588 5000000)) {
589 b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
590 return 0;
591 }
592 tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
593 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
594 return tmp;
595}
596
597static void b43_radio_2057_init_pre(struct b43_wldev *dev)
598{
599 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU);
600 /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
601 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE);
602 b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
603 b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU);
604}
605
606static void b43_radio_2057_init_post(struct b43_wldev *dev)
607{
608 b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
609
610 b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
611 b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
612 mdelay(2);
613 b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
614 b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
615
616 if (dev->phy.n->init_por) {
617 b43_radio_2057_rcal(dev);
618 b43_radio_2057_rccal(dev);
619 }
620 b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
621
622 dev->phy.n->init_por = false;
623}
624
625/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
626static void b43_radio_2057_init(struct b43_wldev *dev)
627{
628 b43_radio_2057_init_pre(dev);
629 r2057_upload_inittabs(dev);
630 b43_radio_2057_init_post(dev);
631}
632
633/**************************************************
462 * Radio 0x2056 634 * Radio 0x2056
463 **************************************************/ 635 **************************************************/
464 636
@@ -545,7 +717,9 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
545 enum ieee80211_band band = b43_current_band(dev->wl); 717 enum ieee80211_band band = b43_current_band(dev->wl);
546 u16 offset; 718 u16 offset;
547 u8 i; 719 u8 i;
548 u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost; 720 u16 bias, cbias;
721 u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
722 u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
549 723
550 B43_WARN_ON(dev->phy.rev < 3); 724 B43_WARN_ON(dev->phy.rev < 3);
551 725
@@ -630,7 +804,56 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
630 b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); 804 b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
631 } 805 }
632 } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { 806 } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
633 /* TODO */ 807 u16 freq = dev->phy.channel_freq;
808 if (freq < 5100) {
809 paa_boost = 0xA;
810 pada_boost = 0x77;
811 pgaa_boost = 0xF;
812 mixa_boost = 0xF;
813 } else if (freq < 5340) {
814 paa_boost = 0x8;
815 pada_boost = 0x77;
816 pgaa_boost = 0xFB;
817 mixa_boost = 0xF;
818 } else if (freq < 5650) {
819 paa_boost = 0x0;
820 pada_boost = 0x77;
821 pgaa_boost = 0xB;
822 mixa_boost = 0xF;
823 } else {
824 paa_boost = 0x0;
825 pada_boost = 0x77;
826 if (freq != 5825)
827 pgaa_boost = -(freq - 18) / 36 + 168;
828 else
829 pgaa_boost = 6;
830 mixa_boost = 0xF;
831 }
832
833 for (i = 0; i < 2; i++) {
834 offset = i ? B2056_TX1 : B2056_TX0;
835
836 b43_radio_write(dev,
837 offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
838 b43_radio_write(dev,
839 offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
840 b43_radio_write(dev,
841 offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
842 b43_radio_write(dev,
843 offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
844 b43_radio_write(dev,
845 offset | B2056_TX_TXSPARE1, 0x30);
846 b43_radio_write(dev,
847 offset | B2056_TX_PA_SPARE2, 0xee);
848 b43_radio_write(dev,
849 offset | B2056_TX_PADA_CASCBIAS, 0x03);
850 b43_radio_write(dev,
851 offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
852 b43_radio_write(dev,
853 offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
854 b43_radio_write(dev,
855 offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
856 }
634 } 857 }
635 858
636 udelay(50); 859 udelay(50);
@@ -643,6 +866,37 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
643 udelay(300); 866 udelay(300);
644} 867}
645 868
869static u8 b43_radio_2056_rcal(struct b43_wldev *dev)
870{
871 struct b43_phy *phy = &dev->phy;
872 u16 mast2, tmp;
873
874 if (phy->rev != 3)
875 return 0;
876
877 mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2);
878 b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7);
879
880 udelay(10);
881 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
882 udelay(10);
883 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09);
884
885 if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
886 1000000)) {
887 b43err(dev->wl, "Radio recalibration timeout\n");
888 return 0;
889 }
890
891 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
892 tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT);
893 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00);
894
895 b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2);
896
897 return tmp & 0x1f;
898}
899
646static void b43_radio_init2056_pre(struct b43_wldev *dev) 900static void b43_radio_init2056_pre(struct b43_wldev *dev)
647{ 901{
648 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, 902 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
@@ -665,10 +919,8 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
665 b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); 919 b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
666 b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); 920 b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
667 b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); 921 b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
668 /* 922 if (dev->phy.n->init_por)
669 if (nphy->init_por) 923 b43_radio_2056_rcal(dev);
670 Call Radio 2056 Recalibrate
671 */
672} 924}
673 925
674/* 926/*
@@ -680,6 +932,8 @@ static void b43_radio_init2056(struct b43_wldev *dev)
680 b43_radio_init2056_pre(dev); 932 b43_radio_init2056_pre(dev);
681 b2056_upload_inittabs(dev, 0, 0); 933 b2056_upload_inittabs(dev, 0, 0);
682 b43_radio_init2056_post(dev); 934 b43_radio_init2056_post(dev);
935
936 dev->phy.n->init_por = false;
683} 937}
684 938
685/************************************************** 939/**************************************************
@@ -753,8 +1007,6 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
753{ 1007{
754 struct b43_phy_n *nphy = dev->phy.n; 1008 struct b43_phy_n *nphy = dev->phy.n;
755 struct ssb_sprom *sprom = dev->dev->bus_sprom; 1009 struct ssb_sprom *sprom = dev->dev->bus_sprom;
756 int i;
757 u16 val;
758 bool workaround = false; 1010 bool workaround = false;
759 1011
760 if (sprom->revision < 4) 1012 if (sprom->revision < 4)
@@ -777,15 +1029,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
777 b43_radio_set(dev, B2055_CAL_MISC, 0x1); 1029 b43_radio_set(dev, B2055_CAL_MISC, 0x1);
778 msleep(1); 1030 msleep(1);
779 b43_radio_set(dev, B2055_CAL_MISC, 0x40); 1031 b43_radio_set(dev, B2055_CAL_MISC, 0x40);
780 for (i = 0; i < 200; i++) { 1032 if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
781 val = b43_radio_read(dev, B2055_CAL_COUT2);
782 if (val & 0x80) {
783 i = 0;
784 break;
785 }
786 udelay(10);
787 }
788 if (i)
789 b43err(dev->wl, "radio post init timeout\n"); 1033 b43err(dev->wl, "radio post init timeout\n");
790 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); 1034 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
791 b43_switch_channel(dev, dev->phy.channel); 1035 b43_switch_channel(dev, dev->phy.channel);
@@ -1860,12 +2104,334 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
1860/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 2104/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
1861static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev) 2105static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
1862{ 2106{
1863 if (dev->phy.rev >= 3) 2107 if (dev->phy.rev >= 7)
2108 ; /* TODO */
2109 else if (dev->phy.rev >= 3)
1864 b43_nphy_gain_ctl_workarounds_rev3plus(dev); 2110 b43_nphy_gain_ctl_workarounds_rev3plus(dev);
1865 else 2111 else
1866 b43_nphy_gain_ctl_workarounds_rev1_2(dev); 2112 b43_nphy_gain_ctl_workarounds_rev1_2(dev);
1867} 2113}
1868 2114
2115/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
2116static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
2117{
2118 if (!offset)
2119 offset = (dev->phy.is_40mhz) ? 0x159 : 0x154;
2120 return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
2121}
2122
/*
 * Hardware workarounds for N-PHY revision >= 7 (2057 radio).
 *
 * This is a long, order-sensitive sequence of PHY, radio and internal-table
 * register fixups, presumably mirroring the vendor driver's workaround
 * routine (cf. the bcm-v4 reverse-engineering specs referenced elsewhere in
 * this file) — do not reorder the writes.
 */
static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct b43_phy *phy = &dev->phy;

	/* RX-to-TX RF sequence (events + per-event delays) used with an
	 * internal PA; the two arrays are index-paired. */
	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
					0x1F };
	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };

	/* Values bulk-written into N-PHY table 7 at the offsets in the names */
	u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
	u8 ntab7_138_146[] = { 0x11, 0x11 };
	u8 ntab7_133[] = { 0x77, 0x11, 0x11 };

	u16 lpf_20, lpf_40, lpf_11b;
	u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
	u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
	bool rccal_ovrd = false;

	u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
	u16 bias, conv, filt;

	u32 tmp32;
	u8 core;

	/* Rev-7-only frequency gain table fixups (low/high byte per write) */
	if (phy->rev == 7) {
		b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
	}
	if (phy->rev <= 8) {
		b43_phy_write(dev, 0x23F, 0x1B0);
		b43_phy_write(dev, 0x240, 0x1B0);
	}
	if (phy->rev >= 8)
		b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);

	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2);
	/* Clear the top byte of table 30, entry 0 */
	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
	tmp32 &= 0xffffff;
	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);

	/* Internal PA boards get a dedicated RX->TX sequence */
	if (b43_nphy_ipa(dev))
		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
				rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));

	b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000);
	b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000);

	/* Current LPF bandwidth control values for 20 MHz, 40 MHz and 11b */
	lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
	lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
	lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
	/* Decide whether to override the RC calibration results, and with
	 * which big-cap (bcap) / small-cap (scap) values, per radio rev. */
	if (b43_nphy_ipa(dev)) {
		if ((phy->radio_rev == 5 && phy->is_40mhz) ||
		    phy->radio_rev == 7 || phy->radio_rev == 8) {
			bcap_val = b43_radio_read(dev, 0x16b);
			scap_val = b43_radio_read(dev, 0x16a);
			scap_val_11b = scap_val;
			bcap_val_11b = bcap_val;
			if (phy->radio_rev == 5 && phy->is_40mhz) {
				scap_val_11n_20 = scap_val;
				bcap_val_11n_20 = bcap_val;
				scap_val_11n_40 = bcap_val_11n_40 = 0xc;
				rccal_ovrd = true;
			} else { /* Rev 7/8 */
				lpf_20 = 4;
				lpf_11b = 1;
				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
					scap_val_11n_20 = 0xc;
					bcap_val_11n_20 = 0xc;
					scap_val_11n_40 = 0xa;
					bcap_val_11n_40 = 0xa;
				} else {
					scap_val_11n_20 = 0x14;
					bcap_val_11n_20 = 0x14;
					scap_val_11n_40 = 0xf;
					bcap_val_11n_40 = 0xf;
				}
				rccal_ovrd = true;
			}
		}
	} else {
		if (phy->radio_rev == 5) {
			lpf_20 = 1;
			lpf_40 = 3;
			bcap_val = b43_radio_read(dev, 0x16b);
			scap_val = b43_radio_read(dev, 0x16a);
			scap_val_11b = scap_val;
			bcap_val_11b = bcap_val;
			scap_val_11n_20 = 0x11;
			scap_val_11n_40 = 0x11;
			bcap_val_11n_20 = 0x13;
			bcap_val_11n_40 = 0x13;
			rccal_ovrd = true;
		}
	}
	if (rccal_ovrd) {
		/* Pack bcap/scap/lpf into RX->TX LUT words:
		 * bits 8+: bcap, bits 3-7: scap, bits 0-2: lpf ctl */
		rx2tx_lut_20_11b = (bcap_val_11b << 8) |
				   (scap_val_11b << 3) |
				   lpf_11b;
		rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
				   (scap_val_11n_20 << 3) |
				   lpf_20;
		rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
				   (scap_val_11n_40 << 3) |
				   lpf_40;
		/* Table 7 holds a 16-entry stride per core */
		for (core = 0; core < 2; core++) {
			b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
				       rx2tx_lut_20_11b);
			b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
				       rx2tx_lut_20_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
				       rx2tx_lut_20_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
				       rx2tx_lut_40_11n);
		}
		b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2);
	}
	b43_phy_write(dev, 0x32F, 0x3);
	if (phy->radio_rev == 4 || phy->radio_rev == 6)
		b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0);

	/* Bias/converter/filter values for radio revs 3/4/6; the SPROM
	 * IPALVLSHIFT_3P3 board flag selects an alternate set plus extra
	 * radio setup. */
	if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
		if (sprom->revision &&
		    sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) {
			b43_radio_write(dev, 0x5, 0x05);
			b43_radio_write(dev, 0x6, 0x30);
			b43_radio_write(dev, 0x7, 0x00);
			b43_radio_set(dev, 0x4f, 0x1);
			b43_radio_set(dev, 0xd4, 0x1);
			bias = 0x1f;
			conv = 0x6f;
			filt = 0xaa;
		} else {
			bias = 0x2b;
			conv = 0x7f;
			filt = 0xee;
		}
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			/* Same triple, different register set per core */
			for (core = 0; core < 2; core++) {
				if (core == 0) {
					b43_radio_write(dev, 0x5F, bias);
					b43_radio_write(dev, 0x64, conv);
					b43_radio_write(dev, 0x66, filt);
				} else {
					b43_radio_write(dev, 0xE8, bias);
					b43_radio_write(dev, 0xE9, conv);
					b43_radio_write(dev, 0xEB, filt);
				}
			}
		}
	}

	/* PA-type and band dependent radio tweaks */
	if (b43_nphy_ipa(dev)) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
			    phy->radio_rev == 6) {
				for (core = 0; core < 2; core++) {
					if (core == 0)
						b43_radio_write(dev, 0x51,
								0x7f);
					else
						b43_radio_write(dev, 0xd6,
								0x7f);
				}
			}
			if (phy->radio_rev == 3) {
				for (core = 0; core < 2; core++) {
					if (core == 0) {
						b43_radio_write(dev, 0x64,
								0x13);
						b43_radio_write(dev, 0x5F,
								0x1F);
						b43_radio_write(dev, 0x66,
								0xEE);
						b43_radio_write(dev, 0x59,
								0x8A);
						b43_radio_write(dev, 0x80,
								0x3E);
					} else {
						b43_radio_write(dev, 0x69,
								0x13);
						b43_radio_write(dev, 0xE8,
								0x1F);
						b43_radio_write(dev, 0xEB,
								0xEE);
						b43_radio_write(dev, 0xDE,
								0x8A);
						b43_radio_write(dev, 0x105,
								0x3E);
					}
				}
			} else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
				if (!phy->is_40mhz) {
					b43_radio_write(dev, 0x5F, 0x14);
					b43_radio_write(dev, 0xE8, 0x12);
				} else {
					b43_radio_write(dev, 0x5F, 0x16);
					b43_radio_write(dev, 0xE8, 0x16);
				}
			}
		} else {
			/* 5 GHz with internal PA: special-case two frequency
			 * sub-ranges (band edges, presumably) */
			u16 freq = phy->channel_freq;
			if ((freq >= 5180 && freq <= 5230) ||
			    (freq >= 5745 && freq <= 5805)) {
				b43_radio_write(dev, 0x7D, 0xFF);
				b43_radio_write(dev, 0xFE, 0xFF);
			}
		}
	} else {
		/* External PA: per-core radio setup, except radio rev 5 */
		if (phy->radio_rev != 5) {
			for (core = 0; core < 2; core++) {
				if (core == 0) {
					b43_radio_write(dev, 0x5c, 0x61);
					b43_radio_write(dev, 0x51, 0x70);
				} else {
					b43_radio_write(dev, 0xe1, 0x61);
					b43_radio_write(dev, 0xd6, 0x70);
				}
			}
		}
	}

	if (phy->radio_rev == 4) {
		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
		for (core = 0; core < 2; core++) {
			if (core == 0) {
				b43_radio_write(dev, 0x1a1, 0x00);
				b43_radio_write(dev, 0x1a2, 0x3f);
				b43_radio_write(dev, 0x1a6, 0x3f);
			} else {
				b43_radio_write(dev, 0x1a7, 0x00);
				b43_radio_write(dev, 0x1ab, 0x3f);
				b43_radio_write(dev, 0x1ac, 0x3f);
			}
		}
	} else {
		/* AFE control dance: raise bit 2, toggle bit 0 overrides,
		 * write table 8, then drop bit 2 again — order matters. */
		b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4);
		b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4);

		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);

		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4);
		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4);
	}

	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);

	/* Misc internal table fixups */
	b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
	b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
	b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
	b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);

	/* Bandwidth-dependent entries in table 16 */
	if (!phy->is_40mhz) {
		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
	} else {
		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
	}

	b43_nphy_gain_ctl_workarounds(dev);

	/* TODO
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4,
			    aux_adc_vmid_rev7_core0);
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4,
			    aux_adc_vmid_rev7_core1);
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4,
			    aux_adc_gain_rev7);
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4,
			    aux_adc_gain_rev7);
	*/
}
2434
1869static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) 2435static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1870{ 2436{
1871 struct b43_phy_n *nphy = dev->phy.n; 2437 struct b43_phy_n *nphy = dev->phy.n;
@@ -1916,7 +2482,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1916 rx2tx_delays[6] = 1; 2482 rx2tx_delays[6] = 1;
1917 rx2tx_events[7] = 0x1F; 2483 rx2tx_events[7] = 0x1F;
1918 } 2484 }
1919 b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 2485 b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays,
1920 ARRAY_SIZE(rx2tx_events)); 2486 ARRAY_SIZE(rx2tx_events));
1921 } 2487 }
1922 2488
@@ -1926,8 +2492,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1926 2492
1927 b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); 2493 b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
1928 2494
1929 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); 2495 if (!dev->phy.is_40mhz) {
1930 b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); 2496 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
2497 b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
2498 } else {
2499 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D);
2500 b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D);
2501 }
1931 2502
1932 b43_nphy_gain_ctl_workarounds(dev); 2503 b43_nphy_gain_ctl_workarounds(dev);
1933 2504
@@ -1963,13 +2534,14 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1963 b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); 2534 b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
1964 2535
1965 if (dev->phy.rev == 4 && 2536 if (dev->phy.rev == 4 &&
1966 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 2537 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1967 b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, 2538 b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
1968 0x70); 2539 0x70);
1969 b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, 2540 b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
1970 0x70); 2541 0x70);
1971 } 2542 }
1972 2543
2544 /* Dropped probably-always-true condition */
1973 b43_phy_write(dev, 0x224, 0x03eb); 2545 b43_phy_write(dev, 0x224, 0x03eb);
1974 b43_phy_write(dev, 0x225, 0x03eb); 2546 b43_phy_write(dev, 0x225, 0x03eb);
1975 b43_phy_write(dev, 0x226, 0x0341); 2547 b43_phy_write(dev, 0x226, 0x0341);
@@ -1982,6 +2554,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1982 b43_phy_write(dev, 0x22d, 0x042b); 2554 b43_phy_write(dev, 0x22d, 0x042b);
1983 b43_phy_write(dev, 0x22e, 0x0381); 2555 b43_phy_write(dev, 0x22e, 0x0381);
1984 b43_phy_write(dev, 0x22f, 0x0381); 2556 b43_phy_write(dev, 0x22f, 0x0381);
2557
2558 if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
2559 ; /* TODO: 0x0080000000000000 HF */
1985} 2560}
1986 2561
1987static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) 2562static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -1996,6 +2571,12 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
1996 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; 2571 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
1997 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; 2572 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
1998 2573
2574 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
2575 dev->dev->board_type == 0x8B) {
2576 delays1[0] = 0x1;
2577 delays1[5] = 0x14;
2578 }
2579
1999 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && 2580 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
2000 nphy->band5g_pwrgain) { 2581 nphy->band5g_pwrgain) {
2001 b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); 2582 b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
@@ -2007,8 +2588,10 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
2007 2588
2008 b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A); 2589 b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
2009 b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A); 2590 b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
2010 b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA); 2591 if (dev->phy.rev < 3) {
2011 b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA); 2592 b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
2593 b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
2594 }
2012 2595
2013 if (dev->phy.rev < 2) { 2596 if (dev->phy.rev < 2) {
2014 b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000); 2597 b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
@@ -2024,11 +2607,6 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
2024 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); 2607 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
2025 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); 2608 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
2026 2609
2027 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
2028 dev->dev->board_type == 0x8B) {
2029 delays1[0] = 0x1;
2030 delays1[5] = 0x14;
2031 }
2032 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); 2610 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
2033 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); 2611 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
2034 2612
@@ -2055,11 +2633,13 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
2055 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD); 2633 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
2056 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); 2634 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
2057 2635
2058 b43_phy_mask(dev, B43_NPHY_PIL_DW1, 2636 if (dev->phy.rev < 3) {
2059 ~B43_NPHY_PIL_DW_64QAM & 0xFFFF); 2637 b43_phy_mask(dev, B43_NPHY_PIL_DW1,
2060 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); 2638 ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
2061 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); 2639 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
2062 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); 2640 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
2641 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
2642 }
2063 2643
2064 if (dev->phy.rev == 2) 2644 if (dev->phy.rev == 2)
2065 b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 2645 b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
@@ -2083,7 +2663,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
2083 b43_phy_set(dev, B43_NPHY_IQFLIP, 2663 b43_phy_set(dev, B43_NPHY_IQFLIP,
2084 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); 2664 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
2085 2665
2086 if (dev->phy.rev >= 3) 2666 if (dev->phy.rev >= 7)
2667 b43_nphy_workarounds_rev7plus(dev);
2668 else if (dev->phy.rev >= 3)
2087 b43_nphy_workarounds_rev3plus(dev); 2669 b43_nphy_workarounds_rev3plus(dev);
2088 else 2670 else
2089 b43_nphy_workarounds_rev1_2(dev); 2671 b43_nphy_workarounds_rev1_2(dev);
@@ -2542,7 +3124,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
2542 b43_nphy_ipa_internal_tssi_setup(dev); 3124 b43_nphy_ipa_internal_tssi_setup(dev);
2543 3125
2544 if (phy->rev >= 7) 3126 if (phy->rev >= 7)
2545 ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */ 3127 b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0);
2546 else if (phy->rev >= 3) 3128 else if (phy->rev >= 3)
2547 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false); 3129 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
2548 3130
@@ -2554,7 +3136,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
2554 b43_nphy_rssi_select(dev, 0, 0); 3136 b43_nphy_rssi_select(dev, 0, 0);
2555 3137
2556 if (phy->rev >= 7) 3138 if (phy->rev >= 7)
2557 ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */ 3139 b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0);
2558 else if (phy->rev >= 3) 3140 else if (phy->rev >= 3)
2559 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true); 3141 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
2560 3142
@@ -4761,6 +5343,7 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
4761 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); 5343 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
4762 nphy->spur_avoid = (phy->rev >= 3) ? 5344 nphy->spur_avoid = (phy->rev >= 3) ?
4763 B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; 5345 B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
5346 nphy->init_por = true;
4764 nphy->gain_boost = true; /* this way we follow wl, assume it is true */ 5347 nphy->gain_boost = true; /* this way we follow wl, assume it is true */
4765 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ 5348 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
4766 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ 5349 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4801,6 +5384,8 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
4801 nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; 5384 nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
4802 nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; 5385 nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
4803 } 5386 }
5387
5388 nphy->init_por = true;
4804} 5389}
4805 5390
4806static void b43_nphy_op_free(struct b43_wldev *dev) 5391static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -4887,7 +5472,9 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
4887 if (blocked) { 5472 if (blocked) {
4888 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, 5473 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
4889 ~B43_NPHY_RFCTL_CMD_CHIP0PU); 5474 ~B43_NPHY_RFCTL_CMD_CHIP0PU);
4890 if (dev->phy.rev >= 3) { 5475 if (dev->phy.rev >= 7) {
5476 /* TODO */
5477 } else if (dev->phy.rev >= 3) {
4891 b43_radio_mask(dev, 0x09, ~0x2); 5478 b43_radio_mask(dev, 0x09, ~0x2);
4892 5479
4893 b43_radio_write(dev, 0x204D, 0); 5480 b43_radio_write(dev, 0x204D, 0);
@@ -4905,7 +5492,10 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
4905 b43_radio_write(dev, 0x3064, 0); 5492 b43_radio_write(dev, 0x3064, 0);
4906 } 5493 }
4907 } else { 5494 } else {
4908 if (dev->phy.rev >= 3) { 5495 if (dev->phy.rev >= 7) {
5496 b43_radio_2057_init(dev);
5497 b43_switch_channel(dev, dev->phy.channel);
5498 } else if (dev->phy.rev >= 3) {
4909 b43_radio_init2056(dev); 5499 b43_radio_init2056(dev);
4910 b43_switch_channel(dev, dev->phy.channel); 5500 b43_switch_channel(dev, dev->phy.channel);
4911 } else { 5501 } else {
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fd12b386fea1..092c0140c249 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -785,6 +785,7 @@ struct b43_phy_n {
785 u16 papd_epsilon_offset[2]; 785 u16 papd_epsilon_offset[2];
786 s32 preamble_override; 786 s32 preamble_override;
787 u32 bb_mult_save; 787 u32 bb_mult_save;
788 bool init_por;
788 789
789 bool gain_boost; 790 bool gain_boost;
790 bool elna_gain_config; 791 bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
new file mode 100644
index 000000000000..d61d6830c5c7
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.c
@@ -0,0 +1,141 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11n 2057 radio device data tables
5
6 Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
21 Boston, MA 02110-1301, USA.
22
23*/
24
25#include "b43.h"
26#include "radio_2057.h"
27#include "phy_common.h"
28
/*
 * 2057 radio init tables: flat lists of { register, value } pairs consumed
 * by r2057_upload_inittabs() below. Which table applies is selected by the
 * PHY and radio revision; values presumably come from the vendor driver
 * dumps (bcm-v4) — verify against those before editing.
 */

/* Radio 2057 rev 4 (used with PHY rev 7) */
static u16 r2057_rev4_init[42][2] = {
	{ 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
	{ 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
	{ 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
	{ 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c },
	{ 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 },
	{ 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c },
	{ 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
	{ 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
	{ 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
	{ 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
	{ 0x1AB, 0x00 }, { 0x1AC, 0x00 },
};

/* Radio 2057 rev 5 */
static u16 r2057_rev5_init[44][2] = {
	{ 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
	{ 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
	{ 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
	{ 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
	{ 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
	{ 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
	{ 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
	{ 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
	{ 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 },
	{ 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 },
	{ 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
};

/* Radio 2057 rev 5a (variant of rev 5; differs in regs 0x00, 0xA1/0xC9,
 * 0x126/0x14E and the presence of 0x14E) */
static u16 r2057_rev5a_init[45][2] = {
	{ 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
	{ 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
	{ 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
	{ 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
	{ 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
	{ 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
	{ 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 },
	{ 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 },
	{ 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
	{ 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
	{ 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 },
	{ 0x1C2, 0x80 },
};

/* Radio 2057 rev 7 */
static u16 r2057_rev7_init[54][2] = {
	{ 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
	{ 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
	{ 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
	{ 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 },
	{ 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f },
	{ 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
	{ 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee },
	{ 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 },
	{ 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 },
	{ 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
	{ 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
	{ 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
	{ 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
	{ 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
};

/* Radio 2057 rev 8 (selected for radio_rev == 9 in the upload routine) */
static u16 r2057_rev8_init[54][2] = {
	{ 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
	{ 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
	{ 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
	{ 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f },
	{ 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
	{ 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
	{ 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 },
	{ 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee },
	{ 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 },
	{ 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
	{ 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
	{ 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
	{ 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
	{ 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
};
105
106void r2057_upload_inittabs(struct b43_wldev *dev)
107{
108 struct b43_phy *phy = &dev->phy;
109 u16 *table = NULL;
110 u16 size, i;
111
112 if (phy->rev == 7) {
113 table = r2057_rev4_init[0];
114 size = ARRAY_SIZE(r2057_rev4_init);
115 } else if (phy->rev == 8 || phy->rev == 9) {
116 if (phy->radio_rev == 5) {
117 if (phy->radio_rev == 8) {
118 table = r2057_rev5_init[0];
119 size = ARRAY_SIZE(r2057_rev5_init);
120 } else {
121 table = r2057_rev5a_init[0];
122 size = ARRAY_SIZE(r2057_rev5a_init);
123 }
124 } else if (phy->radio_rev == 7) {
125 table = r2057_rev7_init[0];
126 size = ARRAY_SIZE(r2057_rev7_init);
127 } else if (phy->radio_rev == 9) {
128 table = r2057_rev8_init[0];
129 size = ARRAY_SIZE(r2057_rev8_init);
130 }
131 }
132
133 if (table) {
134 for (i = 0; i < 10; i++) {
135 pr_info("radio_write 0x%X ", *table);
136 table++;
137 pr_info("0x%X\n", *table);
138 table++;
139 }
140 }
141}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
new file mode 100644
index 000000000000..eeebd8fbeb0d
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.h
@@ -0,0 +1,430 @@
1#ifndef B43_RADIO_2057_H_
2#define B43_RADIO_2057_H_
3
4#include <linux/types.h>
5
6#include "tables_nphy.h"
7
8#define R2057_DACBUF_VINCM_CORE0 0x000
9#define R2057_IDCODE 0x001
10#define R2057_RCCAL_MASTER 0x002
11#define R2057_RCCAL_CAP_SIZE 0x003
12#define R2057_RCAL_CONFIG 0x004
13#define R2057_GPAIO_CONFIG 0x005
14#define R2057_GPAIO_SEL1 0x006
15#define R2057_GPAIO_SEL0 0x007
16#define R2057_CLPO_CONFIG 0x008
17#define R2057_BANDGAP_CONFIG 0x009
18#define R2057_BANDGAP_RCAL_TRIM 0x00a
19#define R2057_AFEREG_CONFIG 0x00b
20#define R2057_TEMPSENSE_CONFIG 0x00c
21#define R2057_XTAL_CONFIG1 0x00d
22#define R2057_XTAL_ICORE_SIZE 0x00e
23#define R2057_XTAL_BUF_SIZE 0x00f
24#define R2057_XTAL_PULLCAP_SIZE 0x010
25#define R2057_RFPLL_MASTER 0x011
26#define R2057_VCOMONITOR_VTH_L 0x012
27#define R2057_VCOMONITOR_VTH_H 0x013
28#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT 0x014
29#define R2057_VCO_VARCSIZE_IDAC 0x015
30#define R2057_VCOCAL_COUNTVAL0 0x016
31#define R2057_VCOCAL_COUNTVAL1 0x017
32#define R2057_VCOCAL_INTCLK_COUNT 0x018
33#define R2057_VCOCAL_MASTER 0x019
34#define R2057_VCOCAL_NUMCAPCHANGE 0x01a
35#define R2057_VCOCAL_WINSIZE 0x01b
36#define R2057_VCOCAL_DELAY_AFTER_REFRESH 0x01c
37#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP 0x01d
38#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP 0x01e
39#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP 0x01f
40#define R2057_VCO_FORCECAPEN_FORCECAP1 0x020
41#define R2057_VCO_FORCECAP0 0x021
42#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE 0x022
43#define R2057_RFPLL_PFD_RESET_PW 0x023
44#define R2057_RFPLL_LOOPFILTER_R2 0x024
45#define R2057_RFPLL_LOOPFILTER_R1 0x025
46#define R2057_RFPLL_LOOPFILTER_C3 0x026
47#define R2057_RFPLL_LOOPFILTER_C2 0x027
48#define R2057_RFPLL_LOOPFILTER_C1 0x028
49#define R2057_CP_KPD_IDAC 0x029
50#define R2057_RFPLL_IDACS 0x02a
51#define R2057_RFPLL_MISC_EN 0x02b
52#define R2057_RFPLL_MMD0 0x02c
53#define R2057_RFPLL_MMD1 0x02d
54#define R2057_RFPLL_MISC_CAL_RESETN 0x02e
55#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES 0x02f
56#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE 0x030
57#define R2057_VCOCAL_READCAP0 0x031
58#define R2057_VCOCAL_READCAP1 0x032
59#define R2057_VCOCAL_STATUS 0x033
60#define R2057_LOGEN_PUS 0x034
61#define R2057_LOGEN_PTAT_RESETS 0x035
62#define R2057_VCOBUF_IDACS 0x036
63#define R2057_VCOBUF_TUNE 0x037
64#define R2057_CMOSBUF_TX2GQ_IDACS 0x038
65#define R2057_CMOSBUF_TX2GI_IDACS 0x039
66#define R2057_CMOSBUF_TX5GQ_IDACS 0x03a
67#define R2057_CMOSBUF_TX5GI_IDACS 0x03b
68#define R2057_CMOSBUF_RX2GQ_IDACS 0x03c
69#define R2057_CMOSBUF_RX2GI_IDACS 0x03d
70#define R2057_CMOSBUF_RX5GQ_IDACS 0x03e
71#define R2057_CMOSBUF_RX5GI_IDACS 0x03f
72#define R2057_LOGEN_MX2G_IDACS 0x040
73#define R2057_LOGEN_MX2G_TUNE 0x041
74#define R2057_LOGEN_MX5G_IDACS 0x042
75#define R2057_LOGEN_MX5G_TUNE 0x043
76#define R2057_LOGEN_MX5G_RCCR 0x044
77#define R2057_LOGEN_INDBUF2G_IDAC 0x045
78#define R2057_LOGEN_INDBUF2G_IBOOST 0x046
79#define R2057_LOGEN_INDBUF2G_TUNE 0x047
80#define R2057_LOGEN_INDBUF5G_IDAC 0x048
81#define R2057_LOGEN_INDBUF5G_IBOOST 0x049
82#define R2057_LOGEN_INDBUF5G_TUNE 0x04a
83#define R2057_CMOSBUF_TX_RCCR 0x04b
84#define R2057_CMOSBUF_RX_RCCR 0x04c
85#define R2057_LOGEN_SEL_PKDET 0x04d
86#define R2057_CMOSBUF_SHAREIQ_PTAT 0x04e
87#define R2057_RXTXBIAS_CONFIG_CORE0 0x04f
88#define R2057_TXGM_TXRF_PUS_CORE0 0x050
89#define R2057_TXGM_IDAC_BLEED_CORE0 0x051
90#define R2057_TXGM_GAIN_CORE0 0x056
91#define R2057_TXGM2G_PKDET_PUS_CORE0 0x057
92#define R2057_PAD2G_PTATS_CORE0 0x058
93#define R2057_PAD2G_IDACS_CORE0 0x059
94#define R2057_PAD2G_BOOST_PU_CORE0 0x05a
95#define R2057_PAD2G_CASCV_GAIN_CORE0 0x05b
96#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0 0x05c
97#define R2057_TXMIX2G_LODC_CORE0 0x05d
98#define R2057_PAD2G_TUNE_PUS_CORE0 0x05e
99#define R2057_IPA2G_GAIN_CORE0 0x05f
100#define R2057_TSSI2G_SPARE1_CORE0 0x060
101#define R2057_TSSI2G_SPARE2_CORE0 0x061
102#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0 0x062
103#define R2057_IPA2G_IMAIN_CORE0 0x063
104#define R2057_IPA2G_CASCONV_CORE0 0x064
105#define R2057_IPA2G_CASCOFFV_CORE0 0x065
106#define R2057_IPA2G_BIAS_FILTER_CORE0 0x066
107#define R2057_TX5G_PKDET_CORE0 0x069
108#define R2057_PGA_PTAT_TXGM5G_PU_CORE0 0x06a
109#define R2057_PAD5G_PTATS1_CORE0 0x06b
110#define R2057_PAD5G_CLASS_PTATS2_CORE0 0x06c
111#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0 0x06d
112#define R2057_PAD5G_CASCV_IMAIN_CORE0 0x06e
113#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0 0x06f
114#define R2057_PGA_BOOST_TUNE_CORE0 0x070
115#define R2057_PGA_GAIN_CORE0 0x071
116#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0 0x072
117#define R2057_TXMIX5G_BOOST_TUNE_CORE0 0x073
118#define R2057_PAD5G_TUNE_MISC_PUS_CORE0 0x074
119#define R2057_IPA5G_IAUX_CORE0 0x075
120#define R2057_IPA5G_GAIN_CORE0 0x076
121#define R2057_TSSI5G_SPARE1_CORE0 0x077
122#define R2057_TSSI5G_SPARE2_CORE0 0x078
123#define R2057_IPA5G_CASCOFFV_PU_CORE0 0x079
124#define R2057_IPA5G_PTAT_CORE0 0x07a
125#define R2057_IPA5G_IMAIN_CORE0 0x07b
126#define R2057_IPA5G_CASCONV_CORE0 0x07c
127#define R2057_IPA5G_BIAS_FILTER_CORE0 0x07d
128#define R2057_PAD_BIAS_FILTER_BWS_CORE0 0x080
129#define R2057_TR2G_CONFIG1_CORE0_NU 0x081
130#define R2057_TR2G_CONFIG2_CORE0_NU 0x082
131#define R2057_LNA5G_RFEN_CORE0 0x083
132#define R2057_TR5G_CONFIG2_CORE0_NU 0x084
133#define R2057_RXRFBIAS_IBOOST_PU_CORE0 0x085
134#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0 0x086
135#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0 0x087
136#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0 0x088
137#define R2057_RXMIX_CMFBITAIL_PU_CORE0 0x089
138#define R2057_LNA2_IMAIN_PTAT_PU_CORE0 0x08a
139#define R2057_LNA2_IAUX_PTAT_CORE0 0x08b
140#define R2057_LNA1_IMAIN_PTAT_PU_CORE0 0x08c
141#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0 0x08d
142#define R2057_RXRFBIAS_BANDSEL_CORE0 0x08e
143#define R2057_TIA_CONFIG_CORE0 0x08f
144#define R2057_TIA_IQGAIN_CORE0 0x090
145#define R2057_TIA_IBIAS2_CORE0 0x091
146#define R2057_TIA_IBIAS1_CORE0 0x092
147#define R2057_TIA_SPARE_Q_CORE0 0x093
148#define R2057_TIA_SPARE_I_CORE0 0x094
149#define R2057_RXMIX2G_PUS_CORE0 0x095
150#define R2057_RXMIX2G_VCMREFS_CORE0 0x096
151#define R2057_RXMIX2G_LODC_QI_CORE0 0x097
152#define R2057_W12G_BW_LNA2G_PUS_CORE0 0x098
153#define R2057_LNA2G_GAIN_CORE0 0x099
154#define R2057_LNA2G_TUNE_CORE0 0x09a
155#define R2057_RXMIX5G_PUS_CORE0 0x09b
156#define R2057_RXMIX5G_VCMREFS_CORE0 0x09c
157#define R2057_RXMIX5G_LODC_QI_CORE0 0x09d
158#define R2057_W15G_BW_LNA5G_PUS_CORE0 0x09e
159#define R2057_LNA5G_GAIN_CORE0 0x09f
160#define R2057_LNA5G_TUNE_CORE0 0x0a0
161#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0 0x0a1
162#define R2057_RXBB_BIAS_MASTER_CORE0 0x0a2
163#define R2057_RXBB_VGABUF_IDACS_CORE0 0x0a3
164#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0 0x0a4
165#define R2057_TXBUF_VINCM_CORE0 0x0a5
166#define R2057_TXBUF_IDACS_CORE0 0x0a6
167#define R2057_LPF_RESP_RXBUF_BW_CORE0 0x0a7
168#define R2057_RXBB_CC_CORE0 0x0a8
169#define R2057_RXBB_SPARE3_CORE0 0x0a9
170#define R2057_RXBB_RCCAL_HPC_CORE0 0x0aa
171#define R2057_LPF_IDACS_CORE0 0x0ab
172#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0 0x0ac
173#define R2057_TXBUF_GAIN_CORE0 0x0ad
174#define R2057_AFELOOPBACK_AACI_RESP_CORE0 0x0ae
175#define R2057_RXBUF_DEGEN_CORE0 0x0af
176#define R2057_RXBB_SPARE2_CORE0 0x0b0
177#define R2057_RXBB_SPARE1_CORE0 0x0b1
178#define R2057_RSSI_MASTER_CORE0 0x0b2
179#define R2057_W2_MASTER_CORE0 0x0b3
180#define R2057_NB_MASTER_CORE0 0x0b4
181#define R2057_W2_IDACS0_Q_CORE0 0x0b5
182#define R2057_W2_IDACS1_Q_CORE0 0x0b6
183#define R2057_W2_IDACS0_I_CORE0 0x0b7
184#define R2057_W2_IDACS1_I_CORE0 0x0b8
185#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0 0x0b9
186#define R2057_NB_IDACS_Q_CORE0 0x0ba
187#define R2057_NB_IDACS_I_CORE0 0x0bb
188#define R2057_BACKUP4_CORE0 0x0c1
189#define R2057_BACKUP3_CORE0 0x0c2
190#define R2057_BACKUP2_CORE0 0x0c3
191#define R2057_BACKUP1_CORE0 0x0c4
192#define R2057_SPARE16_CORE0 0x0c5
193#define R2057_SPARE15_CORE0 0x0c6
194#define R2057_SPARE14_CORE0 0x0c7
195#define R2057_SPARE13_CORE0 0x0c8
196#define R2057_SPARE12_CORE0 0x0c9
197#define R2057_SPARE11_CORE0 0x0ca
198#define R2057_TX2G_BIAS_RESETS_CORE0 0x0cb
199#define R2057_TX5G_BIAS_RESETS_CORE0 0x0cc
200#define R2057_IQTEST_SEL_PU 0x0cd
201#define R2057_XTAL_CONFIG2 0x0ce
202#define R2057_BUFS_MISC_LPFBW_CORE0 0x0cf
203#define R2057_TXLPF_RCCAL_CORE0 0x0d0
204#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0 0x0d1
205#define R2057_LPF_GAIN_CORE0 0x0d2
206#define R2057_DACBUF_IDACS_BW_CORE0 0x0d3
207#define R2057_RXTXBIAS_CONFIG_CORE1 0x0d4
208#define R2057_TXGM_TXRF_PUS_CORE1 0x0d5
209#define R2057_TXGM_IDAC_BLEED_CORE1 0x0d6
210#define R2057_TXGM_GAIN_CORE1 0x0db
211#define R2057_TXGM2G_PKDET_PUS_CORE1 0x0dc
212#define R2057_PAD2G_PTATS_CORE1 0x0dd
213#define R2057_PAD2G_IDACS_CORE1 0x0de
214#define R2057_PAD2G_BOOST_PU_CORE1 0x0df
215#define R2057_PAD2G_CASCV_GAIN_CORE1 0x0e0
216#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1 0x0e1
217#define R2057_TXMIX2G_LODC_CORE1 0x0e2
218#define R2057_PAD2G_TUNE_PUS_CORE1 0x0e3
219#define R2057_IPA2G_GAIN_CORE1 0x0e4
220#define R2057_TSSI2G_SPARE1_CORE1 0x0e5
221#define R2057_TSSI2G_SPARE2_CORE1 0x0e6
222#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1 0x0e7
223#define R2057_IPA2G_IMAIN_CORE1 0x0e8
224#define R2057_IPA2G_CASCONV_CORE1 0x0e9
225#define R2057_IPA2G_CASCOFFV_CORE1 0x0ea
226#define R2057_IPA2G_BIAS_FILTER_CORE1 0x0eb
227#define R2057_TX5G_PKDET_CORE1 0x0ee
228#define R2057_PGA_PTAT_TXGM5G_PU_CORE1 0x0ef
229#define R2057_PAD5G_PTATS1_CORE1 0x0f0
230#define R2057_PAD5G_CLASS_PTATS2_CORE1 0x0f1
231#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1 0x0f2
232#define R2057_PAD5G_CASCV_IMAIN_CORE1 0x0f3
233#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1 0x0f4
234#define R2057_PGA_BOOST_TUNE_CORE1 0x0f5
235#define R2057_PGA_GAIN_CORE1 0x0f6
236#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1 0x0f7
237#define R2057_TXMIX5G_BOOST_TUNE_CORE1 0x0f8
238#define R2057_PAD5G_TUNE_MISC_PUS_CORE1 0x0f9
239#define R2057_IPA5G_IAUX_CORE1 0x0fa
240#define R2057_IPA5G_GAIN_CORE1 0x0fb
241#define R2057_TSSI5G_SPARE1_CORE1 0x0fc
242#define R2057_TSSI5G_SPARE2_CORE1 0x0fd
243#define R2057_IPA5G_CASCOFFV_PU_CORE1 0x0fe
244#define R2057_IPA5G_PTAT_CORE1 0x0ff
245#define R2057_IPA5G_IMAIN_CORE1 0x100
246#define R2057_IPA5G_CASCONV_CORE1 0x101
247#define R2057_IPA5G_BIAS_FILTER_CORE1 0x102
248#define R2057_PAD_BIAS_FILTER_BWS_CORE1 0x105
249#define R2057_TR2G_CONFIG1_CORE1_NU 0x106
250#define R2057_TR2G_CONFIG2_CORE1_NU 0x107
251#define R2057_LNA5G_RFEN_CORE1 0x108
252#define R2057_TR5G_CONFIG2_CORE1_NU 0x109
253#define R2057_RXRFBIAS_IBOOST_PU_CORE1 0x10a
254#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1 0x10b
255#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1 0x10c
256#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1 0x10d
257#define R2057_RXMIX_CMFBITAIL_PU_CORE1 0x10e
258#define R2057_LNA2_IMAIN_PTAT_PU_CORE1 0x10f
259#define R2057_LNA2_IAUX_PTAT_CORE1 0x110
260#define R2057_LNA1_IMAIN_PTAT_PU_CORE1 0x111
261#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1 0x112
262#define R2057_RXRFBIAS_BANDSEL_CORE1 0x113
263#define R2057_TIA_CONFIG_CORE1 0x114
264#define R2057_TIA_IQGAIN_CORE1 0x115
265#define R2057_TIA_IBIAS2_CORE1 0x116
266#define R2057_TIA_IBIAS1_CORE1 0x117
267#define R2057_TIA_SPARE_Q_CORE1 0x118
268#define R2057_TIA_SPARE_I_CORE1 0x119
269#define R2057_RXMIX2G_PUS_CORE1 0x11a
270#define R2057_RXMIX2G_VCMREFS_CORE1 0x11b
271#define R2057_RXMIX2G_LODC_QI_CORE1 0x11c
272#define R2057_W12G_BW_LNA2G_PUS_CORE1 0x11d
273#define R2057_LNA2G_GAIN_CORE1 0x11e
274#define R2057_LNA2G_TUNE_CORE1 0x11f
275#define R2057_RXMIX5G_PUS_CORE1 0x120
276#define R2057_RXMIX5G_VCMREFS_CORE1 0x121
277#define R2057_RXMIX5G_LODC_QI_CORE1 0x122
278#define R2057_W15G_BW_LNA5G_PUS_CORE1 0x123
279#define R2057_LNA5G_GAIN_CORE1 0x124
280#define R2057_LNA5G_TUNE_CORE1 0x125
281#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1 0x126
282#define R2057_RXBB_BIAS_MASTER_CORE1 0x127
283#define R2057_RXBB_VGABUF_IDACS_CORE1 0x128
284#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1 0x129
285#define R2057_TXBUF_VINCM_CORE1 0x12a
286#define R2057_TXBUF_IDACS_CORE1 0x12b
287#define R2057_LPF_RESP_RXBUF_BW_CORE1 0x12c
288#define R2057_RXBB_CC_CORE1 0x12d
289#define R2057_RXBB_SPARE3_CORE1 0x12e
290#define R2057_RXBB_RCCAL_HPC_CORE1 0x12f
291#define R2057_LPF_IDACS_CORE1 0x130
292#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1 0x131
293#define R2057_TXBUF_GAIN_CORE1 0x132
294#define R2057_AFELOOPBACK_AACI_RESP_CORE1 0x133
295#define R2057_RXBUF_DEGEN_CORE1 0x134
296#define R2057_RXBB_SPARE2_CORE1 0x135
297#define R2057_RXBB_SPARE1_CORE1 0x136
298#define R2057_RSSI_MASTER_CORE1 0x137
299#define R2057_W2_MASTER_CORE1 0x138
300#define R2057_NB_MASTER_CORE1 0x139
301#define R2057_W2_IDACS0_Q_CORE1 0x13a
302#define R2057_W2_IDACS1_Q_CORE1 0x13b
303#define R2057_W2_IDACS0_I_CORE1 0x13c
304#define R2057_W2_IDACS1_I_CORE1 0x13d
305#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1 0x13e
306#define R2057_NB_IDACS_Q_CORE1 0x13f
307#define R2057_NB_IDACS_I_CORE1 0x140
308#define R2057_BACKUP4_CORE1 0x146
309#define R2057_BACKUP3_CORE1 0x147
310#define R2057_BACKUP2_CORE1 0x148
311#define R2057_BACKUP1_CORE1 0x149
312#define R2057_SPARE16_CORE1 0x14a
313#define R2057_SPARE15_CORE1 0x14b
314#define R2057_SPARE14_CORE1 0x14c
315#define R2057_SPARE13_CORE1 0x14d
316#define R2057_SPARE12_CORE1 0x14e
317#define R2057_SPARE11_CORE1 0x14f
318#define R2057_TX2G_BIAS_RESETS_CORE1 0x150
319#define R2057_TX5G_BIAS_RESETS_CORE1 0x151
320#define R2057_SPARE8_CORE1 0x152
321#define R2057_SPARE7_CORE1 0x153
322#define R2057_BUFS_MISC_LPFBW_CORE1 0x154
323#define R2057_TXLPF_RCCAL_CORE1 0x155
324#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1 0x156
325#define R2057_LPF_GAIN_CORE1 0x157
326#define R2057_DACBUF_IDACS_BW_CORE1 0x158
327#define R2057_DACBUF_VINCM_CORE1 0x159
328#define R2057_RCCAL_START_R1_Q1_P1 0x15a
329#define R2057_RCCAL_X1 0x15b
330#define R2057_RCCAL_TRC0 0x15c
331#define R2057_RCCAL_TRC1 0x15d
332#define R2057_RCCAL_DONE_OSCCAP 0x15e
333#define R2057_RCCAL_N0_0 0x15f
334#define R2057_RCCAL_N0_1 0x160
335#define R2057_RCCAL_N1_0 0x161
336#define R2057_RCCAL_N1_1 0x162
337#define R2057_RCAL_STATUS 0x163
338#define R2057_XTALPUOVR_PINCTRL 0x164
339#define R2057_OVR_REG0 0x165
340#define R2057_OVR_REG1 0x166
341#define R2057_OVR_REG2 0x167
342#define R2057_OVR_REG3 0x168
343#define R2057_OVR_REG4 0x169
344#define R2057_RCCAL_SCAP_VAL 0x16a
345#define R2057_RCCAL_BCAP_VAL 0x16b
346#define R2057_RCCAL_HPC_VAL 0x16c
347#define R2057_RCCAL_OVERRIDES 0x16d
348#define R2057_TX0_IQCAL_GAIN_BW 0x170
349#define R2057_TX0_LOFT_FINE_I 0x171
350#define R2057_TX0_LOFT_FINE_Q 0x172
351#define R2057_TX0_LOFT_COARSE_I 0x173
352#define R2057_TX0_LOFT_COARSE_Q 0x174
353#define R2057_TX0_TX_SSI_MASTER 0x175
354#define R2057_TX0_IQCAL_VCM_HG 0x176
355#define R2057_TX0_IQCAL_IDAC 0x177
356#define R2057_TX0_TSSI_VCM 0x178
357#define R2057_TX0_TX_SSI_MUX 0x179
358#define R2057_TX0_TSSIA 0x17a
359#define R2057_TX0_TSSIG 0x17b
360#define R2057_TX0_TSSI_MISC1 0x17c
361#define R2057_TX0_TXRXCOUPLE_2G_ATTEN 0x17d
362#define R2057_TX0_TXRXCOUPLE_2G_PWRUP 0x17e
363#define R2057_TX0_TXRXCOUPLE_5G_ATTEN 0x17f
364#define R2057_TX0_TXRXCOUPLE_5G_PWRUP 0x180
365#define R2057_TX1_IQCAL_GAIN_BW 0x190
366#define R2057_TX1_LOFT_FINE_I 0x191
367#define R2057_TX1_LOFT_FINE_Q 0x192
368#define R2057_TX1_LOFT_COARSE_I 0x193
369#define R2057_TX1_LOFT_COARSE_Q 0x194
370#define R2057_TX1_TX_SSI_MASTER 0x195
371#define R2057_TX1_IQCAL_VCM_HG 0x196
372#define R2057_TX1_IQCAL_IDAC 0x197
373#define R2057_TX1_TSSI_VCM 0x198
374#define R2057_TX1_TX_SSI_MUX 0x199
375#define R2057_TX1_TSSIA 0x19a
376#define R2057_TX1_TSSIG 0x19b
377#define R2057_TX1_TSSI_MISC1 0x19c
378#define R2057_TX1_TXRXCOUPLE_2G_ATTEN 0x19d
379#define R2057_TX1_TXRXCOUPLE_2G_PWRUP 0x19e
380#define R2057_TX1_TXRXCOUPLE_5G_ATTEN 0x19f
381#define R2057_TX1_TXRXCOUPLE_5G_PWRUP 0x1a0
382#define R2057_AFE_VCM_CAL_MASTER_CORE0 0x1a1
383#define R2057_AFE_SET_VCM_I_CORE0 0x1a2
384#define R2057_AFE_SET_VCM_Q_CORE0 0x1a3
385#define R2057_AFE_STATUS_VCM_IQADC_CORE0 0x1a4
386#define R2057_AFE_STATUS_VCM_I_CORE0 0x1a5
387#define R2057_AFE_STATUS_VCM_Q_CORE0 0x1a6
388#define R2057_AFE_VCM_CAL_MASTER_CORE1 0x1a7
389#define R2057_AFE_SET_VCM_I_CORE1 0x1a8
390#define R2057_AFE_SET_VCM_Q_CORE1 0x1a9
391#define R2057_AFE_STATUS_VCM_IQADC_CORE1 0x1aa
392#define R2057_AFE_STATUS_VCM_I_CORE1 0x1ab
393#define R2057_AFE_STATUS_VCM_Q_CORE1 0x1ac
394
395#define R2057v7_DACBUF_VINCM_CORE0 0x1ad
396#define R2057v7_RCCAL_MASTER 0x1ae
397#define R2057v7_TR2G_CONFIG3_CORE0_NU 0x1af
398#define R2057v7_TR2G_CONFIG3_CORE1_NU 0x1b0
399#define R2057v7_LOGEN_PUS1 0x1b1
400#define R2057v7_OVR_REG5 0x1b2
401#define R2057v7_OVR_REG6 0x1b3
402#define R2057v7_OVR_REG7 0x1b4
403#define R2057v7_OVR_REG8 0x1b5
404#define R2057v7_OVR_REG9 0x1b6
405#define R2057v7_OVR_REG10 0x1b7
406#define R2057v7_OVR_REG11 0x1b8
407#define R2057v7_OVR_REG12 0x1b9
408#define R2057v7_OVR_REG13 0x1ba
409#define R2057v7_OVR_REG14 0x1bb
410#define R2057v7_OVR_REG15			0x1bc
411#define R2057v7_OVR_REG16			0x1bd
/* NOTE(review): 0x1be sits between REG16 and REG18 but is named
 * OVR_REG1 — presumably a typo for R2057v7_OVR_REG17.  Kept as-is
 * (renaming would break any user of the macro); confirm against the
 * vendor register map. */
412#define R2057v7_OVR_REG1			0x1be
413#define R2057v7_OVR_REG18			0x1bf
414#define R2057v7_OVR_REG19			0x1c0
415#define R2057v7_OVR_REG20			0x1c1
416#define R2057v7_OVR_REG21			0x1c2
/* NOTE(review): likewise 0x1c3 between REG21 and REG23 is named
 * OVR_REG2 — presumably R2057v7_OVR_REG22.  Verify before relying on
 * the name. */
417#define R2057v7_OVR_REG2			0x1c3
418#define R2057v7_OVR_REG23			0x1c4
419#define R2057v7_OVR_REG24			0x1c5
420#define R2057v7_OVR_REG25 0x1c6
421#define R2057v7_OVR_REG26 0x1c7
422#define R2057v7_OVR_REG27 0x1c8
423#define R2057v7_OVR_REG28 0x1c9
424#define R2057v7_IQTEST_SEL_PU2 0x1ca
425
426#define R2057_VCM_MASK 0x7
427
428void r2057_upload_inittabs(struct b43_wldev *dev);
429
430#endif /* B43_RADIO_2057_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index f0d8377429c6..97d4e27bf36f 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2757,6 +2757,49 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
2757 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ 2757 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
2758}; 2758};
2759 2759
2760/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
/* Override set 0: looked up by exact "field" match in
 * b43_nphy_get_rf_ctl_over_rev7(override == 0).  Each entry gives the
 * per-core register address plus the mask/shift used to place the
 * forced value. */
2761static const struct nphy_rf_control_override_rev7
2762	tbl_rf_control_override_rev7_over0[] = {
2763	{ 0x0004, 0x07A, 0x07D, 0x0002, 1 },
2764	{ 0x0008, 0x07A, 0x07D, 0x0004, 2 },
2765	{ 0x0010, 0x07A, 0x07D, 0x0010, 4 },
2766	{ 0x0020, 0x07A, 0x07D, 0x0020, 5 },
2767	{ 0x0040, 0x07A, 0x07D, 0x0040, 6 },
2768	{ 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
2769	{ 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
2770	{ 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
2771	{ 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
2772	{ 0x6000, 0x348, 0x349, 0xFFFF, 0 },
2773	{ 0x2000, 0x348, 0x349, 0x000F, 0 },
2774};
2775
2776/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
/* Override set 1 (override == 1): all fields live in registers
 * 0x340/0x341.  Entry order is not significant — lookup is by exact
 * field equality. */
2777static const struct nphy_rf_control_override_rev7
2778	tbl_rf_control_override_rev7_over1[] = {
2779	{ 0x0002, 0x340, 0x341, 0x0002, 1 },
2780	{ 0x0008, 0x340, 0x341, 0x0008, 3 },
2781	{ 0x0020, 0x340, 0x341, 0x0020, 5 },
2782	{ 0x0010, 0x340, 0x341, 0x0010, 4 },
2783	{ 0x0004, 0x340, 0x341, 0x0004, 2 },
2784	{ 0x0080, 0x340, 0x341, 0x0700, 8 },
2785	{ 0x0800, 0x340, 0x341, 0x4000, 14 },
2786	{ 0x0400, 0x340, 0x341, 0x2000, 13 },
2787	{ 0x0200, 0x340, 0x341, 0x0800, 12 },
2788	{ 0x0100, 0x340, 0x341, 0x0100, 11 },
2789	{ 0x0040, 0x340, 0x341, 0x0040, 6 },
2790	{ 0x0001, 0x340, 0x341, 0x0001, 0 },
2791};
2792
2793/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
/* Override set 2 (override == 2): single-bit fields in registers
 * 0x344/0x345, mask equal to the field bit. */
2794static const struct nphy_rf_control_override_rev7
2795	tbl_rf_control_override_rev7_over2[] = {
2796	{ 0x0008, 0x344, 0x345, 0x0008, 3 },
2797	{ 0x0002, 0x344, 0x345, 0x0002, 1 },
2798	{ 0x0001, 0x344, 0x345, 0x0001, 0 },
2799	{ 0x0004, 0x344, 0x345, 0x0004, 2 },
2800	{ 0x0010, 0x344, 0x345, 0x0010, 4 },
2801};
2802
2760struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = { 2803struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
2761 { 10, 14, 19, 27 }, 2804 { 10, 14, 19, 27 },
2762 { -5, 6, 10, 15 }, 2805 { -5, 6, 10, 15 },
@@ -3248,3 +3291,35 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
3248 3291
3249 return e; 3292 return e;
3250} 3293}
3294
3295const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
3296 struct b43_wldev *dev, u16 field, u8 override)
3297{
3298 const struct nphy_rf_control_override_rev7 *e;
3299 u8 size, i;
3300
3301 switch (override) {
3302 case 0:
3303 e = tbl_rf_control_override_rev7_over0;
3304 size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0);
3305 break;
3306 case 1:
3307 e = tbl_rf_control_override_rev7_over1;
3308 size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1);
3309 break;
3310 case 2:
3311 e = tbl_rf_control_override_rev7_over2;
3312 size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2);
3313 break;
3314 default:
3315 b43err(dev->wl, "Invalid override value %d\n", override);
3316 return NULL;
3317 }
3318
3319 for (i = 0; i < size; i++) {
3320 if (e[i].field == field)
3321 return &e[i];
3322 }
3323
3324 return NULL;
3325}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index f348953c0230..c600700ceedc 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -35,6 +35,14 @@ struct nphy_rf_control_override_rev3 {
35 u8 val_addr1; 35 u8 val_addr1;
36}; 36};
37 37
38struct nphy_rf_control_override_rev7 {
39 u16 field;
40 u16 val_addr_core0;
41 u16 val_addr_core1;
42 u16 val_mask;
43 u8 val_shift;
44};
45
38struct nphy_gain_ctl_workaround_entry { 46struct nphy_gain_ctl_workaround_entry {
39 s8 lna1_gain[4]; 47 s8 lna1_gain[4];
40 s8 lna2_gain[4]; 48 s8 lna2_gain[4];
@@ -202,5 +210,7 @@ extern const struct nphy_rf_control_override_rev2
202 tbl_rf_control_override_rev2[]; 210 tbl_rf_control_override_rev2[];
203extern const struct nphy_rf_control_override_rev3 211extern const struct nphy_rf_control_override_rev3
204 tbl_rf_control_override_rev3[]; 212 tbl_rf_control_override_rev3[];
213const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
214 struct b43_wldev *dev, u16 field, u8 override);
205 215
206#endif /* B43_TABLES_NPHY_H_ */ 216#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 8156135a0590..18e208e3eca1 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1920,7 +1920,7 @@ static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
1920 return 0; 1920 return 0;
1921 ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, 1921 ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
1922 (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL) 1922 (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
1923 & mask) | set); 1923 & ~mask) | set);
1924 1924
1925 return 0; 1925 return 0;
1926} 1926}
@@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work)
2492} 2492}
2493 2493
2494static void b43legacy_op_tx(struct ieee80211_hw *hw, 2494static void b43legacy_op_tx(struct ieee80211_hw *hw,
2495 struct ieee80211_tx_control *control,
2495 struct sk_buff *skb) 2496 struct sk_buff *skb)
2496{ 2497{
2497 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2498 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
@@ -3894,6 +3895,8 @@ static void b43legacy_remove(struct ssb_device *dev)
3894 cancel_work_sync(&wl->firmware_load); 3895 cancel_work_sync(&wl->firmware_load);
3895 3896
3896 B43legacy_WARN_ON(!wl); 3897 B43legacy_WARN_ON(!wl);
3898 if (!wldev->fw.ucode)
3899 return; /* NULL if fw never loaded */
3897 if (wl->current_dev == wldev) 3900 if (wl->current_dev == wldev)
3898 ieee80211_unregister_hw(wl->hw); 3901 ieee80211_unregister_hw(wl->hw);
3899 3902
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index b480088b3dbe..c9d811eb6556 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -55,6 +55,14 @@ config BRCMFMAC_USB
55 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to 55 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
56 use the driver for an USB wireless card. 56 use the driver for an USB wireless card.
57 57
58config BRCMISCAN
59 bool "Broadcom I-Scan (OBSOLETE)"
60 depends on BRCMFMAC
61 ---help---
62 This option enables the I-Scan method. By default fullmac uses the
63 new E-Scan method which uses less memory in firmware and gives no
64 limitation on the number of scan results.
65
58config BRCMDBG 66config BRCMDBG
59 bool "Broadcom driver debug functions" 67 bool "Broadcom driver debug functions"
60 depends on BRCMSMAC || BRCMFMAC 68 depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 8e7e6928c936..3b2c4c20e7fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -185,7 +185,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
185 return err; 185 return err;
186} 186}
187 187
188static int 188int
189brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, 189brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
190 void *data, bool write) 190 void *data, bool write)
191{ 191{
@@ -249,7 +249,9 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
249 int retval; 249 int retval;
250 250
251 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 251 brcmf_dbg(INFO, "addr:0x%08x\n", addr);
252 sdio_claim_host(sdiodev->func[1]);
252 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 253 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
254 sdio_release_host(sdiodev->func[1]);
253 brcmf_dbg(INFO, "data:0x%02x\n", data); 255 brcmf_dbg(INFO, "data:0x%02x\n", data);
254 256
255 if (ret) 257 if (ret)
@@ -264,7 +266,9 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
264 int retval; 266 int retval;
265 267
266 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 268 brcmf_dbg(INFO, "addr:0x%08x\n", addr);
269 sdio_claim_host(sdiodev->func[1]);
267 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 270 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
271 sdio_release_host(sdiodev->func[1]);
268 brcmf_dbg(INFO, "data:0x%08x\n", data); 272 brcmf_dbg(INFO, "data:0x%08x\n", data);
269 273
270 if (ret) 274 if (ret)
@@ -279,7 +283,9 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
279 int retval; 283 int retval;
280 284
281 brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data); 285 brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
286 sdio_claim_host(sdiodev->func[1]);
282 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 287 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
288 sdio_release_host(sdiodev->func[1]);
283 289
284 if (ret) 290 if (ret)
285 *ret = retval; 291 *ret = retval;
@@ -291,7 +297,9 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
291 int retval; 297 int retval;
292 298
293 brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data); 299 brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
300 sdio_claim_host(sdiodev->func[1]);
294 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 301 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
302 sdio_release_host(sdiodev->func[1]);
295 303
296 if (ret) 304 if (ret)
297 *ret = retval; 305 *ret = retval;
@@ -356,15 +364,20 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
356 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 364 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
357 fn, addr, pkt->len); 365 fn, addr, pkt->len);
358 366
367 sdio_claim_host(sdiodev->func[1]);
368
359 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 369 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
360 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 370 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
361 if (err) 371 if (err)
362 return err; 372 goto done;
363 373
364 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 374 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
365 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, 375 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
366 fn, addr, pkt); 376 fn, addr, pkt);
367 377
378done:
379 sdio_release_host(sdiodev->func[1]);
380
368 return err; 381 return err;
369} 382}
370 383
@@ -378,15 +391,20 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
378 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 391 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
379 fn, addr, pktq->qlen); 392 fn, addr, pktq->qlen);
380 393
394 sdio_claim_host(sdiodev->func[1]);
395
381 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 396 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
382 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 397 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
383 if (err) 398 if (err)
384 return err; 399 goto done;
385 400
386 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 401 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
387 err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr, 402 err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
388 pktq); 403 pktq);
389 404
405done:
406 sdio_release_host(sdiodev->func[1]);
407
390 return err; 408 return err;
391} 409}
392 410
@@ -428,10 +446,12 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
428 if (flags & SDIO_REQ_ASYNC) 446 if (flags & SDIO_REQ_ASYNC)
429 return -ENOTSUPP; 447 return -ENOTSUPP;
430 448
449 sdio_claim_host(sdiodev->func[1]);
450
431 if (bar0 != sdiodev->sbwad) { 451 if (bar0 != sdiodev->sbwad) {
432 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0); 452 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
433 if (err) 453 if (err)
434 return err; 454 goto done;
435 455
436 sdiodev->sbwad = bar0; 456 sdiodev->sbwad = bar0;
437 } 457 }
@@ -443,8 +463,13 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
443 if (width == 4) 463 if (width == 4)
444 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; 464 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
445 465
446 return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, 466 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
447 addr, pkt); 467 addr, pkt);
468
469done:
470 sdio_release_host(sdiodev->func[1]);
471
472 return err;
448} 473}
449 474
450int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr, 475int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
@@ -485,8 +510,10 @@ int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
485 brcmf_dbg(TRACE, "Enter\n"); 510 brcmf_dbg(TRACE, "Enter\n");
486 511
487 /* issue abort cmd52 command through F0 */ 512 /* issue abort cmd52 command through F0 */
513 sdio_claim_host(sdiodev->func[1]);
488 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0, 514 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
489 SDIO_CCCR_ABORT, &t_func); 515 SDIO_CCCR_ABORT, &t_func);
516 sdio_release_host(sdiodev->func[1]);
490 517
491 brcmf_dbg(TRACE, "Exit\n"); 518 brcmf_dbg(TRACE, "Exit\n");
492 return 0; 519 return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 7c4ee72f9d56..c3247d5b3c22 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -42,6 +42,7 @@
42 42
43#define DMA_ALIGN_MASK 0x03 43#define DMA_ALIGN_MASK 0x03
44 44
45#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
45#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 46#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
46#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 47#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
47#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 48#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
@@ -51,6 +52,7 @@
51 52
52/* devices we support, null terminated */ 53/* devices we support, null terminated */
53static const struct sdio_device_id brcmf_sdmmc_ids[] = { 54static const struct sdio_device_id brcmf_sdmmc_ids[] = {
55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, 56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, 57 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)}, 58 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
@@ -101,7 +103,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
101 if (regaddr == SDIO_CCCR_IOEx) { 103 if (regaddr == SDIO_CCCR_IOEx) {
102 sdfunc = sdiodev->func[2]; 104 sdfunc = sdiodev->func[2];
103 if (sdfunc) { 105 if (sdfunc) {
104 sdio_claim_host(sdfunc);
105 if (*byte & SDIO_FUNC_ENABLE_2) { 106 if (*byte & SDIO_FUNC_ENABLE_2) {
106 /* Enable Function 2 */ 107 /* Enable Function 2 */
107 err_ret = sdio_enable_func(sdfunc); 108 err_ret = sdio_enable_func(sdfunc);
@@ -117,7 +118,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
117 "Disable F2 failed:%d\n", 118 "Disable F2 failed:%d\n",
118 err_ret); 119 err_ret);
119 } 120 }
120 sdio_release_host(sdfunc);
121 } 121 }
122 } else if ((regaddr == SDIO_CCCR_ABORT) || 122 } else if ((regaddr == SDIO_CCCR_ABORT) ||
123 (regaddr == SDIO_CCCR_IENx)) { 123 (regaddr == SDIO_CCCR_IENx)) {
@@ -126,17 +126,13 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
126 if (!sdfunc) 126 if (!sdfunc)
127 return -ENOMEM; 127 return -ENOMEM;
128 sdfunc->num = 0; 128 sdfunc->num = 0;
129 sdio_claim_host(sdfunc);
130 sdio_writeb(sdfunc, *byte, regaddr, &err_ret); 129 sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
131 sdio_release_host(sdfunc);
132 kfree(sdfunc); 130 kfree(sdfunc);
133 } else if (regaddr < 0xF0) { 131 } else if (regaddr < 0xF0) {
134 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr); 132 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
135 err_ret = -EPERM; 133 err_ret = -EPERM;
136 } else { 134 } else {
137 sdio_claim_host(sdfunc);
138 sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret); 135 sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
139 sdio_release_host(sdfunc);
140 } 136 }
141 137
142 return err_ret; 138 return err_ret;
@@ -157,7 +153,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
157 /* handle F0 separately */ 153 /* handle F0 separately */
158 err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte); 154 err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
159 } else { 155 } else {
160 sdio_claim_host(sdiodev->func[func]);
161 if (rw) /* CMD52 Write */ 156 if (rw) /* CMD52 Write */
162 sdio_writeb(sdiodev->func[func], *byte, regaddr, 157 sdio_writeb(sdiodev->func[func], *byte, regaddr,
163 &err_ret); 158 &err_ret);
@@ -168,7 +163,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
168 *byte = sdio_readb(sdiodev->func[func], regaddr, 163 *byte = sdio_readb(sdiodev->func[func], regaddr,
169 &err_ret); 164 &err_ret);
170 } 165 }
171 sdio_release_host(sdiodev->func[func]);
172 } 166 }
173 167
174 if (err_ret) 168 if (err_ret)
@@ -195,8 +189,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
195 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait); 189 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
196 if (brcmf_pm_resume_error(sdiodev)) 190 if (brcmf_pm_resume_error(sdiodev))
197 return -EIO; 191 return -EIO;
198 /* Claim host controller */
199 sdio_claim_host(sdiodev->func[func]);
200 192
201 if (rw) { /* CMD52 Write */ 193 if (rw) { /* CMD52 Write */
202 if (nbytes == 4) 194 if (nbytes == 4)
@@ -217,9 +209,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
217 brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes); 209 brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
218 } 210 }
219 211
220 /* Release host controller */
221 sdio_release_host(sdiodev->func[func]);
222
223 if (err_ret) 212 if (err_ret)
224 brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n", 213 brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
225 rw ? "write" : "read", err_ret); 214 rw ? "write" : "read", err_ret);
@@ -273,9 +262,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
273 if (brcmf_pm_resume_error(sdiodev)) 262 if (brcmf_pm_resume_error(sdiodev))
274 return -EIO; 263 return -EIO;
275 264
276 /* Claim host controller */
277 sdio_claim_host(sdiodev->func[func]);
278
279 skb_queue_walk(pktq, pkt) { 265 skb_queue_walk(pktq, pkt) {
280 uint pkt_len = pkt->len; 266 uint pkt_len = pkt->len;
281 pkt_len += 3; 267 pkt_len += 3;
@@ -298,9 +284,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
298 SGCount++; 284 SGCount++;
299 } 285 }
300 286
301 /* Release host controller */
302 sdio_release_host(sdiodev->func[func]);
303
304 brcmf_dbg(TRACE, "Exit\n"); 287 brcmf_dbg(TRACE, "Exit\n");
305 return err_ret; 288 return err_ret;
306} 289}
@@ -326,9 +309,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
326 if (brcmf_pm_resume_error(sdiodev)) 309 if (brcmf_pm_resume_error(sdiodev))
327 return -EIO; 310 return -EIO;
328 311
329 /* Claim host controller */
330 sdio_claim_host(sdiodev->func[func]);
331
332 pkt_len += 3; 312 pkt_len += 3;
333 pkt_len &= (uint)~3; 313 pkt_len &= (uint)~3;
334 314
@@ -342,9 +322,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
342 write ? "TX" : "RX", pkt, addr, pkt_len); 322 write ? "TX" : "RX", pkt, addr, pkt_len);
343 } 323 }
344 324
345 /* Release host controller */
346 sdio_release_host(sdiodev->func[func]);
347
348 return status; 325 return status;
349} 326}
350 327
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index a11fe54f5950..17e7ae73e008 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -27,6 +27,7 @@
27 * IO codes that are interpreted by dongle firmware 27 * IO codes that are interpreted by dongle firmware
28 ******************************************************************************/ 28 ******************************************************************************/
29#define BRCMF_C_UP 2 29#define BRCMF_C_UP 2
30#define BRCMF_C_DOWN 3
30#define BRCMF_C_SET_PROMISC 10 31#define BRCMF_C_SET_PROMISC 10
31#define BRCMF_C_GET_RATE 12 32#define BRCMF_C_GET_RATE 12
32#define BRCMF_C_GET_INFRA 19 33#define BRCMF_C_GET_INFRA 19
@@ -50,7 +51,10 @@
50#define BRCMF_C_REASSOC 53 51#define BRCMF_C_REASSOC 53
51#define BRCMF_C_SET_ROAM_TRIGGER 55 52#define BRCMF_C_SET_ROAM_TRIGGER 55
52#define BRCMF_C_SET_ROAM_DELTA 57 53#define BRCMF_C_SET_ROAM_DELTA 57
54#define BRCMF_C_GET_BCNPRD 75
55#define BRCMF_C_SET_BCNPRD 76
53#define BRCMF_C_GET_DTIMPRD 77 56#define BRCMF_C_GET_DTIMPRD 77
57#define BRCMF_C_SET_DTIMPRD 78
54#define BRCMF_C_SET_COUNTRY 84 58#define BRCMF_C_SET_COUNTRY 84
55#define BRCMF_C_GET_PM 85 59#define BRCMF_C_GET_PM 85
56#define BRCMF_C_SET_PM 86 60#define BRCMF_C_SET_PM 86
@@ -130,6 +134,13 @@
130#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02 134#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
131#define BRCMF_EVENT_MSG_GROUP 0x04 135#define BRCMF_EVENT_MSG_GROUP 0x04
132 136
137#define BRCMF_ESCAN_REQ_VERSION 1
138
139#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
140
141#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
142#define BRCMF_STA_ASSOC 0x10 /* Associated */
143
133struct brcmf_event_msg { 144struct brcmf_event_msg {
134 __be16 version; 145 __be16 version;
135 __be16 flags; 146 __be16 flags;
@@ -140,6 +151,8 @@ struct brcmf_event_msg {
140 __be32 datalen; 151 __be32 datalen;
141 u8 addr[ETH_ALEN]; 152 u8 addr[ETH_ALEN];
142 char ifname[IFNAMSIZ]; 153 char ifname[IFNAMSIZ];
154 u8 ifidx;
155 u8 bsscfgidx;
143} __packed; 156} __packed;
144 157
145struct brcm_ethhdr { 158struct brcm_ethhdr {
@@ -454,6 +467,24 @@ struct brcmf_scan_results_le {
454 __le32 count; 467 __le32 count;
455}; 468};
456 469
470struct brcmf_escan_params_le {
471 __le32 version;
472 __le16 action;
473 __le16 sync_id;
474 struct brcmf_scan_params_le params_le;
475};
476
477struct brcmf_escan_result_le {
478 __le32 buflen;
479 __le32 version;
480 __le16 sync_id;
481 __le16 bss_count;
482 struct brcmf_bss_info_le bss_info_le;
483};
484
485#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
486 sizeof(struct brcmf_bss_info_le))
487
457/* used for association with a specific BSSID and chanspec list */ 488/* used for association with a specific BSSID and chanspec list */
458struct brcmf_assoc_params_le { 489struct brcmf_assoc_params_le {
459 /* 00:00:00:00:00:00: broadcast scan */ 490 /* 00:00:00:00:00:00: broadcast scan */
@@ -542,6 +573,28 @@ struct brcmf_channel_info_le {
542 __le32 scan_channel; 573 __le32 scan_channel;
543}; 574};
544 575
576struct brcmf_sta_info_le {
577 __le16 ver; /* version of this struct */
578 __le16 len; /* length in bytes of this structure */
579 __le16 cap; /* sta's advertised capabilities */
580 __le32 flags; /* flags defined below */
581 __le32 idle; /* time since data pkt rx'd from sta */
582 u8 ea[ETH_ALEN]; /* Station address */
583 __le32 count; /* # rates in this set */
584 u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units */
585 /* w/hi bit set if basic */
586 __le32 in; /* seconds elapsed since associated */
587 __le32 listen_interval_inms; /* Min Listen interval in ms for STA */
588 __le32 tx_pkts; /* # of packets transmitted */
589 __le32 tx_failures; /* # of packets failed */
590 __le32 rx_ucast_pkts; /* # of unicast packets received */
591 __le32 rx_mcast_pkts; /* # of multicast packets received */
592 __le32 tx_rate; /* Rate of last successful tx frame */
593 __le32 rx_rate; /* Rate of last successful rx frame */
594 __le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
595 __le32 rx_decrypt_failures; /* # of packet decrypted failed */
596};
597
545/* Bus independent dongle command */ 598/* Bus independent dongle command */
546struct brcmf_dcmd { 599struct brcmf_dcmd {
547 uint cmd; /* common dongle cmd definition */ 600 uint cmd; /* common dongle cmd definition */
@@ -561,7 +614,7 @@ struct brcmf_pub {
561 /* Linkage ponters */ 614 /* Linkage ponters */
562 struct brcmf_bus *bus_if; 615 struct brcmf_bus *bus_if;
563 struct brcmf_proto *prot; 616 struct brcmf_proto *prot;
564 struct brcmf_cfg80211_dev *config; 617 struct brcmf_cfg80211_info *config;
565 struct device *dev; /* fullmac dongle device pointer */ 618 struct device *dev; /* fullmac dongle device pointer */
566 619
567 /* Internal brcmf items */ 620 /* Internal brcmf items */
@@ -634,10 +687,13 @@ extern const struct bcmevent_name bcmevent_names[];
634 687
635extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, 688extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
636 char *buf, uint len); 689 char *buf, uint len);
690extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
691 char *buf, uint buflen, s32 bssidx);
637 692
638extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 693extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
639 694
640extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len); 695extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
696extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
641 697
642/* Return pointer to interface name */ 698/* Return pointer to interface name */
643extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx); 699extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -657,10 +713,6 @@ extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
657 713
658extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx); 714extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
659 715
660/* Send packet to dongle via data channel */
661extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
662 struct sk_buff *pkt);
663
664extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg); 716extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
665extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, 717extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
666 int enable, int master_mode); 718 int enable, int master_mode);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 537f499cc5d2..9b8ee19ea55d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -103,7 +103,7 @@ extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
103extern void brcmf_detach(struct device *dev); 103extern void brcmf_detach(struct device *dev);
104 104
105/* Indication from bus module to change flow-control state */ 105/* Indication from bus module to change flow-control state */
106extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on); 106extern void brcmf_txflowblock(struct device *dev, bool state);
107 107
108/* Notify tx completion */ 108/* Notify tx completion */
109extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, 109extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 6f70953f0bad..15c5db5752d1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -80,12 +80,60 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
80 strncpy(buf, name, buflen); 80 strncpy(buf, name, buflen);
81 81
82 /* append data onto the end of the name string */ 82 /* append data onto the end of the name string */
83 memcpy(&buf[len], data, datalen); 83 if (data && datalen) {
84 len += datalen; 84 memcpy(&buf[len], data, datalen);
85 len += datalen;
86 }
85 87
86 return len; 88 return len;
87} 89}
88 90
91uint
92brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
93 char *buf, uint buflen, s32 bssidx)
94{
95 const s8 *prefix = "bsscfg:";
96 s8 *p;
97 u32 prefixlen;
98 u32 namelen;
99 u32 iolen;
100 __le32 bssidx_le;
101
102 if (bssidx == 0)
103 return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
104
105 prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
106 namelen = (u32) strlen(name) + 1; /* lengh of iovar name + null */
107 iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
108
109 if (buflen < 0 || iolen > (u32)buflen) {
110 brcmf_dbg(ERROR, "buffer is too short\n");
111 return 0;
112 }
113
114 p = buf;
115
116 /* copy prefix, no null */
117 memcpy(p, prefix, prefixlen);
118 p += prefixlen;
119
120 /* copy iovar name including null */
121 memcpy(p, name, namelen);
122 p += namelen;
123
124 /* bss config index as first data */
125 bssidx_le = cpu_to_le32(bssidx);
126 memcpy(p, &bssidx_le, sizeof(bssidx_le));
127 p += sizeof(bssidx_le);
128
129 /* parameter buffer follows */
130 if (datalen)
131 memcpy(p, data, datalen);
132
133 return iolen;
134
135}
136
89bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, 137bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
90 struct sk_buff *pkt, int prec) 138 struct sk_buff *pkt, int prec)
91{ 139{
@@ -205,7 +253,8 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
205 BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, { 253 BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
206 BRCMF_E_IF, "IF"}, { 254 BRCMF_E_IF, "IF"}, {
207 BRCMF_E_RSSI, "RSSI"}, { 255 BRCMF_E_RSSI, "RSSI"}, {
208 BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"} 256 BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
257 BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
209 }; 258 };
210 uint event_type, flags, auth_type, datalen; 259 uint event_type, flags, auth_type, datalen;
211 static u32 seqnum_prev; 260 static u32 seqnum_prev;
@@ -350,6 +399,11 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
350 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name); 399 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
351 break; 400 break;
352 401
402 case BRCMF_E_ESCAN_RESULT:
403 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
404 datalen = 0;
405 break;
406
353 case BRCMF_E_PFN_NET_FOUND: 407 case BRCMF_E_PFN_NET_FOUND:
354 case BRCMF_E_PFN_NET_LOST: 408 case BRCMF_E_PFN_NET_LOST:
355 case BRCMF_E_PFN_SCAN_COMPLETE: 409 case BRCMF_E_PFN_SCAN_COMPLETE:
@@ -425,13 +479,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
425 } 479 }
426 480
427 /* show any appended data */ 481 /* show any appended data */
428 if (datalen) { 482 brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
429 buf = (unsigned char *) event_data;
430 brcmf_dbg(EVENT, " data (%d) : ", datalen);
431 for (i = 0; i < datalen; i++)
432 brcmf_dbg(EVENT, " 0x%02x ", *buf++);
433 brcmf_dbg(EVENT, "\n");
434 }
435} 483}
436#endif /* DEBUG */ 484#endif /* DEBUG */
437 485
@@ -522,8 +570,9 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
522 } 570 }
523 571
524#ifdef DEBUG 572#ifdef DEBUG
525 brcmf_c_show_host_event(event, event_data); 573 if (BRCMF_EVENT_ON())
526#endif /* DEBUG */ 574 brcmf_c_show_host_event(event, event_data);
575#endif /* DEBUG */
527 576
528 return 0; 577 return 0;
529} 578}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index b784920532d3..fb508c2256dd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -55,6 +55,7 @@ do { \
55#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL) 55#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
56#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL) 56#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
57#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL) 57#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
58#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
58 59
59#else /* (defined DEBUG) || (defined DEBUG) */ 60#else /* (defined DEBUG) || (defined DEBUG) */
60 61
@@ -65,6 +66,7 @@ do { \
65#define BRCMF_HDRS_ON() 0 66#define BRCMF_HDRS_ON() 0
66#define BRCMF_BYTES_ON() 0 67#define BRCMF_BYTES_ON() 0
67#define BRCMF_GLOM_ON() 0 68#define BRCMF_GLOM_ON() 0
69#define BRCMF_EVENT_ON() 0
68 70
69#endif /* defined(DEBUG) */ 71#endif /* defined(DEBUG) */
70 72
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 9ab24528f9b9..d7c76ce9d8cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -272,30 +272,6 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
272 schedule_work(&drvr->multicast_work); 272 schedule_work(&drvr->multicast_work);
273} 273}
274 274
275int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
276{
277 /* Reject if down */
278 if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
279 return -ENODEV;
280
281 /* Update multicast statistic */
282 if (pktbuf->len >= ETH_ALEN) {
283 u8 *pktdata = (u8 *) (pktbuf->data);
284 struct ethhdr *eh = (struct ethhdr *)pktdata;
285
286 if (is_multicast_ether_addr(eh->h_dest))
287 drvr->tx_multicast++;
288 if (ntohs(eh->h_proto) == ETH_P_PAE)
289 atomic_inc(&drvr->pend_8021x_cnt);
290 }
291
292 /* If the protocol uses a data header, apply it */
293 brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
294
295 /* Use bus module to send data frame */
296 return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
297}
298
299static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) 275static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
300{ 276{
301 int ret; 277 int ret;
@@ -338,7 +314,22 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
338 } 314 }
339 } 315 }
340 316
341 ret = brcmf_sendpkt(drvr, ifp->idx, skb); 317 /* Update multicast statistic */
318 if (skb->len >= ETH_ALEN) {
319 u8 *pktdata = (u8 *)(skb->data);
320 struct ethhdr *eh = (struct ethhdr *)pktdata;
321
322 if (is_multicast_ether_addr(eh->h_dest))
323 drvr->tx_multicast++;
324 if (ntohs(eh->h_proto) == ETH_P_PAE)
325 atomic_inc(&drvr->pend_8021x_cnt);
326 }
327
328 /* If the protocol uses a data header, apply it */
329 brcmf_proto_hdrpush(drvr, ifp->idx, skb);
330
331 /* Use bus module to send data frame */
332 ret = drvr->bus_if->brcmf_bus_txdata(drvr->dev, skb);
342 333
343done: 334done:
344 if (ret) 335 if (ret)
@@ -350,19 +341,23 @@ done:
350 return 0; 341 return 0;
351} 342}
352 343
353void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state) 344void brcmf_txflowblock(struct device *dev, bool state)
354{ 345{
355 struct net_device *ndev; 346 struct net_device *ndev;
356 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 347 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
357 struct brcmf_pub *drvr = bus_if->drvr; 348 struct brcmf_pub *drvr = bus_if->drvr;
349 int i;
358 350
359 brcmf_dbg(TRACE, "Enter\n"); 351 brcmf_dbg(TRACE, "Enter\n");
360 352
361 ndev = drvr->iflist[ifidx]->ndev; 353 for (i = 0; i < BRCMF_MAX_IFS; i++)
362 if (state == ON) 354 if (drvr->iflist[i]) {
363 netif_stop_queue(ndev); 355 ndev = drvr->iflist[i]->ndev;
364 else 356 if (state)
365 netif_wake_queue(ndev); 357 netif_stop_queue(ndev);
358 else
359 netif_wake_queue(ndev);
360 }
366} 361}
367 362
368static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx, 363static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
@@ -775,6 +770,14 @@ done:
775 return err; 770 return err;
776} 771}
777 772
773int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
774{
775 brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
776 dcmd->cmd, dcmd->buf, dcmd->len);
777
778 return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
779}
780
778static int brcmf_netdev_stop(struct net_device *ndev) 781static int brcmf_netdev_stop(struct net_device *ndev)
779{ 782{
780 struct brcmf_if *ifp = netdev_priv(ndev); 783 struct brcmf_if *ifp = netdev_priv(ndev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 472f2ef5c652..3564686add9a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -482,6 +482,15 @@ struct sdpcm_shared_le {
482 __le32 brpt_addr; 482 __le32 brpt_addr;
483}; 483};
484 484
485/* SDIO read frame info */
486struct brcmf_sdio_read {
487 u8 seq_num;
488 u8 channel;
489 u16 len;
490 u16 len_left;
491 u16 len_nxtfrm;
492 u8 dat_offset;
493};
485 494
486/* misc chip info needed by some of the routines */ 495/* misc chip info needed by some of the routines */
487/* Private data for SDIO bus interaction */ 496/* Private data for SDIO bus interaction */
@@ -494,9 +503,8 @@ struct brcmf_sdio {
494 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */ 503 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
495 504
496 u32 hostintmask; /* Copy of Host Interrupt Mask */ 505 u32 hostintmask; /* Copy of Host Interrupt Mask */
497 u32 intstatus; /* Intstatus bits (events) pending */ 506 atomic_t intstatus; /* Intstatus bits (events) pending */
498 bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ 507 atomic_t fcstate; /* State of dongle flow-control */
499 bool fcstate; /* State of dongle flow-control */
500 508
501 uint blocksize; /* Block size of SDIO transfers */ 509 uint blocksize; /* Block size of SDIO transfers */
502 uint roundup; /* Max roundup limit */ 510 uint roundup; /* Max roundup limit */
@@ -508,9 +516,11 @@ struct brcmf_sdio {
508 516
509 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN]; 517 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
510 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ 518 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
511 u16 nextlen; /* Next Read Len from last header */
512 u8 rx_seq; /* Receive sequence number (expected) */ 519 u8 rx_seq; /* Receive sequence number (expected) */
520 struct brcmf_sdio_read cur_read;
521 /* info of current read frame */
513 bool rxskip; /* Skip receive (awaiting NAK ACK) */ 522 bool rxskip; /* Skip receive (awaiting NAK ACK) */
523 bool rxpending; /* Data frame pending in dongle */
514 524
515 uint rxbound; /* Rx frames to read before resched */ 525 uint rxbound; /* Rx frames to read before resched */
516 uint txbound; /* Tx frames to send before resched */ 526 uint txbound; /* Tx frames to send before resched */
@@ -531,7 +541,7 @@ struct brcmf_sdio {
531 541
532 bool intr; /* Use interrupts */ 542 bool intr; /* Use interrupts */
533 bool poll; /* Use polling */ 543 bool poll; /* Use polling */
534 bool ipend; /* Device interrupt is pending */ 544 atomic_t ipend; /* Device interrupt is pending */
535 uint spurious; /* Count of spurious interrupts */ 545 uint spurious; /* Count of spurious interrupts */
536 uint pollrate; /* Ticks between device polls */ 546 uint pollrate; /* Ticks between device polls */
537 uint polltick; /* Tick counter */ 547 uint polltick; /* Tick counter */
@@ -549,12 +559,9 @@ struct brcmf_sdio {
549 s32 idleclock; /* How to set bus driver when idle */ 559 s32 idleclock; /* How to set bus driver when idle */
550 s32 sd_rxchain; 560 s32 sd_rxchain;
551 bool use_rxchain; /* If brcmf should use PKT chains */ 561 bool use_rxchain; /* If brcmf should use PKT chains */
552 bool sleeping; /* Is SDIO bus sleeping? */
553 bool rxflow_mode; /* Rx flow control mode */ 562 bool rxflow_mode; /* Rx flow control mode */
554 bool rxflow; /* Is rx flow control on */ 563 bool rxflow; /* Is rx flow control on */
555 bool alp_only; /* Don't use HT clock (ALP only) */ 564 bool alp_only; /* Don't use HT clock (ALP only) */
556/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
557 bool usebufpool;
558 565
559 u8 *ctrl_frame_buf; 566 u8 *ctrl_frame_buf;
560 u32 ctrl_frame_len; 567 u32 ctrl_frame_len;
@@ -570,8 +577,8 @@ struct brcmf_sdio {
570 bool wd_timer_valid; 577 bool wd_timer_valid;
571 uint save_ms; 578 uint save_ms;
572 579
573 struct task_struct *dpc_tsk; 580 struct workqueue_struct *brcmf_wq;
574 struct completion dpc_wait; 581 struct work_struct datawork;
575 struct list_head dpc_tsklst; 582 struct list_head dpc_tsklst;
576 spinlock_t dpc_tl_lock; 583 spinlock_t dpc_tl_lock;
577 584
@@ -657,15 +664,6 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
657 664
658#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) 665#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
659 666
660/* Packet free applicable unconditionally for sdio and sdspi.
661 * Conditional if bufpool was present for gspi bus.
662 */
663static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
664{
665 if (bus->usebufpool)
666 brcmu_pkt_buf_free_skb(pkt);
667}
668
669/* Turn backplane clock on or off */ 667/* Turn backplane clock on or off */
670static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok) 668static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
671{ 669{
@@ -853,81 +851,6 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
853 return 0; 851 return 0;
854} 852}
855 853
856static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
857{
858 int ret;
859
860 brcmf_dbg(INFO, "request %s (currently %s)\n",
861 sleep ? "SLEEP" : "WAKE",
862 bus->sleeping ? "SLEEP" : "WAKE");
863
864 /* Done if we're already in the requested state */
865 if (sleep == bus->sleeping)
866 return 0;
867
868 /* Going to sleep: set the alarm and turn off the lights... */
869 if (sleep) {
870 /* Don't sleep if something is pending */
871 if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
872 return -EBUSY;
873
874 /* Make sure the controller has the bus up */
875 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
876
877 /* Tell device to start using OOB wakeup */
878 ret = w_sdreg32(bus, SMB_USE_OOB,
879 offsetof(struct sdpcmd_regs, tosbmailbox));
880 if (ret != 0)
881 brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
882
883 /* Turn off our contribution to the HT clock request */
884 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
885
886 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
887 SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
888
889 /* Isolate the bus */
890 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
891 SBSDIO_DEVCTL_PADS_ISO, NULL);
892
893 /* Change state */
894 bus->sleeping = true;
895
896 } else {
897 /* Waking up: bus power up is ok, set local state */
898
899 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
900 0, NULL);
901
902 /* Make sure the controller has the bus up */
903 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
904
905 /* Send misc interrupt to indicate OOB not needed */
906 ret = w_sdreg32(bus, 0,
907 offsetof(struct sdpcmd_regs, tosbmailboxdata));
908 if (ret == 0)
909 ret = w_sdreg32(bus, SMB_DEV_INT,
910 offsetof(struct sdpcmd_regs, tosbmailbox));
911
912 if (ret != 0)
913 brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
914
915 /* Make sure we have SD bus access */
916 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
917
918 /* Change state */
919 bus->sleeping = false;
920 }
921
922 return 0;
923}
924
925static void bus_wake(struct brcmf_sdio *bus)
926{
927 if (bus->sleeping)
928 brcmf_sdbrcm_bussleep(bus, false);
929}
930
931static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) 854static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
932{ 855{
933 u32 intstatus = 0; 856 u32 intstatus = 0;
@@ -1056,7 +979,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1056 } 979 }
1057 980
1058 /* Clear partial in any case */ 981 /* Clear partial in any case */
1059 bus->nextlen = 0; 982 bus->cur_read.len = 0;
1060 983
1061 /* If we can't reach the device, signal failure */ 984 /* If we can't reach the device, signal failure */
1062 if (err) 985 if (err)
@@ -1108,6 +1031,96 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1108 } 1031 }
1109} 1032}
1110 1033
1034static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1035 struct brcmf_sdio_read *rd)
1036{
1037 u16 len, checksum;
1038 u8 rx_seq, fc, tx_seq_max;
1039
1040 /*
1041 * 4 bytes hardware header (frame tag)
1042 * Byte 0~1: Frame length
1043 * Byte 2~3: Checksum, bit-wise inverse of frame length
1044 */
1045 len = get_unaligned_le16(header);
1046 checksum = get_unaligned_le16(header + sizeof(u16));
1047 /* All zero means no more to read */
1048 if (!(len | checksum)) {
1049 bus->rxpending = false;
1050 return false;
1051 }
1052 if ((u16)(~(len ^ checksum))) {
1053 brcmf_dbg(ERROR, "HW header checksum error\n");
1054 bus->sdcnt.rx_badhdr++;
1055 brcmf_sdbrcm_rxfail(bus, false, false);
1056 return false;
1057 }
1058 if (len < SDPCM_HDRLEN) {
1059 brcmf_dbg(ERROR, "HW header length error\n");
1060 return false;
1061 }
1062 rd->len = len;
1063
1064 /*
1065 * 8 bytes hardware header
1066 * Byte 0: Rx sequence number
1067 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1068 * Byte 2: Length of next data frame
1069 * Byte 3: Data offset
1070 * Byte 4: Flow control bits
1071 * Byte 5: Maximum Sequence number allow for Tx
1072 * Byte 6~7: Reserved
1073 */
1074 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
1075 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
1076 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
1077 brcmf_dbg(ERROR, "HW header length too long\n");
1078 bus->sdiodev->bus_if->dstats.rx_errors++;
1079 bus->sdcnt.rx_toolong++;
1080 brcmf_sdbrcm_rxfail(bus, false, false);
1081 rd->len = 0;
1082 return false;
1083 }
1084 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1085 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1086 brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq);
1087 bus->sdcnt.rx_badhdr++;
1088 brcmf_sdbrcm_rxfail(bus, false, false);
1089 rd->len = 0;
1090 return false;
1091 }
1092 if (rd->seq_num != rx_seq) {
1093 brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n",
1094 rx_seq, rd->seq_num);
1095 bus->sdcnt.rx_badseq++;
1096 rd->seq_num = rx_seq;
1097 }
1098 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1099 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1100 /* only warm for NON glom packet */
1101 if (rd->channel != SDPCM_GLOM_CHANNEL)
1102 brcmf_dbg(ERROR, "seq %d: next length error\n", rx_seq);
1103 rd->len_nxtfrm = 0;
1104 }
1105 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1106 if (bus->flowcontrol != fc) {
1107 if (~bus->flowcontrol & fc)
1108 bus->sdcnt.fc_xoff++;
1109 if (bus->flowcontrol & ~fc)
1110 bus->sdcnt.fc_xon++;
1111 bus->sdcnt.fc_rcvd++;
1112 bus->flowcontrol = fc;
1113 }
1114 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1115 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1116 brcmf_dbg(ERROR, "seq %d: max tx seq number error\n", rx_seq);
1117 tx_seq_max = bus->tx_seq + 2;
1118 }
1119 bus->tx_max = tx_seq_max;
1120
1121 return true;
1122}
1123
1111static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) 1124static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1112{ 1125{
1113 u16 dlen, totlen; 1126 u16 dlen, totlen;
@@ -1122,6 +1135,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1122 1135
1123 int ifidx = 0; 1136 int ifidx = 0;
1124 bool usechain = bus->use_rxchain; 1137 bool usechain = bus->use_rxchain;
1138 u16 next_len;
1125 1139
1126 /* If packets, issue read(s) and send up packet chain */ 1140 /* If packets, issue read(s) and send up packet chain */
1127 /* Return sequence numbers consumed? */ 1141 /* Return sequence numbers consumed? */
@@ -1185,10 +1199,10 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1185 if (pnext) { 1199 if (pnext) {
1186 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n", 1200 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1187 totlen, num); 1201 totlen, num);
1188 if (BRCMF_GLOM_ON() && bus->nextlen && 1202 if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1189 totlen != bus->nextlen) { 1203 totlen != bus->cur_read.len) {
1190 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n", 1204 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1191 bus->nextlen, totlen, rxseq); 1205 bus->cur_read.len, totlen, rxseq);
1192 } 1206 }
1193 pfirst = pnext = NULL; 1207 pfirst = pnext = NULL;
1194 } else { 1208 } else {
@@ -1199,7 +1213,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1199 /* Done with descriptor packet */ 1213 /* Done with descriptor packet */
1200 brcmu_pkt_buf_free_skb(bus->glomd); 1214 brcmu_pkt_buf_free_skb(bus->glomd);
1201 bus->glomd = NULL; 1215 bus->glomd = NULL;
1202 bus->nextlen = 0; 1216 bus->cur_read.len = 0;
1203 } 1217 }
1204 1218
1205 /* Ok -- either we just generated a packet chain, 1219 /* Ok -- either we just generated a packet chain,
@@ -1272,12 +1286,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1272 1286
1273 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); 1287 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
1274 seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); 1288 seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
1275 bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; 1289 next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1276 if ((bus->nextlen << 4) > MAX_RX_DATASZ) { 1290 if ((next_len << 4) > MAX_RX_DATASZ) {
1277 brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n", 1291 brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
1278 bus->nextlen, seq); 1292 next_len, seq);
1279 bus->nextlen = 0; 1293 next_len = 0;
1280 } 1294 }
1295 bus->cur_read.len = next_len << 4;
1281 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); 1296 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1282 txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); 1297 txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1283 1298
@@ -1378,7 +1393,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1378 bus->sdcnt.rxglomfail++; 1393 bus->sdcnt.rxglomfail++;
1379 brcmf_sdbrcm_free_glom(bus); 1394 brcmf_sdbrcm_free_glom(bus);
1380 } 1395 }
1381 bus->nextlen = 0; 1396 bus->cur_read.len = 0;
1382 return 0; 1397 return 0;
1383 } 1398 }
1384 1399
@@ -1573,422 +1588,166 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1573 } 1588 }
1574} 1589}
1575 1590
1576static void 1591static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1577brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
1578 struct sk_buff **pkt, u8 **rxbuf)
1579{ 1592{
1580 int sdret; /* Return code from calls */
1581
1582 *pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
1583 if (*pkt == NULL)
1584 return;
1585
1586 pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
1587 *rxbuf = (u8 *) ((*pkt)->data);
1588 /* Read the entire frame */
1589 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1590 SDIO_FUNC_2, F2SYNC, *pkt);
1591 bus->sdcnt.f2rxdata++;
1592
1593 if (sdret < 0) {
1594 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
1595 rdlen, sdret);
1596 brcmu_pkt_buf_free_skb(*pkt);
1597 bus->sdiodev->bus_if->dstats.rx_errors++;
1598 /* Force retry w/normal header read.
1599 * Don't attempt NAK for
1600 * gSPI
1601 */
1602 brcmf_sdbrcm_rxfail(bus, true, true);
1603 *pkt = NULL;
1604 }
1605}
1606
1607/* Checks the header */
1608static int
1609brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
1610 u8 rxseq, u16 nextlen, u16 *len)
1611{
1612 u16 check;
1613 bool len_consistent; /* Result of comparing readahead len and
1614 len from hw-hdr */
1615
1616 memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
1617
1618 /* Extract hardware header fields */
1619 *len = get_unaligned_le16(bus->rxhdr);
1620 check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
1621
1622 /* All zeros means readahead info was bad */
1623 if (!(*len | check)) {
1624 brcmf_dbg(INFO, "(nextlen): read zeros in HW header???\n");
1625 goto fail;
1626 }
1627
1628 /* Validate check bytes */
1629 if ((u16)~(*len ^ check)) {
1630 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
1631 nextlen, *len, check);
1632 bus->sdcnt.rx_badhdr++;
1633 brcmf_sdbrcm_rxfail(bus, false, false);
1634 goto fail;
1635 }
1636
1637 /* Validate frame length */
1638 if (*len < SDPCM_HDRLEN) {
1639 brcmf_dbg(ERROR, "(nextlen): HW hdr length invalid: %d\n",
1640 *len);
1641 goto fail;
1642 }
1643
1644 /* Check for consistency with readahead info */
1645 len_consistent = (nextlen != (roundup(*len, 16) >> 4));
1646 if (len_consistent) {
1647 /* Mismatch, force retry w/normal
1648 header (may be >4K) */
1649 brcmf_dbg(ERROR, "(nextlen): mismatch, nextlen %d len %d rnd %d; expected rxseq %d\n",
1650 nextlen, *len, roundup(*len, 16),
1651 rxseq);
1652 brcmf_sdbrcm_rxfail(bus, true, true);
1653 goto fail;
1654 }
1655
1656 return 0;
1657
1658fail:
1659 brcmf_sdbrcm_pktfree2(bus, pkt);
1660 return -EINVAL;
1661}
1662
1663/* Return true if there may be more frames to read */
1664static uint
1665brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1666{
1667 u16 len, check; /* Extracted hardware header fields */
1668 u8 chan, seq, doff; /* Extracted software header fields */
1669 u8 fcbits; /* Extracted fcbits from software header */
1670
1671 struct sk_buff *pkt; /* Packet for event or data frames */ 1593 struct sk_buff *pkt; /* Packet for event or data frames */
1672 u16 pad; /* Number of pad bytes to read */ 1594 u16 pad; /* Number of pad bytes to read */
1673 u16 rdlen; /* Total number of bytes to read */
1674 u8 rxseq; /* Next sequence number to expect */
1675 uint rxleft = 0; /* Remaining number of frames allowed */ 1595 uint rxleft = 0; /* Remaining number of frames allowed */
1676 int sdret; /* Return code from calls */ 1596 int sdret; /* Return code from calls */
1677 u8 txmax; /* Maximum tx sequence offered */
1678 u8 *rxbuf;
1679 int ifidx = 0; 1597 int ifidx = 0;
1680 uint rxcount = 0; /* Total frames read */ 1598 uint rxcount = 0; /* Total frames read */
1599 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
1600 u8 head_read = 0;
1681 1601
1682 brcmf_dbg(TRACE, "Enter\n"); 1602 brcmf_dbg(TRACE, "Enter\n");
1683 1603
1684 /* Not finished unless we encounter no more frames indication */ 1604 /* Not finished unless we encounter no more frames indication */
1685 *finished = false; 1605 bus->rxpending = true;
1686 1606
1687 for (rxseq = bus->rx_seq, rxleft = maxframes; 1607 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1688 !bus->rxskip && rxleft && 1608 !bus->rxskip && rxleft &&
1689 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN; 1609 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
1690 rxseq++, rxleft--) { 1610 rd->seq_num++, rxleft--) {
1691 1611
1692 /* Handle glomming separately */ 1612 /* Handle glomming separately */
1693 if (bus->glomd || !skb_queue_empty(&bus->glom)) { 1613 if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1694 u8 cnt; 1614 u8 cnt;
1695 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n", 1615 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1696 bus->glomd, skb_peek(&bus->glom)); 1616 bus->glomd, skb_peek(&bus->glom));
1697 cnt = brcmf_sdbrcm_rxglom(bus, rxseq); 1617 cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
1698 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt); 1618 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1699 rxseq += cnt - 1; 1619 rd->seq_num += cnt - 1;
1700 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; 1620 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1701 continue; 1621 continue;
1702 } 1622 }
1703 1623
1704 /* Try doing single read if we can */ 1624 rd->len_left = rd->len;
1705 if (bus->nextlen) { 1625 /* read header first for unknow frame length */
1706 u16 nextlen = bus->nextlen; 1626 if (!rd->len) {
1707 bus->nextlen = 0; 1627 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1708 1628 bus->sdiodev->sbwad,
1709 rdlen = len = nextlen << 4; 1629 SDIO_FUNC_2, F2SYNC,
1710 brcmf_pad(bus, &pad, &rdlen); 1630 bus->rxhdr,
1711 1631 BRCMF_FIRSTREAD);
1712 /* 1632 bus->sdcnt.f2rxhdrs++;
1713 * After the frame is received we have to 1633 if (sdret < 0) {
1714 * distinguish whether it is data 1634 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n",
1715 * or non-data frame. 1635 sdret);
1716 */ 1636 bus->sdcnt.rx_hdrfail++;
1717 brcmf_alloc_pkt_and_read(bus, rdlen, &pkt, &rxbuf); 1637 brcmf_sdbrcm_rxfail(bus, true, true);
1718 if (pkt == NULL) {
1719 /* Give up on data, request rtx of events */
1720 brcmf_dbg(ERROR, "(nextlen): brcmf_alloc_pkt_and_read failed: len %d rdlen %d expected rxseq %d\n",
1721 len, rdlen, rxseq);
1722 continue;
1723 }
1724
1725 if (brcmf_check_rxbuf(bus, pkt, rxbuf, rxseq, nextlen,
1726 &len) < 0)
1727 continue; 1638 continue;
1728
1729 /* Extract software header fields */
1730 chan = SDPCM_PACKET_CHANNEL(
1731 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1732 seq = SDPCM_PACKET_SEQUENCE(
1733 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1734 doff = SDPCM_DOFFSET_VALUE(
1735 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1736 txmax = SDPCM_WINDOW_VALUE(
1737 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1738
1739 bus->nextlen =
1740 bus->rxhdr[SDPCM_FRAMETAG_LEN +
1741 SDPCM_NEXTLEN_OFFSET];
1742 if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
1743 brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
1744 bus->nextlen, seq);
1745 bus->nextlen = 0;
1746 } 1639 }
1747 1640
1748 bus->sdcnt.rx_readahead_cnt++; 1641 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1749
1750 /* Handle Flow Control */
1751 fcbits = SDPCM_FCMASK_VALUE(
1752 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1753
1754 if (bus->flowcontrol != fcbits) {
1755 if (~bus->flowcontrol & fcbits)
1756 bus->sdcnt.fc_xoff++;
1757
1758 if (bus->flowcontrol & ~fcbits)
1759 bus->sdcnt.fc_xon++;
1760
1761 bus->sdcnt.fc_rcvd++;
1762 bus->flowcontrol = fcbits;
1763 }
1764
1765 /* Check and update sequence number */
1766 if (rxseq != seq) {
1767 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
1768 seq, rxseq);
1769 bus->sdcnt.rx_badseq++;
1770 rxseq = seq;
1771 }
1772
1773 /* Check window for sanity */
1774 if ((u8) (txmax - bus->tx_seq) > 0x40) {
1775 brcmf_dbg(ERROR, "got unlikely tx max %d with tx_seq %d\n",
1776 txmax, bus->tx_seq);
1777 txmax = bus->tx_seq + 2;
1778 }
1779 bus->tx_max = txmax;
1780
1781 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1782 rxbuf, len, "Rx Data:\n");
1783 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1784 BRCMF_DATA_ON()) &&
1785 BRCMF_HDRS_ON(),
1786 bus->rxhdr, SDPCM_HDRLEN, 1642 bus->rxhdr, SDPCM_HDRLEN,
1787 "RxHdr:\n"); 1643 "RxHdr:\n");
1788 1644
1789 if (chan == SDPCM_CONTROL_CHANNEL) { 1645 if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
1790 brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n", 1646 if (!bus->rxpending)
1791 seq); 1647 break;
1792 /* Force retry w/normal header read */ 1648 else
1793 bus->nextlen = 0; 1649 continue;
1794 brcmf_sdbrcm_rxfail(bus, false, true);
1795 brcmf_sdbrcm_pktfree2(bus, pkt);
1796 continue;
1797 } 1650 }
1798 1651
1799 /* Validate data offset */ 1652 if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1800 if ((doff < SDPCM_HDRLEN) || (doff > len)) { 1653 brcmf_sdbrcm_read_control(bus, bus->rxhdr,
1801 brcmf_dbg(ERROR, "(nextlen): bad data offset %d: HW len %d min %d\n", 1654 rd->len,
1802 doff, len, SDPCM_HDRLEN); 1655 rd->dat_offset);
1803 brcmf_sdbrcm_rxfail(bus, false, false); 1656 /* prepare the descriptor for the next read */
1804 brcmf_sdbrcm_pktfree2(bus, pkt); 1657 rd->len = rd->len_nxtfrm << 4;
1658 rd->len_nxtfrm = 0;
1659 /* treat all packet as event if we don't know */
1660 rd->channel = SDPCM_EVENT_CHANNEL;
1805 continue; 1661 continue;
1806 } 1662 }
1807 1663 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1808 /* All done with this one -- now deliver the packet */ 1664 rd->len - BRCMF_FIRSTREAD : 0;
1809 goto deliver; 1665 head_read = BRCMF_FIRSTREAD;
1810 }
1811
1812 /* Read frame header (hardware and software) */
1813 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
1814 SDIO_FUNC_2, F2SYNC, bus->rxhdr,
1815 BRCMF_FIRSTREAD);
1816 bus->sdcnt.f2rxhdrs++;
1817
1818 if (sdret < 0) {
1819 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
1820 bus->sdcnt.rx_hdrfail++;
1821 brcmf_sdbrcm_rxfail(bus, true, true);
1822 continue;
1823 }
1824 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1825 bus->rxhdr, SDPCM_HDRLEN, "RxHdr:\n");
1826
1827
1828 /* Extract hardware header fields */
1829 len = get_unaligned_le16(bus->rxhdr);
1830 check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
1831
1832 /* All zeros means no more frames */
1833 if (!(len | check)) {
1834 *finished = true;
1835 break;
1836 }
1837
1838 /* Validate check bytes */
1839 if ((u16) ~(len ^ check)) {
1840 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
1841 len, check);
1842 bus->sdcnt.rx_badhdr++;
1843 brcmf_sdbrcm_rxfail(bus, false, false);
1844 continue;
1845 }
1846
1847 /* Validate frame length */
1848 if (len < SDPCM_HDRLEN) {
1849 brcmf_dbg(ERROR, "HW hdr length invalid: %d\n", len);
1850 continue;
1851 }
1852
1853 /* Extract software header fields */
1854 chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1855 seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1856 doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1857 txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1858
1859 /* Validate data offset */
1860 if ((doff < SDPCM_HDRLEN) || (doff > len)) {
1861 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
1862 doff, len, SDPCM_HDRLEN, seq);
1863 bus->sdcnt.rx_badhdr++;
1864 brcmf_sdbrcm_rxfail(bus, false, false);
1865 continue;
1866 }
1867
1868 /* Save the readahead length if there is one */
1869 bus->nextlen =
1870 bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1871 if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
1872 brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
1873 bus->nextlen, seq);
1874 bus->nextlen = 0;
1875 }
1876
1877 /* Handle Flow Control */
1878 fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1879
1880 if (bus->flowcontrol != fcbits) {
1881 if (~bus->flowcontrol & fcbits)
1882 bus->sdcnt.fc_xoff++;
1883
1884 if (bus->flowcontrol & ~fcbits)
1885 bus->sdcnt.fc_xon++;
1886
1887 bus->sdcnt.fc_rcvd++;
1888 bus->flowcontrol = fcbits;
1889 }
1890
1891 /* Check and update sequence number */
1892 if (rxseq != seq) {
1893 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
1894 bus->sdcnt.rx_badseq++;
1895 rxseq = seq;
1896 }
1897
1898 /* Check window for sanity */
1899 if ((u8) (txmax - bus->tx_seq) > 0x40) {
1900 brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
1901 txmax, bus->tx_seq);
1902 txmax = bus->tx_seq + 2;
1903 }
1904 bus->tx_max = txmax;
1905
1906 /* Call a separate function for control frames */
1907 if (chan == SDPCM_CONTROL_CHANNEL) {
1908 brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
1909 continue;
1910 }
1911
1912 /* precondition: chan is either SDPCM_DATA_CHANNEL,
1913 SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
1914 SDPCM_GLOM_CHANNEL */
1915
1916 /* Length to read */
1917 rdlen = (len > BRCMF_FIRSTREAD) ? (len - BRCMF_FIRSTREAD) : 0;
1918
1919 /* May pad read to blocksize for efficiency */
1920 if (bus->roundup && bus->blocksize &&
1921 (rdlen > bus->blocksize)) {
1922 pad = bus->blocksize - (rdlen % bus->blocksize);
1923 if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1924 ((rdlen + pad + BRCMF_FIRSTREAD) < MAX_RX_DATASZ))
1925 rdlen += pad;
1926 } else if (rdlen % BRCMF_SDALIGN) {
1927 rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
1928 } 1666 }
1929 1667
1930 /* Satisfy length-alignment requirements */ 1668 brcmf_pad(bus, &pad, &rd->len_left);
1931 if (rdlen & (ALIGNMENT - 1))
1932 rdlen = roundup(rdlen, ALIGNMENT);
1933
1934 if ((rdlen + BRCMF_FIRSTREAD) > MAX_RX_DATASZ) {
1935 /* Too long -- skip this frame */
1936 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
1937 len, rdlen);
1938 bus->sdiodev->bus_if->dstats.rx_errors++;
1939 bus->sdcnt.rx_toolong++;
1940 brcmf_sdbrcm_rxfail(bus, false, false);
1941 continue;
1942 }
1943 1669
1944 pkt = brcmu_pkt_buf_get_skb(rdlen + 1670 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1945 BRCMF_FIRSTREAD + BRCMF_SDALIGN); 1671 BRCMF_SDALIGN);
1946 if (!pkt) { 1672 if (!pkt) {
1947 /* Give up on data, request rtx of events */ 1673 /* Give up on data, request rtx of events */
1948 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n", 1674 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed\n");
1949 rdlen, chan);
1950 bus->sdiodev->bus_if->dstats.rx_dropped++; 1675 bus->sdiodev->bus_if->dstats.rx_dropped++;
1951 brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan)); 1676 brcmf_sdbrcm_rxfail(bus, false,
1677 RETRYCHAN(rd->channel));
1952 continue; 1678 continue;
1953 } 1679 }
1680 skb_pull(pkt, head_read);
1681 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1954 1682
1955 /* Leave room for what we already read, and align remainder */
1956 skb_pull(pkt, BRCMF_FIRSTREAD);
1957 pkt_align(pkt, rdlen, BRCMF_SDALIGN);
1958
1959 /* Read the remaining frame data */
1960 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1683 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1961 SDIO_FUNC_2, F2SYNC, pkt); 1684 SDIO_FUNC_2, F2SYNC, pkt);
1962 bus->sdcnt.f2rxdata++; 1685 bus->sdcnt.f2rxdata++;
1963 1686
1964 if (sdret < 0) { 1687 if (sdret < 0) {
1965 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen, 1688 brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n",
1966 ((chan == SDPCM_EVENT_CHANNEL) ? "event" 1689 rd->len, rd->channel, sdret);
1967 : ((chan == SDPCM_DATA_CHANNEL) ? "data"
1968 : "test")), sdret);
1969 brcmu_pkt_buf_free_skb(pkt); 1690 brcmu_pkt_buf_free_skb(pkt);
1970 bus->sdiodev->bus_if->dstats.rx_errors++; 1691 bus->sdiodev->bus_if->dstats.rx_errors++;
1971 brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan)); 1692 brcmf_sdbrcm_rxfail(bus, true,
1693 RETRYCHAN(rd->channel));
1972 continue; 1694 continue;
1973 } 1695 }
1974 1696
1975 /* Copy the already-read portion */ 1697 if (head_read) {
1976 skb_push(pkt, BRCMF_FIRSTREAD); 1698 skb_push(pkt, head_read);
1977 memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD); 1699 memcpy(pkt->data, bus->rxhdr, head_read);
1700 head_read = 0;
1701 } else {
1702 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1703 rd_new.seq_num = rd->seq_num;
1704 if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
1705 rd->len = 0;
1706 brcmu_pkt_buf_free_skb(pkt);
1707 }
1708 bus->sdcnt.rx_readahead_cnt++;
1709 if (rd->len != roundup(rd_new.len, 16)) {
1710 brcmf_dbg(ERROR, "frame length mismatch:read %d, should be %d\n",
1711 rd->len,
1712 roundup(rd_new.len, 16) >> 4);
1713 rd->len = 0;
1714 brcmf_sdbrcm_rxfail(bus, true, true);
1715 brcmu_pkt_buf_free_skb(pkt);
1716 continue;
1717 }
1718 rd->len_nxtfrm = rd_new.len_nxtfrm;
1719 rd->channel = rd_new.channel;
1720 rd->dat_offset = rd_new.dat_offset;
1721
1722 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1723 BRCMF_DATA_ON()) &&
1724 BRCMF_HDRS_ON(),
1725 bus->rxhdr, SDPCM_HDRLEN,
1726 "RxHdr:\n");
1727
1728 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1729 brcmf_dbg(ERROR, "readahead on control packet %d?\n",
1730 rd_new.seq_num);
1731 /* Force retry w/normal header read */
1732 rd->len = 0;
1733 brcmf_sdbrcm_rxfail(bus, false, true);
1734 brcmu_pkt_buf_free_skb(pkt);
1735 continue;
1736 }
1737 }
1978 1738
1979 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(), 1739 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1980 pkt->data, len, "Rx Data:\n"); 1740 pkt->data, rd->len, "Rx Data:\n");
1981 1741
1982deliver:
1983 /* Save superframe descriptor and allocate packet frame */ 1742 /* Save superframe descriptor and allocate packet frame */
1984 if (chan == SDPCM_GLOM_CHANNEL) { 1743 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1985 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { 1744 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
1986 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n", 1745 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1987 len); 1746 rd->len);
1988 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), 1747 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1989 pkt->data, len, 1748 pkt->data, rd->len,
1990 "Glom Data:\n"); 1749 "Glom Data:\n");
1991 __skb_trim(pkt, len); 1750 __skb_trim(pkt, rd->len);
1992 skb_pull(pkt, SDPCM_HDRLEN); 1751 skb_pull(pkt, SDPCM_HDRLEN);
1993 bus->glomd = pkt; 1752 bus->glomd = pkt;
1994 } else { 1753 } else {
@@ -1996,12 +1755,23 @@ deliver:
1996 "descriptor!\n", __func__); 1755 "descriptor!\n", __func__);
1997 brcmf_sdbrcm_rxfail(bus, false, false); 1756 brcmf_sdbrcm_rxfail(bus, false, false);
1998 } 1757 }
1758 /* prepare the descriptor for the next read */
1759 rd->len = rd->len_nxtfrm << 4;
1760 rd->len_nxtfrm = 0;
1761 /* treat all packet as event if we don't know */
1762 rd->channel = SDPCM_EVENT_CHANNEL;
1999 continue; 1763 continue;
2000 } 1764 }
2001 1765
2002 /* Fill in packet len and prio, deliver upward */ 1766 /* Fill in packet len and prio, deliver upward */
2003 __skb_trim(pkt, len); 1767 __skb_trim(pkt, rd->len);
2004 skb_pull(pkt, doff); 1768 skb_pull(pkt, rd->dat_offset);
1769
1770 /* prepare the descriptor for the next read */
1771 rd->len = rd->len_nxtfrm << 4;
1772 rd->len_nxtfrm = 0;
1773 /* treat all packet as event if we don't know */
1774 rd->channel = SDPCM_EVENT_CHANNEL;
2005 1775
2006 if (pkt->len == 0) { 1776 if (pkt->len == 0) {
2007 brcmu_pkt_buf_free_skb(pkt); 1777 brcmu_pkt_buf_free_skb(pkt);
@@ -2019,17 +1789,17 @@ deliver:
2019 brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt); 1789 brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
2020 down(&bus->sdsem); 1790 down(&bus->sdsem);
2021 } 1791 }
1792
2022 rxcount = maxframes - rxleft; 1793 rxcount = maxframes - rxleft;
2023 /* Message if we hit the limit */ 1794 /* Message if we hit the limit */
2024 if (!rxleft) 1795 if (!rxleft)
2025 brcmf_dbg(DATA, "hit rx limit of %d frames\n", 1796 brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
2026 maxframes);
2027 else 1797 else
2028 brcmf_dbg(DATA, "processed %d frames\n", rxcount); 1798 brcmf_dbg(DATA, "processed %d frames\n", rxcount);
2029 /* Back off rxseq if awaiting rtx, update rx_seq */ 1799 /* Back off rxseq if awaiting rtx, update rx_seq */
2030 if (bus->rxskip) 1800 if (bus->rxskip)
2031 rxseq--; 1801 rd->seq_num--;
2032 bus->rx_seq = rxseq; 1802 bus->rx_seq = rd->seq_num;
2033 1803
2034 return rxcount; 1804 return rxcount;
2035} 1805}
@@ -2227,7 +1997,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2227 if (ret != 0) 1997 if (ret != 0)
2228 break; 1998 break;
2229 if (intstatus & bus->hostintmask) 1999 if (intstatus & bus->hostintmask)
2230 bus->ipend = true; 2000 atomic_set(&bus->ipend, 1);
2231 } 2001 }
2232 } 2002 }
2233 2003
@@ -2235,8 +2005,8 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2235 if (bus->sdiodev->bus_if->drvr_up && 2005 if (bus->sdiodev->bus_if->drvr_up &&
2236 (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) && 2006 (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
2237 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { 2007 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2238 bus->txoff = OFF; 2008 bus->txoff = false;
2239 brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF); 2009 brcmf_txflowblock(bus->sdiodev->dev, false);
2240 } 2010 }
2241 2011
2242 return cnt; 2012 return cnt;
@@ -2259,16 +2029,8 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2259 bus->watchdog_tsk = NULL; 2029 bus->watchdog_tsk = NULL;
2260 } 2030 }
2261 2031
2262 if (bus->dpc_tsk && bus->dpc_tsk != current) {
2263 send_sig(SIGTERM, bus->dpc_tsk, 1);
2264 kthread_stop(bus->dpc_tsk);
2265 bus->dpc_tsk = NULL;
2266 }
2267
2268 down(&bus->sdsem); 2032 down(&bus->sdsem);
2269 2033
2270 bus_wake(bus);
2271
2272 /* Enable clock for device interrupts */ 2034 /* Enable clock for device interrupts */
2273 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2035 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
2274 2036
@@ -2327,7 +2089,7 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2327 unsigned long flags; 2089 unsigned long flags;
2328 2090
2329 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags); 2091 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2330 if (!bus->sdiodev->irq_en && !bus->ipend) { 2092 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2331 enable_irq(bus->sdiodev->irq); 2093 enable_irq(bus->sdiodev->irq);
2332 bus->sdiodev->irq_en = true; 2094 bus->sdiodev->irq_en = true;
2333 } 2095 }
@@ -2339,21 +2101,69 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2339} 2101}
2340#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ 2102#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
2341 2103
2342static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) 2104static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2343{ 2105{
2344 u32 intstatus, newstatus = 0; 2106 struct list_head *new_hd;
2107 unsigned long flags;
2108
2109 if (in_interrupt())
2110 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2111 else
2112 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2113 if (new_hd == NULL)
2114 return;
2115
2116 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2117 list_add_tail(new_hd, &bus->dpc_tsklst);
2118 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2119}
2120
2121static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2122{
2123 u8 idx;
2124 u32 addr;
2125 unsigned long val;
2126 int n, ret;
2127
2128 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
2129 addr = bus->ci->c_inf[idx].base +
2130 offsetof(struct sdpcmd_regs, intstatus);
2131
2132 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
2133 bus->sdcnt.f1regdata++;
2134 if (ret != 0)
2135 val = 0;
2136
2137 val &= bus->hostintmask;
2138 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2139
2140 /* Clear interrupts */
2141 if (val) {
2142 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
2143 bus->sdcnt.f1regdata++;
2144 }
2145
2146 if (ret) {
2147 atomic_set(&bus->intstatus, 0);
2148 } else if (val) {
2149 for_each_set_bit(n, &val, 32)
2150 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2151 }
2152
2153 return ret;
2154}
2155
2156static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2157{
2158 u32 newstatus = 0;
2159 unsigned long intstatus;
2345 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */ 2160 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2346 uint txlimit = bus->txbound; /* Tx frames to send before resched */ 2161 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2347 uint framecnt = 0; /* Temporary counter of tx/rx frames */ 2162 uint framecnt = 0; /* Temporary counter of tx/rx frames */
2348 bool rxdone = true; /* Flag for no more read data */ 2163 int err = 0, n;
2349 bool resched = false; /* Flag indicating resched wanted */
2350 int err;
2351 2164
2352 brcmf_dbg(TRACE, "Enter\n"); 2165 brcmf_dbg(TRACE, "Enter\n");
2353 2166
2354 /* Start with leftover status bits */
2355 intstatus = bus->intstatus;
2356
2357 down(&bus->sdsem); 2167 down(&bus->sdsem);
2358 2168
2359 /* If waiting for HTAVAIL, check status */ 2169 /* If waiting for HTAVAIL, check status */
@@ -2399,39 +2209,22 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2399 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2209 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2400 } 2210 }
2401 bus->clkstate = CLK_AVAIL; 2211 bus->clkstate = CLK_AVAIL;
2402 } else {
2403 goto clkwait;
2404 } 2212 }
2405 } 2213 }
2406 2214
2407 bus_wake(bus);
2408
2409 /* Make sure backplane clock is on */ 2215 /* Make sure backplane clock is on */
2410 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true); 2216 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
2411 if (bus->clkstate == CLK_PENDING)
2412 goto clkwait;
2413 2217
2414 /* Pending interrupt indicates new device status */ 2218 /* Pending interrupt indicates new device status */
2415 if (bus->ipend) { 2219 if (atomic_read(&bus->ipend) > 0) {
2416 bus->ipend = false; 2220 atomic_set(&bus->ipend, 0);
2417 err = r_sdreg32(bus, &newstatus, 2221 sdio_claim_host(bus->sdiodev->func[1]);
2418 offsetof(struct sdpcmd_regs, intstatus)); 2222 err = brcmf_sdio_intr_rstatus(bus);
2419 bus->sdcnt.f1regdata++; 2223 sdio_release_host(bus->sdiodev->func[1]);
2420 if (err != 0)
2421 newstatus = 0;
2422 newstatus &= bus->hostintmask;
2423 bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
2424 if (newstatus) {
2425 err = w_sdreg32(bus, newstatus,
2426 offsetof(struct sdpcmd_regs,
2427 intstatus));
2428 bus->sdcnt.f1regdata++;
2429 }
2430 } 2224 }
2431 2225
2432 /* Merge new bits with previous */ 2226 /* Start with leftover status bits */
2433 intstatus |= newstatus; 2227 intstatus = atomic_xchg(&bus->intstatus, 0);
2434 bus->intstatus = 0;
2435 2228
2436 /* Handle flow-control change: read new state in case our ack 2229 /* Handle flow-control change: read new state in case our ack
2437 * crossed another change interrupt. If change still set, assume 2230 * crossed another change interrupt. If change still set, assume
@@ -2445,8 +2238,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2445 err = r_sdreg32(bus, &newstatus, 2238 err = r_sdreg32(bus, &newstatus,
2446 offsetof(struct sdpcmd_regs, intstatus)); 2239 offsetof(struct sdpcmd_regs, intstatus));
2447 bus->sdcnt.f1regdata += 2; 2240 bus->sdcnt.f1regdata += 2;
2448 bus->fcstate = 2241 atomic_set(&bus->fcstate,
2449 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); 2242 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2450 intstatus |= (newstatus & bus->hostintmask); 2243 intstatus |= (newstatus & bus->hostintmask);
2451 } 2244 }
2452 2245
@@ -2483,32 +2276,34 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2483 intstatus &= ~I_HMB_FRAME_IND; 2276 intstatus &= ~I_HMB_FRAME_IND;
2484 2277
2485 /* On frame indication, read available frames */ 2278 /* On frame indication, read available frames */
2486 if (PKT_AVAILABLE()) { 2279 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2487 framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone); 2280 framecnt = brcmf_sdio_readframes(bus, rxlimit);
2488 if (rxdone || bus->rxskip) 2281 if (!bus->rxpending)
2489 intstatus &= ~I_HMB_FRAME_IND; 2282 intstatus &= ~I_HMB_FRAME_IND;
2490 rxlimit -= min(framecnt, rxlimit); 2283 rxlimit -= min(framecnt, rxlimit);
2491 } 2284 }
2492 2285
2493 /* Keep still-pending events for next scheduling */ 2286 /* Keep still-pending events for next scheduling */
2494 bus->intstatus = intstatus; 2287 if (intstatus) {
2288 for_each_set_bit(n, &intstatus, 32)
2289 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2290 }
2495 2291
2496clkwait:
2497 brcmf_sdbrcm_clrintr(bus); 2292 brcmf_sdbrcm_clrintr(bus);
2498 2293
2499 if (data_ok(bus) && bus->ctrl_frame_stat && 2294 if (data_ok(bus) && bus->ctrl_frame_stat &&
2500 (bus->clkstate == CLK_AVAIL)) { 2295 (bus->clkstate == CLK_AVAIL)) {
2501 int ret, i; 2296 int i;
2502 2297
2503 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, 2298 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2504 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf, 2299 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2505 (u32) bus->ctrl_frame_len); 2300 (u32) bus->ctrl_frame_len);
2506 2301
2507 if (ret < 0) { 2302 if (err < 0) {
2508 /* On failure, abort the command and 2303 /* On failure, abort the command and
2509 terminate the frame */ 2304 terminate the frame */
2510 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2305 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2511 ret); 2306 err);
2512 bus->sdcnt.tx_sderrs++; 2307 bus->sdcnt.tx_sderrs++;
2513 2308
2514 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2309 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
@@ -2530,42 +2325,34 @@ clkwait:
2530 break; 2325 break;
2531 } 2326 }
2532 2327
2533 } 2328 } else {
2534 if (ret == 0)
2535 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; 2329 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2536 2330 }
2537 brcmf_dbg(INFO, "Return_dpc value is : %d\n", ret);
2538 bus->ctrl_frame_stat = false; 2331 bus->ctrl_frame_stat = false;
2539 brcmf_sdbrcm_wait_event_wakeup(bus); 2332 brcmf_sdbrcm_wait_event_wakeup(bus);
2540 } 2333 }
2541 /* Send queued frames (limit 1 if rx may still be pending) */ 2334 /* Send queued frames (limit 1 if rx may still be pending) */
2542 else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && 2335 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2543 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit 2336 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2544 && data_ok(bus)) { 2337 && data_ok(bus)) {
2545 framecnt = rxdone ? txlimit : min(txlimit, bus->txminmax); 2338 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2339 txlimit;
2546 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt); 2340 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
2547 txlimit -= framecnt; 2341 txlimit -= framecnt;
2548 } 2342 }
2549 2343
2550 /* Resched if events or tx frames are pending,
2551 else await next interrupt */
2552 /* On failed register access, all bets are off:
2553 no resched or interrupts */
2554 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) { 2344 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2555 brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n"); 2345 brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
2556 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2346 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2557 bus->intstatus = 0; 2347 atomic_set(&bus->intstatus, 0);
2558 } else if (bus->clkstate == CLK_PENDING) { 2348 } else if (atomic_read(&bus->intstatus) ||
2559 brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n"); 2349 atomic_read(&bus->ipend) > 0 ||
2560 resched = true; 2350 (!atomic_read(&bus->fcstate) &&
2561 } else if (bus->intstatus || bus->ipend || 2351 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2562 (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) 2352 data_ok(bus)) || PKT_AVAILABLE()) {
2563 && data_ok(bus)) || PKT_AVAILABLE()) { 2353 brcmf_sdbrcm_adddpctsk(bus);
2564 resched = true;
2565 } 2354 }
2566 2355
2567 bus->dpc_sched = resched;
2568
2569 /* If we're done for now, turn off clock request. */ 2356 /* If we're done for now, turn off clock request. */
2570 if ((bus->clkstate != CLK_PENDING) 2357 if ((bus->clkstate != CLK_PENDING)
2571 && bus->idletime == BRCMF_IDLE_IMMEDIATE) { 2358 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
@@ -2574,65 +2361,6 @@ clkwait:
2574 } 2361 }
2575 2362
2576 up(&bus->sdsem); 2363 up(&bus->sdsem);
2577
2578 return resched;
2579}
2580
2581static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2582{
2583 struct list_head *new_hd;
2584 unsigned long flags;
2585
2586 if (in_interrupt())
2587 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2588 else
2589 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2590 if (new_hd == NULL)
2591 return;
2592
2593 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2594 list_add_tail(new_hd, &bus->dpc_tsklst);
2595 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2596}
2597
2598static int brcmf_sdbrcm_dpc_thread(void *data)
2599{
2600 struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
2601 struct list_head *cur_hd, *tmp_hd;
2602 unsigned long flags;
2603
2604 allow_signal(SIGTERM);
2605 /* Run until signal received */
2606 while (1) {
2607 if (kthread_should_stop())
2608 break;
2609
2610 if (list_empty(&bus->dpc_tsklst))
2611 if (wait_for_completion_interruptible(&bus->dpc_wait))
2612 break;
2613
2614 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2615 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
2616 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2617
2618 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
2619 /* after stopping the bus, exit thread */
2620 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
2621 bus->dpc_tsk = NULL;
2622 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2623 break;
2624 }
2625
2626 if (brcmf_sdbrcm_dpc(bus))
2627 brcmf_sdbrcm_adddpctsk(bus);
2628
2629 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2630 list_del(cur_hd);
2631 kfree(cur_hd);
2632 }
2633 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2634 }
2635 return 0;
2636} 2364}
2637 2365
2638static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) 2366static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2642,6 +2370,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2642 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2370 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2643 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2371 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2644 struct brcmf_sdio *bus = sdiodev->bus; 2372 struct brcmf_sdio *bus = sdiodev->bus;
2373 unsigned long flags;
2645 2374
2646 brcmf_dbg(TRACE, "Enter\n"); 2375 brcmf_dbg(TRACE, "Enter\n");
2647 2376
@@ -2672,21 +2401,23 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2672 spin_unlock_bh(&bus->txqlock); 2401 spin_unlock_bh(&bus->txqlock);
2673 2402
2674 if (pktq_len(&bus->txq) >= TXHI) { 2403 if (pktq_len(&bus->txq) >= TXHI) {
2675 bus->txoff = ON; 2404 bus->txoff = true;
2676 brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON); 2405 brcmf_txflowblock(bus->sdiodev->dev, true);
2677 } 2406 }
2678 2407
2679#ifdef DEBUG 2408#ifdef DEBUG
2680 if (pktq_plen(&bus->txq, prec) > qcount[prec]) 2409 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2681 qcount[prec] = pktq_plen(&bus->txq, prec); 2410 qcount[prec] = pktq_plen(&bus->txq, prec);
2682#endif 2411#endif
2683 /* Schedule DPC if needed to send queued packet(s) */ 2412
2684 if (!bus->dpc_sched) { 2413 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2685 bus->dpc_sched = true; 2414 if (list_empty(&bus->dpc_tsklst)) {
2686 if (bus->dpc_tsk) { 2415 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2687 brcmf_sdbrcm_adddpctsk(bus); 2416
2688 complete(&bus->dpc_wait); 2417 brcmf_sdbrcm_adddpctsk(bus);
2689 } 2418 queue_work(bus->brcmf_wq, &bus->datawork);
2419 } else {
2420 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2690 } 2421 }
2691 2422
2692 return ret; 2423 return ret;
@@ -2707,6 +2438,8 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2707 else 2438 else
2708 dsize = size; 2439 dsize = size;
2709 2440
2441 sdio_claim_host(bus->sdiodev->func[1]);
2442
2710 /* Set the backplane window to include the start address */ 2443 /* Set the backplane window to include the start address */
2711 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address); 2444 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
2712 if (bcmerror) { 2445 if (bcmerror) {
@@ -2748,6 +2481,8 @@ xfer_done:
2748 brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n", 2481 brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
2749 bus->sdiodev->sbwad); 2482 bus->sdiodev->sbwad);
2750 2483
2484 sdio_release_host(bus->sdiodev->func[1]);
2485
2751 return bcmerror; 2486 return bcmerror;
2752} 2487}
2753 2488
@@ -2882,6 +2617,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2882 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2617 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2883 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2618 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2884 struct brcmf_sdio *bus = sdiodev->bus; 2619 struct brcmf_sdio *bus = sdiodev->bus;
2620 unsigned long flags;
2885 2621
2886 brcmf_dbg(TRACE, "Enter\n"); 2622 brcmf_dbg(TRACE, "Enter\n");
2887 2623
@@ -2918,8 +2654,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2918 /* Need to lock here to protect txseq and SDIO tx calls */ 2654 /* Need to lock here to protect txseq and SDIO tx calls */
2919 down(&bus->sdsem); 2655 down(&bus->sdsem);
2920 2656
2921 bus_wake(bus);
2922
2923 /* Make sure backplane clock is on */ 2657 /* Make sure backplane clock is on */
2924 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2658 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
2925 2659
@@ -2967,9 +2701,15 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2967 } while (ret < 0 && retries++ < TXRETRIES); 2701 } while (ret < 0 && retries++ < TXRETRIES);
2968 } 2702 }
2969 2703
2970 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) { 2704 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2705 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2706 list_empty(&bus->dpc_tsklst)) {
2707 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2708
2971 bus->activity = false; 2709 bus->activity = false;
2972 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true); 2710 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2711 } else {
2712 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2973 } 2713 }
2974 2714
2975 up(&bus->sdsem); 2715 up(&bus->sdsem);
@@ -3774,23 +3514,20 @@ void brcmf_sdbrcm_isr(void *arg)
3774 } 3514 }
3775 /* Count the interrupt call */ 3515 /* Count the interrupt call */
3776 bus->sdcnt.intrcount++; 3516 bus->sdcnt.intrcount++;
3777 bus->ipend = true; 3517 if (in_interrupt())
3778 3518 atomic_set(&bus->ipend, 1);
3779 /* Shouldn't get this interrupt if we're sleeping? */ 3519 else
3780 if (bus->sleeping) { 3520 if (brcmf_sdio_intr_rstatus(bus)) {
3781 brcmf_dbg(ERROR, "INTERRUPT WHILE SLEEPING??\n"); 3521 brcmf_dbg(ERROR, "failed backplane access\n");
3782 return; 3522 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3783 } 3523 }
3784 3524
3785 /* Disable additional interrupts (is this needed now)? */ 3525 /* Disable additional interrupts (is this needed now)? */
3786 if (!bus->intr) 3526 if (!bus->intr)
3787 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n"); 3527 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
3788 3528
3789 bus->dpc_sched = true; 3529 brcmf_sdbrcm_adddpctsk(bus);
3790 if (bus->dpc_tsk) { 3530 queue_work(bus->brcmf_wq, &bus->datawork);
3791 brcmf_sdbrcm_adddpctsk(bus);
3792 complete(&bus->dpc_wait);
3793 }
3794} 3531}
3795 3532
3796static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) 3533static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3798,13 +3535,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3798#ifdef DEBUG 3535#ifdef DEBUG
3799 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev); 3536 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3800#endif /* DEBUG */ 3537#endif /* DEBUG */
3538 unsigned long flags;
3801 3539
3802 brcmf_dbg(TIMER, "Enter\n"); 3540 brcmf_dbg(TIMER, "Enter\n");
3803 3541
3804 /* Ignore the timer if simulating bus down */
3805 if (bus->sleeping)
3806 return false;
3807
3808 down(&bus->sdsem); 3542 down(&bus->sdsem);
3809 3543
3810 /* Poll period: check device if appropriate. */ 3544 /* Poll period: check device if appropriate. */
@@ -3818,27 +3552,30 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3818 if (!bus->intr || 3552 if (!bus->intr ||
3819 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { 3553 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3820 3554
3821 if (!bus->dpc_sched) { 3555 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3556 if (list_empty(&bus->dpc_tsklst)) {
3822 u8 devpend; 3557 u8 devpend;
3558 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3559 flags);
3823 devpend = brcmf_sdio_regrb(bus->sdiodev, 3560 devpend = brcmf_sdio_regrb(bus->sdiodev,
3824 SDIO_CCCR_INTx, 3561 SDIO_CCCR_INTx,
3825 NULL); 3562 NULL);
3826 intstatus = 3563 intstatus =
3827 devpend & (INTR_STATUS_FUNC1 | 3564 devpend & (INTR_STATUS_FUNC1 |
3828 INTR_STATUS_FUNC2); 3565 INTR_STATUS_FUNC2);
3566 } else {
3567 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3568 flags);
3829 } 3569 }
3830 3570
3831 /* If there is something, make like the ISR and 3571 /* If there is something, make like the ISR and
3832 schedule the DPC */ 3572 schedule the DPC */
3833 if (intstatus) { 3573 if (intstatus) {
3834 bus->sdcnt.pollcnt++; 3574 bus->sdcnt.pollcnt++;
3835 bus->ipend = true; 3575 atomic_set(&bus->ipend, 1);
3836 3576
3837 bus->dpc_sched = true; 3577 brcmf_sdbrcm_adddpctsk(bus);
3838 if (bus->dpc_tsk) { 3578 queue_work(bus->brcmf_wq, &bus->datawork);
3839 brcmf_sdbrcm_adddpctsk(bus);
3840 complete(&bus->dpc_wait);
3841 }
3842 } 3579 }
3843 } 3580 }
3844 3581
@@ -3876,11 +3613,13 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3876 3613
3877 up(&bus->sdsem); 3614 up(&bus->sdsem);
3878 3615
3879 return bus->ipend; 3616 return (atomic_read(&bus->ipend) > 0);
3880} 3617}
3881 3618
3882static bool brcmf_sdbrcm_chipmatch(u16 chipid) 3619static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3883{ 3620{
3621 if (chipid == BCM43241_CHIP_ID)
3622 return true;
3884 if (chipid == BCM4329_CHIP_ID) 3623 if (chipid == BCM4329_CHIP_ID)
3885 return true; 3624 return true;
3886 if (chipid == BCM4330_CHIP_ID) 3625 if (chipid == BCM4330_CHIP_ID)
@@ -3890,6 +3629,26 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3890 return false; 3629 return false;
3891} 3630}
3892 3631
3632static void brcmf_sdio_dataworker(struct work_struct *work)
3633{
3634 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3635 datawork);
3636 struct list_head *cur_hd, *tmp_hd;
3637 unsigned long flags;
3638
3639 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3640 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
3641 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3642
3643 brcmf_sdbrcm_dpc(bus);
3644
3645 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3646 list_del(cur_hd);
3647 kfree(cur_hd);
3648 }
3649 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3650}
3651
3893static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) 3652static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3894{ 3653{
3895 brcmf_dbg(TRACE, "Enter\n"); 3654 brcmf_dbg(TRACE, "Enter\n");
@@ -4022,7 +3781,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
4022 SDIO_FUNC_ENABLE_1, NULL); 3781 SDIO_FUNC_ENABLE_1, NULL);
4023 3782
4024 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 3783 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
4025 bus->sleeping = false;
4026 bus->rxflow = false; 3784 bus->rxflow = false;
4027 3785
4028 /* Done with backplane-dependent accesses, can drop clock... */ 3786 /* Done with backplane-dependent accesses, can drop clock... */
@@ -4103,6 +3861,9 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
4103 /* De-register interrupt handler */ 3861 /* De-register interrupt handler */
4104 brcmf_sdio_intr_unregister(bus->sdiodev); 3862 brcmf_sdio_intr_unregister(bus->sdiodev);
4105 3863
3864 cancel_work_sync(&bus->datawork);
3865 destroy_workqueue(bus->brcmf_wq);
3866
4106 if (bus->sdiodev->bus_if->drvr) { 3867 if (bus->sdiodev->bus_if->drvr) {
4107 brcmf_detach(bus->sdiodev->dev); 3868 brcmf_detach(bus->sdiodev->dev);
4108 brcmf_sdbrcm_release_dongle(bus); 3869 brcmf_sdbrcm_release_dongle(bus);
@@ -4142,8 +3903,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
4142 bus->rxbound = BRCMF_RXBOUND; 3903 bus->rxbound = BRCMF_RXBOUND;
4143 bus->txminmax = BRCMF_TXMINMAX; 3904 bus->txminmax = BRCMF_TXMINMAX;
4144 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; 3905 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
4145 bus->usebufpool = false; /* Use bufpool if allocated,
4146 else use locally malloced rxbuf */
4147 3906
4148 /* attempt to attach to the dongle */ 3907 /* attempt to attach to the dongle */
4149 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) { 3908 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
@@ -4155,6 +3914,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
4155 init_waitqueue_head(&bus->ctrl_wait); 3914 init_waitqueue_head(&bus->ctrl_wait);
4156 init_waitqueue_head(&bus->dcmd_resp_wait); 3915 init_waitqueue_head(&bus->dcmd_resp_wait);
4157 3916
3917 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
3918 if (bus->brcmf_wq == NULL) {
3919 brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
3920 goto fail;
3921 }
3922 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3923
4158 /* Set up the watchdog timer */ 3924 /* Set up the watchdog timer */
4159 init_timer(&bus->timer); 3925 init_timer(&bus->timer);
4160 bus->timer.data = (unsigned long)bus; 3926 bus->timer.data = (unsigned long)bus;
@@ -4172,15 +3938,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
4172 bus->watchdog_tsk = NULL; 3938 bus->watchdog_tsk = NULL;
4173 } 3939 }
4174 /* Initialize DPC thread */ 3940 /* Initialize DPC thread */
4175 init_completion(&bus->dpc_wait);
4176 INIT_LIST_HEAD(&bus->dpc_tsklst); 3941 INIT_LIST_HEAD(&bus->dpc_tsklst);
4177 spin_lock_init(&bus->dpc_tl_lock); 3942 spin_lock_init(&bus->dpc_tl_lock);
4178 bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
4179 bus, "brcmf_dpc");
4180 if (IS_ERR(bus->dpc_tsk)) {
4181 pr_warn("brcmf_dpc thread failed to start\n");
4182 bus->dpc_tsk = NULL;
4183 }
4184 3943
4185 /* Assign bus interface call back */ 3944 /* Assign bus interface call back */
4186 bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop; 3945 bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 58155e23d220..9434440bbc65 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -377,6 +377,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
377 377
378 /* Address of cores for new chips should be added here */ 378 /* Address of cores for new chips should be added here */
379 switch (ci->chip) { 379 switch (ci->chip) {
380 case BCM43241_CHIP_ID:
381 ci->c_inf[0].wrapbase = 0x18100000;
382 ci->c_inf[0].cib = 0x2a084411;
383 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
384 ci->c_inf[1].base = 0x18002000;
385 ci->c_inf[1].wrapbase = 0x18102000;
386 ci->c_inf[1].cib = 0x0e004211;
387 ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
388 ci->c_inf[2].base = 0x18004000;
389 ci->c_inf[2].wrapbase = 0x18104000;
390 ci->c_inf[2].cib = 0x14080401;
391 ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
392 ci->c_inf[3].base = 0x18003000;
393 ci->c_inf[3].wrapbase = 0x18103000;
394 ci->c_inf[3].cib = 0x07004211;
395 ci->ramsize = 0x90000;
396 break;
380 case BCM4329_CHIP_ID: 397 case BCM4329_CHIP_ID:
381 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV; 398 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
382 ci->c_inf[1].base = BCM4329_CORE_BUS_BASE; 399 ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 29bf78d264e0..0d30afd8c672 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -174,6 +174,8 @@ extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
174 u8 data, int *ret); 174 u8 data, int *ret);
175extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, 175extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
176 u32 data, int *ret); 176 u32 data, int *ret);
177extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
178 void *data, bool write);
177 179
178/* Buffer transfer to/from device (client) core via cmd53. 180/* Buffer transfer to/from device (client) core via cmd53.
179 * fn: function number 181 * fn: function number
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 58f89fa9c9f8..a2b4b1e71017 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -66,7 +66,9 @@
66#define BRCMF_USB_CBCTL_READ 1 66#define BRCMF_USB_CBCTL_READ 1
67#define BRCMF_USB_MAX_PKT_SIZE 1600 67#define BRCMF_USB_MAX_PKT_SIZE 1600
68 68
69#define BRCMF_USB_43143_FW_NAME "brcm/brcmfmac43143.bin"
69#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin" 70#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
71#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
70 72
71enum usbdev_suspend_state { 73enum usbdev_suspend_state {
72 USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow 74 USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
@@ -78,25 +80,13 @@ enum usbdev_suspend_state {
78 USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */ 80 USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */
79}; 81};
80 82
81struct brcmf_usb_probe_info {
82 void *usbdev_info;
83 struct usb_device *usb; /* USB device pointer from OS */
84 uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
85 int intr_size; /* Size of interrupt message */
86 int interval; /* Interrupt polling interval */
87 int vid;
88 int pid;
89 enum usb_device_speed device_speed;
90 enum usbdev_suspend_state suspend_state;
91 struct usb_interface *intf;
92};
93static struct brcmf_usb_probe_info usbdev_probe_info;
94
95struct brcmf_usb_image { 83struct brcmf_usb_image {
96 void *data; 84 struct list_head list;
97 u32 len; 85 s8 *fwname;
86 u8 *image;
87 int image_len;
98}; 88};
99static struct brcmf_usb_image g_image = { NULL, 0 }; 89static struct list_head fw_image_list;
100 90
101struct intr_transfer_buf { 91struct intr_transfer_buf {
102 u32 notification; 92 u32 notification;
@@ -117,9 +107,8 @@ struct brcmf_usbdev_info {
117 int rx_low_watermark; 107 int rx_low_watermark;
118 int tx_low_watermark; 108 int tx_low_watermark;
119 int tx_high_watermark; 109 int tx_high_watermark;
120 bool txoff; 110 int tx_freecount;
121 bool rxoff; 111 bool tx_flowblock;
122 bool txoverride;
123 112
124 struct brcmf_usbreq *tx_reqs; 113 struct brcmf_usbreq *tx_reqs;
125 struct brcmf_usbreq *rx_reqs; 114 struct brcmf_usbreq *rx_reqs;
@@ -133,7 +122,6 @@ struct brcmf_usbdev_info {
133 122
134 struct usb_device *usbdev; 123 struct usb_device *usbdev;
135 struct device *dev; 124 struct device *dev;
136 enum usb_device_speed device_speed;
137 125
138 int ctl_in_pipe, ctl_out_pipe; 126 int ctl_in_pipe, ctl_out_pipe;
139 struct urb *ctl_urb; /* URB for control endpoint */ 127 struct urb *ctl_urb; /* URB for control endpoint */
@@ -146,16 +134,11 @@ struct brcmf_usbdev_info {
146 wait_queue_head_t ctrl_wait; 134 wait_queue_head_t ctrl_wait;
147 ulong ctl_op; 135 ulong ctl_op;
148 136
149 bool rxctl_deferrespok;
150
151 struct urb *bulk_urb; /* used for FW download */ 137 struct urb *bulk_urb; /* used for FW download */
152 struct urb *intr_urb; /* URB for interrupt endpoint */ 138 struct urb *intr_urb; /* URB for interrupt endpoint */
153 int intr_size; /* Size of interrupt message */ 139 int intr_size; /* Size of interrupt message */
154 int interval; /* Interrupt polling interval */ 140 int interval; /* Interrupt polling interval */
155 struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */ 141 struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
156
157 struct brcmf_usb_probe_info probe_info;
158
159}; 142};
160 143
161static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 144static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -177,48 +160,17 @@ static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
177 return brcmf_usb_get_buspub(dev)->devinfo; 160 return brcmf_usb_get_buspub(dev)->devinfo;
178} 161}
179 162
180#if 0 163static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
181static void
182brcmf_usb_txflowcontrol(struct brcmf_usbdev_info *devinfo, bool onoff)
183{ 164{
184 dhd_txflowcontrol(devinfo->bus_pub.netdev, 0, onoff); 165 return wait_event_timeout(devinfo->ioctl_resp_wait,
166 devinfo->ctl_completed,
167 msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
185} 168}
186#endif
187 169
188static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo, 170static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
189 uint *condition, bool *pending)
190{
191 DECLARE_WAITQUEUE(wait, current);
192 int timeout = IOCTL_RESP_TIMEOUT;
193
194 /* Convert timeout in millsecond to jiffies */
195 timeout = msecs_to_jiffies(timeout);
196 /* Wait until control frame is available */
197 add_wait_queue(&devinfo->ioctl_resp_wait, &wait);
198 set_current_state(TASK_INTERRUPTIBLE);
199
200 smp_mb();
201 while (!(*condition) && (!signal_pending(current) && timeout)) {
202 timeout = schedule_timeout(timeout);
203 /* Wait until control frame is available */
204 smp_mb();
205 }
206
207 if (signal_pending(current))
208 *pending = true;
209
210 set_current_state(TASK_RUNNING);
211 remove_wait_queue(&devinfo->ioctl_resp_wait, &wait);
212
213 return timeout;
214}
215
216static int brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
217{ 171{
218 if (waitqueue_active(&devinfo->ioctl_resp_wait)) 172 if (waitqueue_active(&devinfo->ioctl_resp_wait))
219 wake_up_interruptible(&devinfo->ioctl_resp_wait); 173 wake_up(&devinfo->ioctl_resp_wait);
220
221 return 0;
222} 174}
223 175
224static void 176static void
@@ -324,17 +276,9 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
324 devinfo->ctl_read.wLength = cpu_to_le16p(&size); 276 devinfo->ctl_read.wLength = cpu_to_le16p(&size);
325 devinfo->ctl_urb->transfer_buffer_length = size; 277 devinfo->ctl_urb->transfer_buffer_length = size;
326 278
327 if (devinfo->rxctl_deferrespok) { 279 devinfo->ctl_read.bRequestType = USB_DIR_IN
328 /* BMAC model */ 280 | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
329 devinfo->ctl_read.bRequestType = USB_DIR_IN 281 devinfo->ctl_read.bRequest = 1;
330 | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
331 devinfo->ctl_read.bRequest = DL_DEFER_RESP_OK;
332 } else {
333 /* full dongle model */
334 devinfo->ctl_read.bRequestType = USB_DIR_IN
335 | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
336 devinfo->ctl_read.bRequest = 1;
337 }
338 282
339 usb_fill_control_urb(devinfo->ctl_urb, 283 usb_fill_control_urb(devinfo->ctl_urb,
340 devinfo->usbdev, 284 devinfo->usbdev,
@@ -355,7 +299,6 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
355{ 299{
356 int err = 0; 300 int err = 0;
357 int timeout = 0; 301 int timeout = 0;
358 bool pending;
359 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 302 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
360 303
361 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 304 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -366,15 +309,14 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
366 if (test_and_set_bit(0, &devinfo->ctl_op)) 309 if (test_and_set_bit(0, &devinfo->ctl_op))
367 return -EIO; 310 return -EIO;
368 311
312 devinfo->ctl_completed = false;
369 err = brcmf_usb_send_ctl(devinfo, buf, len); 313 err = brcmf_usb_send_ctl(devinfo, buf, len);
370 if (err) { 314 if (err) {
371 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len); 315 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
316 clear_bit(0, &devinfo->ctl_op);
372 return err; 317 return err;
373 } 318 }
374 319 timeout = brcmf_usb_ioctl_resp_wait(devinfo);
375 devinfo->ctl_completed = false;
376 timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
377 &pending);
378 clear_bit(0, &devinfo->ctl_op); 320 clear_bit(0, &devinfo->ctl_op);
379 if (!timeout) { 321 if (!timeout) {
380 brcmf_dbg(ERROR, "Txctl wait timed out\n"); 322 brcmf_dbg(ERROR, "Txctl wait timed out\n");
@@ -387,7 +329,6 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
387{ 329{
388 int err = 0; 330 int err = 0;
389 int timeout = 0; 331 int timeout = 0;
390 bool pending;
391 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 332 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
392 333
393 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 334 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -397,14 +338,14 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
397 if (test_and_set_bit(0, &devinfo->ctl_op)) 338 if (test_and_set_bit(0, &devinfo->ctl_op))
398 return -EIO; 339 return -EIO;
399 340
341 devinfo->ctl_completed = false;
400 err = brcmf_usb_recv_ctl(devinfo, buf, len); 342 err = brcmf_usb_recv_ctl(devinfo, buf, len);
401 if (err) { 343 if (err) {
402 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len); 344 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
345 clear_bit(0, &devinfo->ctl_op);
403 return err; 346 return err;
404 } 347 }
405 devinfo->ctl_completed = false; 348 timeout = brcmf_usb_ioctl_resp_wait(devinfo);
406 timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
407 &pending);
408 err = devinfo->ctl_urb_status; 349 err = devinfo->ctl_urb_status;
409 clear_bit(0, &devinfo->ctl_op); 350 clear_bit(0, &devinfo->ctl_op);
410 if (!timeout) { 351 if (!timeout) {
@@ -418,7 +359,7 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
418} 359}
419 360
420static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo, 361static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
421 struct list_head *q) 362 struct list_head *q, int *counter)
422{ 363{
423 unsigned long flags; 364 unsigned long flags;
424 struct brcmf_usbreq *req; 365 struct brcmf_usbreq *req;
@@ -429,17 +370,22 @@ static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
429 } 370 }
430 req = list_entry(q->next, struct brcmf_usbreq, list); 371 req = list_entry(q->next, struct brcmf_usbreq, list);
431 list_del_init(q->next); 372 list_del_init(q->next);
373 if (counter)
374 (*counter)--;
432 spin_unlock_irqrestore(&devinfo->qlock, flags); 375 spin_unlock_irqrestore(&devinfo->qlock, flags);
433 return req; 376 return req;
434 377
435} 378}
436 379
437static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo, 380static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
438 struct list_head *q, struct brcmf_usbreq *req) 381 struct list_head *q, struct brcmf_usbreq *req,
382 int *counter)
439{ 383{
440 unsigned long flags; 384 unsigned long flags;
441 spin_lock_irqsave(&devinfo->qlock, flags); 385 spin_lock_irqsave(&devinfo->qlock, flags);
442 list_add_tail(&req->list, q); 386 list_add_tail(&req->list, q);
387 if (counter)
388 (*counter)++;
443 spin_unlock_irqrestore(&devinfo->qlock, flags); 389 spin_unlock_irqrestore(&devinfo->qlock, flags);
444} 390}
445 391
@@ -519,10 +465,16 @@ static void brcmf_usb_tx_complete(struct urb *urb)
519 else 465 else
520 devinfo->bus_pub.bus->dstats.tx_errors++; 466 devinfo->bus_pub.bus->dstats.tx_errors++;
521 467
468 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
469
522 brcmu_pkt_buf_free_skb(req->skb); 470 brcmu_pkt_buf_free_skb(req->skb);
523 req->skb = NULL; 471 req->skb = NULL;
524 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req); 472 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
525 473 if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
474 devinfo->tx_flowblock) {
475 brcmf_txflowblock(devinfo->dev, false);
476 devinfo->tx_flowblock = false;
477 }
526} 478}
527 479
528static void brcmf_usb_rx_complete(struct urb *urb) 480static void brcmf_usb_rx_complete(struct urb *urb)
@@ -541,7 +493,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
541 } else { 493 } else {
542 devinfo->bus_pub.bus->dstats.rx_errors++; 494 devinfo->bus_pub.bus->dstats.rx_errors++;
543 brcmu_pkt_buf_free_skb(skb); 495 brcmu_pkt_buf_free_skb(skb);
544 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 496 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
545 return; 497 return;
546 } 498 }
547 499
@@ -550,15 +502,13 @@ static void brcmf_usb_rx_complete(struct urb *urb)
550 if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) { 502 if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
551 brcmf_dbg(ERROR, "rx protocol error\n"); 503 brcmf_dbg(ERROR, "rx protocol error\n");
552 brcmu_pkt_buf_free_skb(skb); 504 brcmu_pkt_buf_free_skb(skb);
553 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
554 devinfo->bus_pub.bus->dstats.rx_errors++; 505 devinfo->bus_pub.bus->dstats.rx_errors++;
555 } else { 506 } else
556 brcmf_rx_packet(devinfo->dev, ifidx, skb); 507 brcmf_rx_packet(devinfo->dev, ifidx, skb);
557 brcmf_usb_rx_refill(devinfo, req); 508 brcmf_usb_rx_refill(devinfo, req);
558 }
559 } else { 509 } else {
560 brcmu_pkt_buf_free_skb(skb); 510 brcmu_pkt_buf_free_skb(skb);
561 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 511 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
562 } 512 }
563 return; 513 return;
564 514
@@ -575,7 +525,7 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
575 525
576 skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu); 526 skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
577 if (!skb) { 527 if (!skb) {
578 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 528 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
579 return; 529 return;
580 } 530 }
581 req->skb = skb; 531 req->skb = skb;
@@ -584,14 +534,14 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
584 skb->data, skb_tailroom(skb), brcmf_usb_rx_complete, 534 skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
585 req); 535 req);
586 req->devinfo = devinfo; 536 req->devinfo = devinfo;
587 brcmf_usb_enq(devinfo, &devinfo->rx_postq, req); 537 brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
588 538
589 ret = usb_submit_urb(req->urb, GFP_ATOMIC); 539 ret = usb_submit_urb(req->urb, GFP_ATOMIC);
590 if (ret) { 540 if (ret) {
591 brcmf_usb_del_fromq(devinfo, req); 541 brcmf_usb_del_fromq(devinfo, req);
592 brcmu_pkt_buf_free_skb(req->skb); 542 brcmu_pkt_buf_free_skb(req->skb);
593 req->skb = NULL; 543 req->skb = NULL;
594 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 544 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
595 } 545 }
596 return; 546 return;
597} 547}
@@ -604,7 +554,7 @@ static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
604 brcmf_dbg(ERROR, "bus is not up\n"); 554 brcmf_dbg(ERROR, "bus is not up\n");
605 return; 555 return;
606 } 556 }
607 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq)) != NULL) 557 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
608 brcmf_usb_rx_refill(devinfo, req); 558 brcmf_usb_rx_refill(devinfo, req);
609} 559}
610 560
@@ -682,7 +632,8 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
682 return -EIO; 632 return -EIO;
683 } 633 }
684 634
685 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq); 635 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
636 &devinfo->tx_freecount);
686 if (!req) { 637 if (!req) {
687 brcmu_pkt_buf_free_skb(skb); 638 brcmu_pkt_buf_free_skb(skb);
688 brcmf_dbg(ERROR, "no req to send\n"); 639 brcmf_dbg(ERROR, "no req to send\n");
@@ -694,14 +645,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
694 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe, 645 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
695 skb->data, skb->len, brcmf_usb_tx_complete, req); 646 skb->data, skb->len, brcmf_usb_tx_complete, req);
696 req->urb->transfer_flags |= URB_ZERO_PACKET; 647 req->urb->transfer_flags |= URB_ZERO_PACKET;
697 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req); 648 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
698 ret = usb_submit_urb(req->urb, GFP_ATOMIC); 649 ret = usb_submit_urb(req->urb, GFP_ATOMIC);
699 if (ret) { 650 if (ret) {
700 brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n"); 651 brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n");
701 brcmf_usb_del_fromq(devinfo, req); 652 brcmf_usb_del_fromq(devinfo, req);
702 brcmu_pkt_buf_free_skb(req->skb); 653 brcmu_pkt_buf_free_skb(req->skb);
703 req->skb = NULL; 654 req->skb = NULL;
704 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req); 655 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
656 &devinfo->tx_freecount);
657 } else {
658 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
659 !devinfo->tx_flowblock) {
660 brcmf_txflowblock(dev, true);
661 devinfo->tx_flowblock = true;
662 }
705 } 663 }
706 664
707 return ret; 665 return ret;
@@ -1112,10 +1070,14 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
1112static bool brcmf_usb_chip_support(int chipid, int chiprev) 1070static bool brcmf_usb_chip_support(int chipid, int chiprev)
1113{ 1071{
1114 switch(chipid) { 1072 switch(chipid) {
1073 case 43143:
1074 return true;
1115 case 43235: 1075 case 43235:
1116 case 43236: 1076 case 43236:
1117 case 43238: 1077 case 43238:
1118 return (chiprev == 3); 1078 return (chiprev == 3);
1079 case 43242:
1080 return true;
1119 default: 1081 default:
1120 break; 1082 break;
1121 } 1083 }
@@ -1154,17 +1116,10 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
1154} 1116}
1155 1117
1156 1118
1157static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub) 1119static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
1158{ 1120{
1159 struct brcmf_usbdev_info *devinfo =
1160 (struct brcmf_usbdev_info *)bus_pub;
1161
1162 brcmf_dbg(TRACE, "devinfo %p\n", devinfo); 1121 brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
1163 1122
1164 /* store the image globally */
1165 g_image.data = devinfo->image;
1166 g_image.len = devinfo->image_len;
1167
1168 /* free the URBS */ 1123 /* free the URBS */
1169 brcmf_usb_free_q(&devinfo->rx_freeq, false); 1124 brcmf_usb_free_q(&devinfo->rx_freeq, false);
1170 brcmf_usb_free_q(&devinfo->tx_freeq, false); 1125 brcmf_usb_free_q(&devinfo->tx_freeq, false);
@@ -1175,7 +1130,6 @@ static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
1175 1130
1176 kfree(devinfo->tx_reqs); 1131 kfree(devinfo->tx_reqs);
1177 kfree(devinfo->rx_reqs); 1132 kfree(devinfo->rx_reqs);
1178 kfree(devinfo);
1179} 1133}
1180 1134
1181#define TRX_MAGIC 0x30524448 /* "HDR0" */ 1135#define TRX_MAGIC 0x30524448 /* "HDR0" */
@@ -1217,19 +1171,34 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1217{ 1171{
1218 s8 *fwname; 1172 s8 *fwname;
1219 const struct firmware *fw; 1173 const struct firmware *fw;
1174 struct brcmf_usb_image *fw_image;
1220 int err; 1175 int err;
1221 1176
1222 devinfo->image = g_image.data; 1177 switch (devinfo->bus_pub.devid) {
1223 devinfo->image_len = g_image.len; 1178 case 43143:
1224 1179 fwname = BRCMF_USB_43143_FW_NAME;
1225 /* 1180 break;
1226 * if we have an image we can leave here. 1181 case 43235:
1227 */ 1182 case 43236:
1228 if (devinfo->image) 1183 case 43238:
1229 return 0; 1184 fwname = BRCMF_USB_43236_FW_NAME;
1230 1185 break;
1231 fwname = BRCMF_USB_43236_FW_NAME; 1186 case 43242:
1187 fwname = BRCMF_USB_43242_FW_NAME;
1188 break;
1189 default:
1190 return -EINVAL;
1191 break;
1192 }
1232 1193
1194 list_for_each_entry(fw_image, &fw_image_list, list) {
1195 if (fw_image->fwname == fwname) {
1196 devinfo->image = fw_image->image;
1197 devinfo->image_len = fw_image->image_len;
1198 return 0;
1199 }
1200 }
1201 /* fw image not yet loaded. Load it now and add to list */
1233 err = request_firmware(&fw, fwname, devinfo->dev); 1202 err = request_firmware(&fw, fwname, devinfo->dev);
1234 if (!fw) { 1203 if (!fw) {
1235 brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname); 1204 brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname);
@@ -1240,27 +1209,32 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1240 return -EINVAL; 1209 return -EINVAL;
1241 } 1210 }
1242 1211
1243 devinfo->image = vmalloc(fw->size); /* plus nvram */ 1212 fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
1244 if (!devinfo->image) 1213 if (!fw_image)
1214 return -ENOMEM;
1215 INIT_LIST_HEAD(&fw_image->list);
1216 list_add_tail(&fw_image->list, &fw_image_list);
1217 fw_image->fwname = fwname;
1218 fw_image->image = vmalloc(fw->size);
1219 if (!fw_image->image)
1245 return -ENOMEM; 1220 return -ENOMEM;
1246 1221
1247 memcpy(devinfo->image, fw->data, fw->size); 1222 memcpy(fw_image->image, fw->data, fw->size);
1248 devinfo->image_len = fw->size; 1223 fw_image->image_len = fw->size;
1249 1224
1250 release_firmware(fw); 1225 release_firmware(fw);
1226
1227 devinfo->image = fw_image->image;
1228 devinfo->image_len = fw_image->image_len;
1229
1251 return 0; 1230 return 0;
1252} 1231}
1253 1232
1254 1233
1255static 1234static
1256struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev) 1235struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1236 int nrxq, int ntxq)
1257{ 1237{
1258 struct brcmf_usbdev_info *devinfo;
1259
1260 devinfo = kzalloc(sizeof(struct brcmf_usbdev_info), GFP_ATOMIC);
1261 if (devinfo == NULL)
1262 return NULL;
1263
1264 devinfo->bus_pub.nrxq = nrxq; 1238 devinfo->bus_pub.nrxq = nrxq;
1265 devinfo->rx_low_watermark = nrxq / 2; 1239 devinfo->rx_low_watermark = nrxq / 2;
1266 devinfo->bus_pub.devinfo = devinfo; 1240 devinfo->bus_pub.devinfo = devinfo;
@@ -1269,18 +1243,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1269 /* flow control when too many tx urbs posted */ 1243 /* flow control when too many tx urbs posted */
1270 devinfo->tx_low_watermark = ntxq / 4; 1244 devinfo->tx_low_watermark = ntxq / 4;
1271 devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3; 1245 devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
1272 devinfo->dev = dev;
1273 devinfo->usbdev = usbdev_probe_info.usb;
1274 devinfo->tx_pipe = usbdev_probe_info.tx_pipe;
1275 devinfo->rx_pipe = usbdev_probe_info.rx_pipe;
1276 devinfo->rx_pipe2 = usbdev_probe_info.rx_pipe2;
1277 devinfo->intr_pipe = usbdev_probe_info.intr_pipe;
1278
1279 devinfo->interval = usbdev_probe_info.interval;
1280 devinfo->intr_size = usbdev_probe_info.intr_size;
1281
1282 memcpy(&devinfo->probe_info, &usbdev_probe_info,
1283 sizeof(struct brcmf_usb_probe_info));
1284 devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE; 1246 devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
1285 1247
1286 /* Initialize other structure content */ 1248 /* Initialize other structure content */
@@ -1295,6 +1257,8 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1295 INIT_LIST_HEAD(&devinfo->tx_freeq); 1257 INIT_LIST_HEAD(&devinfo->tx_freeq);
1296 INIT_LIST_HEAD(&devinfo->tx_postq); 1258 INIT_LIST_HEAD(&devinfo->tx_postq);
1297 1259
1260 devinfo->tx_flowblock = false;
1261
1298 devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq); 1262 devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
1299 if (!devinfo->rx_reqs) 1263 if (!devinfo->rx_reqs)
1300 goto error; 1264 goto error;
@@ -1302,6 +1266,7 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1302 devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq); 1266 devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
1303 if (!devinfo->tx_reqs) 1267 if (!devinfo->tx_reqs)
1304 goto error; 1268 goto error;
1269 devinfo->tx_freecount = ntxq;
1305 1270
1306 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC); 1271 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
1307 if (!devinfo->intr_urb) { 1272 if (!devinfo->intr_urb) {
@@ -1313,8 +1278,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1313 brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n"); 1278 brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n");
1314 goto error; 1279 goto error;
1315 } 1280 }
1316 devinfo->rxctl_deferrespok = 0;
1317
1318 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC); 1281 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
1319 if (!devinfo->bulk_urb) { 1282 if (!devinfo->bulk_urb) {
1320 brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n"); 1283 brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n");
@@ -1336,23 +1299,21 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1336 1299
1337error: 1300error:
1338 brcmf_dbg(ERROR, "failed!\n"); 1301 brcmf_dbg(ERROR, "failed!\n");
1339 brcmf_usb_detach(&devinfo->bus_pub); 1302 brcmf_usb_detach(devinfo);
1340 return NULL; 1303 return NULL;
1341} 1304}
1342 1305
1343static int brcmf_usb_probe_cb(struct device *dev, const char *desc, 1306static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
1344 u32 bustype, u32 hdrlen) 1307 const char *desc, u32 bustype, u32 hdrlen)
1345{ 1308{
1346 struct brcmf_bus *bus = NULL; 1309 struct brcmf_bus *bus = NULL;
1347 struct brcmf_usbdev *bus_pub = NULL; 1310 struct brcmf_usbdev *bus_pub = NULL;
1348 int ret; 1311 int ret;
1312 struct device *dev = devinfo->dev;
1349 1313
1350 1314 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
1351 bus_pub = brcmf_usb_attach(BRCMF_USB_NRXQ, BRCMF_USB_NTXQ, dev); 1315 if (!bus_pub)
1352 if (!bus_pub) { 1316 return -ENODEV;
1353 ret = -ENODEV;
1354 goto fail;
1355 }
1356 1317
1357 bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC); 1318 bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
1358 if (!bus) { 1319 if (!bus) {
@@ -1387,23 +1348,21 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
1387 return 0; 1348 return 0;
1388fail: 1349fail:
1389 /* Release resources in reverse order */ 1350 /* Release resources in reverse order */
1390 if (bus_pub)
1391 brcmf_usb_detach(bus_pub);
1392 kfree(bus); 1351 kfree(bus);
1352 brcmf_usb_detach(devinfo);
1393 return ret; 1353 return ret;
1394} 1354}
1395 1355
1396static void 1356static void
1397brcmf_usb_disconnect_cb(struct brcmf_usbdev *bus_pub) 1357brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
1398{ 1358{
1399 if (!bus_pub) 1359 if (!devinfo)
1400 return; 1360 return;
1401 brcmf_dbg(TRACE, "enter: bus_pub %p\n", bus_pub); 1361 brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo);
1402
1403 brcmf_detach(bus_pub->devinfo->dev);
1404 kfree(bus_pub->bus);
1405 brcmf_usb_detach(bus_pub);
1406 1362
1363 brcmf_detach(devinfo->dev);
1364 kfree(devinfo->bus_pub.bus);
1365 brcmf_usb_detach(devinfo);
1407} 1366}
1408 1367
1409static int 1368static int
@@ -1415,18 +1374,18 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1415 struct usb_device *usb = interface_to_usbdev(intf); 1374 struct usb_device *usb = interface_to_usbdev(intf);
1416 int num_of_eps; 1375 int num_of_eps;
1417 u8 endpoint_num; 1376 u8 endpoint_num;
1377 struct brcmf_usbdev_info *devinfo;
1418 1378
1419 brcmf_dbg(TRACE, "enter\n"); 1379 brcmf_dbg(TRACE, "enter\n");
1420 1380
1421 usbdev_probe_info.usb = usb; 1381 devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
1422 usbdev_probe_info.intf = intf; 1382 if (devinfo == NULL)
1383 return -ENOMEM;
1423 1384
1424 if (id != NULL) { 1385 devinfo->usbdev = usb;
1425 usbdev_probe_info.vid = id->idVendor; 1386 devinfo->dev = &usb->dev;
1426 usbdev_probe_info.pid = id->idProduct;
1427 }
1428 1387
1429 usb_set_intfdata(intf, &usbdev_probe_info); 1388 usb_set_intfdata(intf, devinfo);
1430 1389
1431 /* Check that the device supports only one configuration */ 1390 /* Check that the device supports only one configuration */
1432 if (usb->descriptor.bNumConfigurations != 1) { 1391 if (usb->descriptor.bNumConfigurations != 1) {
@@ -1475,11 +1434,11 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1475 } 1434 }
1476 1435
1477 endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 1436 endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1478 usbdev_probe_info.intr_pipe = usb_rcvintpipe(usb, endpoint_num); 1437 devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
1479 1438
1480 usbdev_probe_info.rx_pipe = 0; 1439 devinfo->rx_pipe = 0;
1481 usbdev_probe_info.rx_pipe2 = 0; 1440 devinfo->rx_pipe2 = 0;
1482 usbdev_probe_info.tx_pipe = 0; 1441 devinfo->tx_pipe = 0;
1483 num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1; 1442 num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
1484 1443
1485 /* Check data endpoints and get pipes */ 1444 /* Check data endpoints and get pipes */
@@ -1496,35 +1455,33 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1496 USB_ENDPOINT_NUMBER_MASK; 1455 USB_ENDPOINT_NUMBER_MASK;
1497 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) 1456 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
1498 == USB_DIR_IN) { 1457 == USB_DIR_IN) {
1499 if (!usbdev_probe_info.rx_pipe) { 1458 if (!devinfo->rx_pipe) {
1500 usbdev_probe_info.rx_pipe = 1459 devinfo->rx_pipe =
1501 usb_rcvbulkpipe(usb, endpoint_num); 1460 usb_rcvbulkpipe(usb, endpoint_num);
1502 } else { 1461 } else {
1503 usbdev_probe_info.rx_pipe2 = 1462 devinfo->rx_pipe2 =
1504 usb_rcvbulkpipe(usb, endpoint_num); 1463 usb_rcvbulkpipe(usb, endpoint_num);
1505 } 1464 }
1506 } else { 1465 } else {
1507 usbdev_probe_info.tx_pipe = 1466 devinfo->tx_pipe = usb_sndbulkpipe(usb, endpoint_num);
1508 usb_sndbulkpipe(usb, endpoint_num);
1509 } 1467 }
1510 } 1468 }
1511 1469
1512 /* Allocate interrupt URB and data buffer */ 1470 /* Allocate interrupt URB and data buffer */
1513 /* RNDIS says 8-byte intr, our old drivers used 4-byte */ 1471 /* RNDIS says 8-byte intr, our old drivers used 4-byte */
1514 if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16)) 1472 if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
1515 usbdev_probe_info.intr_size = 8; 1473 devinfo->intr_size = 8;
1516 else 1474 else
1517 usbdev_probe_info.intr_size = 4; 1475 devinfo->intr_size = 4;
1518 1476
1519 usbdev_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval; 1477 devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
1520 1478
1521 usbdev_probe_info.device_speed = usb->speed;
1522 if (usb->speed == USB_SPEED_HIGH) 1479 if (usb->speed == USB_SPEED_HIGH)
1523 brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n"); 1480 brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
1524 else 1481 else
1525 brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n"); 1482 brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
1526 1483
1527 ret = brcmf_usb_probe_cb(&usb->dev, "", USB_BUS, 0); 1484 ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0);
1528 if (ret) 1485 if (ret)
1529 goto fail; 1486 goto fail;
1530 1487
@@ -1533,6 +1490,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1533 1490
1534fail: 1491fail:
1535 brcmf_dbg(ERROR, "failed with errno %d\n", ret); 1492 brcmf_dbg(ERROR, "failed with errno %d\n", ret);
1493 kfree(devinfo);
1536 usb_set_intfdata(intf, NULL); 1494 usb_set_intfdata(intf, NULL);
1537 return ret; 1495 return ret;
1538 1496
@@ -1541,11 +1499,12 @@ fail:
1541static void 1499static void
1542brcmf_usb_disconnect(struct usb_interface *intf) 1500brcmf_usb_disconnect(struct usb_interface *intf)
1543{ 1501{
1544 struct usb_device *usb = interface_to_usbdev(intf); 1502 struct brcmf_usbdev_info *devinfo;
1545 1503
1546 brcmf_dbg(TRACE, "enter\n"); 1504 brcmf_dbg(TRACE, "enter\n");
1547 brcmf_usb_disconnect_cb(brcmf_usb_get_buspub(&usb->dev)); 1505 devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
1548 usb_set_intfdata(intf, NULL); 1506 brcmf_usb_disconnect_cb(devinfo);
1507 kfree(devinfo);
1549} 1508}
1550 1509
1551/* 1510/*
@@ -1577,17 +1536,23 @@ static int brcmf_usb_resume(struct usb_interface *intf)
1577} 1536}
1578 1537
1579#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c 1538#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
1539#define BRCMF_USB_DEVICE_ID_43143 0xbd1e
1580#define BRCMF_USB_DEVICE_ID_43236 0xbd17 1540#define BRCMF_USB_DEVICE_ID_43236 0xbd17
1541#define BRCMF_USB_DEVICE_ID_43242 0xbd1f
1581#define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc 1542#define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc
1582 1543
1583static struct usb_device_id brcmf_usb_devid_table[] = { 1544static struct usb_device_id brcmf_usb_devid_table[] = {
1545 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
1584 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) }, 1546 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
1547 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
1585 /* special entry for device with firmware loaded and running */ 1548 /* special entry for device with firmware loaded and running */
1586 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) }, 1549 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
1587 { } 1550 { }
1588}; 1551};
1589MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table); 1552MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
1553MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
1590MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME); 1554MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
1555MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
1591 1556
1592/* TODO: suspend and resume entries */ 1557/* TODO: suspend and resume entries */
1593static struct usb_driver brcmf_usbdrvr = { 1558static struct usb_driver brcmf_usbdrvr = {
@@ -1601,15 +1566,25 @@ static struct usb_driver brcmf_usbdrvr = {
1601 .disable_hub_initiated_lpm = 1, 1566 .disable_hub_initiated_lpm = 1,
1602}; 1567};
1603 1568
1569static void brcmf_release_fw(struct list_head *q)
1570{
1571 struct brcmf_usb_image *fw_image, *next;
1572
1573 list_for_each_entry_safe(fw_image, next, q, list) {
1574 vfree(fw_image->image);
1575 list_del_init(&fw_image->list);
1576 }
1577}
1578
1579
1604void brcmf_usb_exit(void) 1580void brcmf_usb_exit(void)
1605{ 1581{
1606 usb_deregister(&brcmf_usbdrvr); 1582 usb_deregister(&brcmf_usbdrvr);
1607 vfree(g_image.data); 1583 brcmf_release_fw(&fw_image_list);
1608 g_image.data = NULL;
1609 g_image.len = 0;
1610} 1584}
1611 1585
1612void brcmf_usb_init(void) 1586void brcmf_usb_init(void)
1613{ 1587{
1588 INIT_LIST_HEAD(&fw_image_list);
1614 usb_register(&brcmf_usbdrvr); 1589 usb_register(&brcmf_usbdrvr);
1615} 1590}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 50b5553b6964..c1abaa6db59e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -28,6 +28,7 @@
28#include <linux/ieee80211.h> 28#include <linux/ieee80211.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30#include <net/cfg80211.h> 30#include <net/cfg80211.h>
31#include <net/netlink.h>
31 32
32#include <brcmu_utils.h> 33#include <brcmu_utils.h>
33#include <defs.h> 34#include <defs.h>
@@ -35,6 +36,58 @@
35#include "dhd.h" 36#include "dhd.h"
36#include "wl_cfg80211.h" 37#include "wl_cfg80211.h"
37 38
39#define BRCMF_SCAN_IE_LEN_MAX 2048
40#define BRCMF_PNO_VERSION 2
41#define BRCMF_PNO_TIME 30
42#define BRCMF_PNO_REPEAT 4
43#define BRCMF_PNO_FREQ_EXPO_MAX 3
44#define BRCMF_PNO_MAX_PFN_COUNT 16
45#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
46#define BRCMF_PNO_HIDDEN_BIT 2
47#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
48#define BRCMF_PNO_SCAN_COMPLETE 1
49#define BRCMF_PNO_SCAN_INCOMPLETE 0
50
51#define TLV_LEN_OFF 1 /* length offset */
52#define TLV_HDR_LEN 2 /* header length */
53#define TLV_BODY_OFF 2 /* body offset */
54#define TLV_OUI_LEN 3 /* oui id length */
55#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
56#define WPA_OUI_TYPE 1
57#define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */
58#define WME_OUI_TYPE 2
59
60#define VS_IE_FIXED_HDR_LEN 6
61#define WPA_IE_VERSION_LEN 2
62#define WPA_IE_MIN_OUI_LEN 4
63#define WPA_IE_SUITE_COUNT_LEN 2
64
65#define WPA_CIPHER_NONE 0 /* None */
66#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */
67#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */
68#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */
69#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */
70
71#define RSN_AKM_NONE 0 /* None (IBSS) */
72#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
73#define RSN_AKM_PSK 2 /* Pre-shared Key */
74#define RSN_CAP_LEN 2 /* Length of RSN capabilities */
75#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
76
77#define VNDR_IE_CMD_LEN 4 /* length of the set command
78 * string :"add", "del" (+ NUL)
79 */
80#define VNDR_IE_COUNT_OFFSET 4
81#define VNDR_IE_PKTFLAG_OFFSET 8
82#define VNDR_IE_VSIE_OFFSET 12
83#define VNDR_IE_HDR_SIZE 12
84#define VNDR_IE_BEACON_FLAG 0x1
85#define VNDR_IE_PRBRSP_FLAG 0x2
86#define MAX_VNDR_IE_NUMBER 5
87
88#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
89#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
90
38#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ 91#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
39 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) 92 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
40 93
@@ -42,33 +95,12 @@ static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
42 95
43static u32 brcmf_dbg_level = WL_DBG_ERR; 96static u32 brcmf_dbg_level = WL_DBG_ERR;
44 97
45static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
46{
47 dev->driver_data = data;
48}
49
50static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
51{
52 void *data = NULL;
53
54 if (dev)
55 data = dev->driver_data;
56 return data;
57}
58
59static
60struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev)
61{
62 struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev);
63 return ci->cfg_priv;
64}
65
66static bool check_sys_up(struct wiphy *wiphy) 98static bool check_sys_up(struct wiphy *wiphy)
67{ 99{
68 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 100 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
69 if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) { 101 if (!test_bit(WL_STATUS_READY, &cfg->status)) {
70 WL_INFO("device is not ready : status (%d)\n", 102 WL_INFO("device is not ready : status (%d)\n",
71 (int)cfg_priv->status); 103 (int)cfg->status);
72 return false; 104 return false;
73 } 105 }
74 return true; 106 return true;
@@ -256,6 +288,25 @@ struct brcmf_tlv {
256 u8 data[1]; 288 u8 data[1];
257}; 289};
258 290
291/* Vendor specific ie. id = 221, oui and type defines exact ie */
292struct brcmf_vs_tlv {
293 u8 id;
294 u8 len;
295 u8 oui[3];
296 u8 oui_type;
297};
298
299struct parsed_vndr_ie_info {
300 u8 *ie_ptr;
301 u32 ie_len; /* total length including id & length field */
302 struct brcmf_vs_tlv vndrie;
303};
304
305struct parsed_vndr_ies {
306 u32 count;
307 struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
308};
309
259/* Quarter dBm units to mW 310/* Quarter dBm units to mW
260 * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 311 * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
261 * Table is offset so the last entry is largest mW value that fits in 312 * Table is offset so the last entry is largest mW value that fits in
@@ -353,6 +404,44 @@ brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
353 return err; 404 return err;
354} 405}
355 406
407static s32
408brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
409 void *param, s32 paramlen,
410 void *buf, s32 buflen, s32 bssidx)
411{
412 s32 err = -ENOMEM;
413 u32 len;
414
415 len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
416 buf, buflen, bssidx);
417 BUG_ON(!len);
418 if (len > 0)
419 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
420 if (err)
421 WL_ERR("error (%d)\n", err);
422
423 return err;
424}
425
426static s32
427brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
428 void *param, s32 paramlen,
429 void *buf, s32 buflen, s32 bssidx)
430{
431 s32 err = -ENOMEM;
432 u32 len;
433
434 len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
435 buf, buflen, bssidx);
436 BUG_ON(!len);
437 if (len > 0)
438 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
439 if (err)
440 WL_ERR("error (%d)\n", err);
441
442 return err;
443}
444
356static void convert_key_from_CPU(struct brcmf_wsec_key *key, 445static void convert_key_from_CPU(struct brcmf_wsec_key *key,
357 struct brcmf_wsec_key_le *key_le) 446 struct brcmf_wsec_key_le *key_le)
358{ 447{
@@ -367,16 +456,22 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
367 memcpy(key_le->ea, key->ea, sizeof(key->ea)); 456 memcpy(key_le->ea, key->ea, sizeof(key->ea));
368} 457}
369 458
370static int send_key_to_dongle(struct net_device *ndev, 459static int
371 struct brcmf_wsec_key *key) 460send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
461 struct net_device *ndev, struct brcmf_wsec_key *key)
372{ 462{
373 int err; 463 int err;
374 struct brcmf_wsec_key_le key_le; 464 struct brcmf_wsec_key_le key_le;
375 465
376 convert_key_from_CPU(key, &key_le); 466 convert_key_from_CPU(key, &key_le);
377 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le)); 467
468 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
469 sizeof(key_le),
470 cfg->extra_buf,
471 WL_EXTRA_BUF_MAX, bssidx);
472
378 if (err) 473 if (err)
379 WL_ERR("WLC_SET_KEY error (%d)\n", err); 474 WL_ERR("wsec_key error (%d)\n", err);
380 return err; 475 return err;
381} 476}
382 477
@@ -385,14 +480,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
385 enum nl80211_iftype type, u32 *flags, 480 enum nl80211_iftype type, u32 *flags,
386 struct vif_params *params) 481 struct vif_params *params)
387{ 482{
388 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 483 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
389 struct wireless_dev *wdev;
390 s32 infra = 0; 484 s32 infra = 0;
485 s32 ap = 0;
391 s32 err = 0; 486 s32 err = 0;
392 487
393 WL_TRACE("Enter\n"); 488 WL_TRACE("Enter, ndev=%p, type=%d\n", ndev, type);
394 if (!check_sys_up(wiphy))
395 return -EIO;
396 489
397 switch (type) { 490 switch (type) {
398 case NL80211_IFTYPE_MONITOR: 491 case NL80211_IFTYPE_MONITOR:
@@ -401,29 +494,44 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
401 type); 494 type);
402 return -EOPNOTSUPP; 495 return -EOPNOTSUPP;
403 case NL80211_IFTYPE_ADHOC: 496 case NL80211_IFTYPE_ADHOC:
404 cfg_priv->conf->mode = WL_MODE_IBSS; 497 cfg->conf->mode = WL_MODE_IBSS;
405 infra = 0; 498 infra = 0;
406 break; 499 break;
407 case NL80211_IFTYPE_STATION: 500 case NL80211_IFTYPE_STATION:
408 cfg_priv->conf->mode = WL_MODE_BSS; 501 cfg->conf->mode = WL_MODE_BSS;
409 infra = 1; 502 infra = 1;
410 break; 503 break;
504 case NL80211_IFTYPE_AP:
505 cfg->conf->mode = WL_MODE_AP;
506 ap = 1;
507 break;
411 default: 508 default:
412 err = -EINVAL; 509 err = -EINVAL;
413 goto done; 510 goto done;
414 } 511 }
415 512
416 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra); 513 if (ap) {
417 if (err) { 514 set_bit(WL_STATUS_AP_CREATING, &cfg->status);
418 WL_ERR("WLC_SET_INFRA error (%d)\n", err); 515 if (!cfg->ap_info)
419 err = -EAGAIN; 516 cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
517 GFP_KERNEL);
518 if (!cfg->ap_info) {
519 err = -ENOMEM;
520 goto done;
521 }
522 WL_INFO("IF Type = AP\n");
420 } else { 523 } else {
421 wdev = ndev->ieee80211_ptr; 524 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
422 wdev->iftype = type; 525 if (err) {
526 WL_ERR("WLC_SET_INFRA error (%d)\n", err);
527 err = -EAGAIN;
528 goto done;
529 }
530 WL_INFO("IF Type = %s\n",
531 (cfg->conf->mode == WL_MODE_IBSS) ?
532 "Adhoc" : "Infra");
423 } 533 }
424 534 ndev->ieee80211_ptr->iftype = type;
425 WL_INFO("IF Type = %s\n",
426 (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
427 535
428done: 536done:
429 WL_TRACE("Exit\n"); 537 WL_TRACE("Exit\n");
@@ -474,12 +582,55 @@ brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
474 return err; 582 return err;
475} 583}
476 584
585static s32
586brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
587 s32 bssidx)
588{
589 s8 buf[BRCMF_DCMD_SMLEN];
590 __le32 val_le;
591
592 val_le = cpu_to_le32(val);
593
594 return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
595 sizeof(val_le), buf, sizeof(buf),
596 bssidx);
597}
598
599static s32
600brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
601 s32 bssidx)
602{
603 s8 buf[BRCMF_DCMD_SMLEN];
604 s32 err;
605 __le32 val_le;
606
607 memset(buf, 0, sizeof(buf));
608 err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
609 sizeof(buf), bssidx);
610 if (err == 0) {
611 memcpy(&val_le, buf, sizeof(val_le));
612 *val = le32_to_cpu(val_le);
613 }
614 return err;
615}
616
617
618/*
619 * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
620 * should return the ndev matching bssidx.
621 */
622static s32
623brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
624{
625 return 0;
626}
627
477static void brcmf_set_mpc(struct net_device *ndev, int mpc) 628static void brcmf_set_mpc(struct net_device *ndev, int mpc)
478{ 629{
479 s32 err = 0; 630 s32 err = 0;
480 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 631 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
481 632
482 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { 633 if (test_bit(WL_STATUS_READY, &cfg->status)) {
483 err = brcmf_dev_intvar_set(ndev, "mpc", mpc); 634 err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
484 if (err) { 635 if (err) {
485 WL_ERR("fail to set mpc\n"); 636 WL_ERR("fail to set mpc\n");
@@ -489,8 +640,8 @@ static void brcmf_set_mpc(struct net_device *ndev, int mpc)
489 } 640 }
490} 641}
491 642
492static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, 643static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
493 struct brcmf_ssid *ssid) 644 struct brcmf_ssid *ssid)
494{ 645{
495 memcpy(params_le->bssid, ether_bcast, ETH_ALEN); 646 memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
496 params_le->bss_type = DOT11_BSSTYPE_ANY; 647 params_le->bss_type = DOT11_BSSTYPE_ANY;
@@ -546,7 +697,7 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
546 return -ENOMEM; 697 return -ENOMEM;
547 BUG_ON(params_size >= BRCMF_DCMD_SMLEN); 698 BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
548 699
549 wl_iscan_prep(&params->params_le, ssid); 700 brcmf_iscan_prep(&params->params_le, ssid);
550 701
551 params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION); 702 params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
552 params->action = cpu_to_le16(action); 703 params->action = cpu_to_le16(action);
@@ -565,10 +716,10 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
565 return err; 716 return err;
566} 717}
567 718
568static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv) 719static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
569{ 720{
570 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 721 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
571 struct net_device *ndev = cfg_to_ndev(cfg_priv); 722 struct net_device *ndev = cfg_to_ndev(cfg);
572 struct brcmf_ssid ssid; 723 struct brcmf_ssid ssid;
573 __le32 passive_scan; 724 __le32 passive_scan;
574 s32 err = 0; 725 s32 err = 0;
@@ -578,19 +729,19 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
578 729
579 iscan->state = WL_ISCAN_STATE_SCANING; 730 iscan->state = WL_ISCAN_STATE_SCANING;
580 731
581 passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1); 732 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
582 err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN, 733 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
583 &passive_scan, sizeof(passive_scan)); 734 &passive_scan, sizeof(passive_scan));
584 if (err) { 735 if (err) {
585 WL_ERR("error (%d)\n", err); 736 WL_ERR("error (%d)\n", err);
586 return err; 737 return err;
587 } 738 }
588 brcmf_set_mpc(ndev, 0); 739 brcmf_set_mpc(ndev, 0);
589 cfg_priv->iscan_kickstart = true; 740 cfg->iscan_kickstart = true;
590 err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START); 741 err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
591 if (err) { 742 if (err) {
592 brcmf_set_mpc(ndev, 1); 743 brcmf_set_mpc(ndev, 1);
593 cfg_priv->iscan_kickstart = false; 744 cfg->iscan_kickstart = false;
594 return err; 745 return err;
595 } 746 }
596 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000); 747 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -599,31 +750,31 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
599} 750}
600 751
601static s32 752static s32
602__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, 753brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
603 struct cfg80211_scan_request *request, 754 struct cfg80211_scan_request *request,
604 struct cfg80211_ssid *this_ssid) 755 struct cfg80211_ssid *this_ssid)
605{ 756{
606 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 757 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
607 struct cfg80211_ssid *ssids; 758 struct cfg80211_ssid *ssids;
608 struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int; 759 struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
609 __le32 passive_scan; 760 __le32 passive_scan;
610 bool iscan_req; 761 bool iscan_req;
611 bool spec_scan; 762 bool spec_scan;
612 s32 err = 0; 763 s32 err = 0;
613 u32 SSID_len; 764 u32 SSID_len;
614 765
615 if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { 766 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
616 WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status); 767 WL_ERR("Scanning already : status (%lu)\n", cfg->status);
617 return -EAGAIN; 768 return -EAGAIN;
618 } 769 }
619 if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) { 770 if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
620 WL_ERR("Scanning being aborted : status (%lu)\n", 771 WL_ERR("Scanning being aborted : status (%lu)\n",
621 cfg_priv->status); 772 cfg->status);
622 return -EAGAIN; 773 return -EAGAIN;
623 } 774 }
624 if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { 775 if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
625 WL_ERR("Connecting : status (%lu)\n", 776 WL_ERR("Connecting : status (%lu)\n",
626 cfg_priv->status); 777 cfg->status);
627 return -EAGAIN; 778 return -EAGAIN;
628 } 779 }
629 780
@@ -632,7 +783,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
632 if (request) { 783 if (request) {
633 /* scan bss */ 784 /* scan bss */
634 ssids = request->ssids; 785 ssids = request->ssids;
635 if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len)) 786 if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
636 iscan_req = true; 787 iscan_req = true;
637 } else { 788 } else {
638 /* scan in ibss */ 789 /* scan in ibss */
@@ -640,10 +791,10 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
640 ssids = this_ssid; 791 ssids = this_ssid;
641 } 792 }
642 793
643 cfg_priv->scan_request = request; 794 cfg->scan_request = request;
644 set_bit(WL_STATUS_SCANNING, &cfg_priv->status); 795 set_bit(WL_STATUS_SCANNING, &cfg->status);
645 if (iscan_req) { 796 if (iscan_req) {
646 err = brcmf_do_iscan(cfg_priv); 797 err = brcmf_do_iscan(cfg);
647 if (!err) 798 if (!err)
648 return err; 799 return err;
649 else 800 else
@@ -662,7 +813,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
662 WL_SCAN("Broadcast scan\n"); 813 WL_SCAN("Broadcast scan\n");
663 } 814 }
664 815
665 passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1); 816 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
666 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, 817 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
667 &passive_scan, sizeof(passive_scan)); 818 &passive_scan, sizeof(passive_scan));
668 if (err) { 819 if (err) {
@@ -687,8 +838,346 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
687 return 0; 838 return 0;
688 839
689scan_out: 840scan_out:
690 clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); 841 clear_bit(WL_STATUS_SCANNING, &cfg->status);
691 cfg_priv->scan_request = NULL; 842 cfg->scan_request = NULL;
843 return err;
844}
845
846static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
847 struct cfg80211_scan_request *request)
848{
849 u32 n_ssids;
850 u32 n_channels;
851 s32 i;
852 s32 offset;
853 u16 chanspec;
854 u16 channel;
855 struct ieee80211_channel *req_channel;
856 char *ptr;
857 struct brcmf_ssid_le ssid_le;
858
859 memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
860 params_le->bss_type = DOT11_BSSTYPE_ANY;
861 params_le->scan_type = 0;
862 params_le->channel_num = 0;
863 params_le->nprobes = cpu_to_le32(-1);
864 params_le->active_time = cpu_to_le32(-1);
865 params_le->passive_time = cpu_to_le32(-1);
866 params_le->home_time = cpu_to_le32(-1);
867 memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
868
869 /* if request is null exit so it will be all channel broadcast scan */
870 if (!request)
871 return;
872
873 n_ssids = request->n_ssids;
874 n_channels = request->n_channels;
875 /* Copy channel array if applicable */
876 WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
877 if (n_channels > 0) {
878 for (i = 0; i < n_channels; i++) {
879 chanspec = 0;
880 req_channel = request->channels[i];
881 channel = ieee80211_frequency_to_channel(
882 req_channel->center_freq);
883 if (req_channel->band == IEEE80211_BAND_2GHZ)
884 chanspec |= WL_CHANSPEC_BAND_2G;
885 else
886 chanspec |= WL_CHANSPEC_BAND_5G;
887
888 if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
889 chanspec |= WL_CHANSPEC_BW_20;
890 chanspec |= WL_CHANSPEC_CTL_SB_NONE;
891 } else {
892 chanspec |= WL_CHANSPEC_BW_40;
893 if (req_channel->flags &
894 IEEE80211_CHAN_NO_HT40PLUS)
895 chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
896 else
897 chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
898 }
899
900 chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
901 WL_SCAN("Chan : %d, Channel spec: %x\n",
902 channel, chanspec);
903 params_le->channel_list[i] = cpu_to_le16(chanspec);
904 }
905 } else {
906 WL_SCAN("Scanning all channels\n");
907 }
908 /* Copy ssid array if applicable */
909 WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids);
910 if (n_ssids > 0) {
911 offset = offsetof(struct brcmf_scan_params_le, channel_list) +
912 n_channels * sizeof(u16);
913 offset = roundup(offset, sizeof(u32));
914 ptr = (char *)params_le + offset;
915 for (i = 0; i < n_ssids; i++) {
916 memset(&ssid_le, 0, sizeof(ssid_le));
917 ssid_le.SSID_len =
918 cpu_to_le32(request->ssids[i].ssid_len);
919 memcpy(ssid_le.SSID, request->ssids[i].ssid,
920 request->ssids[i].ssid_len);
921 if (!ssid_le.SSID_len)
922 WL_SCAN("%d: Broadcast scan\n", i);
923 else
924 WL_SCAN("%d: scan for %s size =%d\n", i,
925 ssid_le.SSID, ssid_le.SSID_len);
926 memcpy(ptr, &ssid_le, sizeof(ssid_le));
927 ptr += sizeof(ssid_le);
928 }
929 } else {
930 WL_SCAN("Broadcast scan %p\n", request->ssids);
931 if ((request->ssids) && request->ssids->ssid_len) {
932 WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID,
933 request->ssids->ssid_len);
934 params_le->ssid_le.SSID_len =
935 cpu_to_le32(request->ssids->ssid_len);
936 memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
937 request->ssids->ssid_len);
938 }
939 }
940 /* Adding mask to channel numbers */
941 params_le->channel_num =
942 cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
943 (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
944}
945
946static s32
947brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
948 struct net_device *ndev,
949 bool aborted, bool fw_abort)
950{
951 struct brcmf_scan_params_le params_le;
952 struct cfg80211_scan_request *scan_request;
953 s32 err = 0;
954
955 WL_SCAN("Enter\n");
956
957 /* clear scan request, because the FW abort can cause a second call */
958 /* to this functon and might cause a double cfg80211_scan_done */
959 scan_request = cfg->scan_request;
960 cfg->scan_request = NULL;
961
962 if (timer_pending(&cfg->escan_timeout))
963 del_timer_sync(&cfg->escan_timeout);
964
965 if (fw_abort) {
966 /* Do a scan abort to stop the driver's scan engine */
967 WL_SCAN("ABORT scan in firmware\n");
968 memset(&params_le, 0, sizeof(params_le));
969 memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
970 params_le.bss_type = DOT11_BSSTYPE_ANY;
971 params_le.scan_type = 0;
972 params_le.channel_num = cpu_to_le32(1);
973 params_le.nprobes = cpu_to_le32(1);
974 params_le.active_time = cpu_to_le32(-1);
975 params_le.passive_time = cpu_to_le32(-1);
976 params_le.home_time = cpu_to_le32(-1);
977 /* Scan is aborted by setting channel_list[0] to -1 */
978 params_le.channel_list[0] = cpu_to_le16(-1);
979 /* E-Scan (or anyother type) can be aborted by SCAN */
980 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
981 sizeof(params_le));
982 if (err)
983 WL_ERR("Scan abort failed\n");
984 }
985 /*
986 * e-scan can be initiated by scheduled scan
987 * which takes precedence.
988 */
989 if (cfg->sched_escan) {
990 WL_SCAN("scheduled scan completed\n");
991 cfg->sched_escan = false;
992 if (!aborted)
993 cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
994 brcmf_set_mpc(ndev, 1);
995 } else if (scan_request) {
996 WL_SCAN("ESCAN Completed scan: %s\n",
997 aborted ? "Aborted" : "Done");
998 cfg80211_scan_done(scan_request, aborted);
999 brcmf_set_mpc(ndev, 1);
1000 }
1001 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
1002 WL_ERR("Scan complete while device not scanning\n");
1003 return -EPERM;
1004 }
1005
1006 return err;
1007}
1008
1009static s32
1010brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
1011 struct cfg80211_scan_request *request, u16 action)
1012{
1013 s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
1014 offsetof(struct brcmf_escan_params_le, params_le);
1015 struct brcmf_escan_params_le *params;
1016 s32 err = 0;
1017
1018 WL_SCAN("E-SCAN START\n");
1019
1020 if (request != NULL) {
1021 /* Allocate space for populating ssids in struct */
1022 params_size += sizeof(u32) * ((request->n_channels + 1) / 2);
1023
1024 /* Allocate space for populating ssids in struct */
1025 params_size += sizeof(struct brcmf_ssid) * request->n_ssids;
1026 }
1027
1028 params = kzalloc(params_size, GFP_KERNEL);
1029 if (!params) {
1030 err = -ENOMEM;
1031 goto exit;
1032 }
1033 BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
1034 brcmf_escan_prep(&params->params_le, request);
1035 params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
1036 params->action = cpu_to_le16(action);
1037 params->sync_id = cpu_to_le16(0x1234);
1038
1039 err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
1040 cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
1041 if (err) {
1042 if (err == -EBUSY)
1043 WL_INFO("system busy : escan canceled\n");
1044 else
1045 WL_ERR("error (%d)\n", err);
1046 }
1047
1048 kfree(params);
1049exit:
1050 return err;
1051}
1052
1053static s32
1054brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
1055 struct net_device *ndev, struct cfg80211_scan_request *request)
1056{
1057 s32 err;
1058 __le32 passive_scan;
1059 struct brcmf_scan_results *results;
1060
1061 WL_SCAN("Enter\n");
1062 cfg->escan_info.ndev = ndev;
1063 cfg->escan_info.wiphy = wiphy;
1064 cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
1065 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
1066 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
1067 &passive_scan, sizeof(passive_scan));
1068 if (err) {
1069 WL_ERR("error (%d)\n", err);
1070 return err;
1071 }
1072 brcmf_set_mpc(ndev, 0);
1073 results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
1074 results->version = 0;
1075 results->count = 0;
1076 results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
1077
1078 err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
1079 if (err)
1080 brcmf_set_mpc(ndev, 1);
1081 return err;
1082}
1083
1084static s32
1085brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
1086 struct cfg80211_scan_request *request,
1087 struct cfg80211_ssid *this_ssid)
1088{
1089 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1090 struct cfg80211_ssid *ssids;
1091 struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
1092 __le32 passive_scan;
1093 bool escan_req;
1094 bool spec_scan;
1095 s32 err;
1096 u32 SSID_len;
1097
1098 WL_SCAN("START ESCAN\n");
1099
1100 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
1101 WL_ERR("Scanning already : status (%lu)\n", cfg->status);
1102 return -EAGAIN;
1103 }
1104 if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
1105 WL_ERR("Scanning being aborted : status (%lu)\n",
1106 cfg->status);
1107 return -EAGAIN;
1108 }
1109 if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
1110 WL_ERR("Connecting : status (%lu)\n",
1111 cfg->status);
1112 return -EAGAIN;
1113 }
1114
1115 /* Arm scan timeout timer */
1116 mod_timer(&cfg->escan_timeout, jiffies +
1117 WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
1118
1119 escan_req = false;
1120 if (request) {
1121 /* scan bss */
1122 ssids = request->ssids;
1123 escan_req = true;
1124 } else {
1125 /* scan in ibss */
1126 /* we don't do escan in ibss */
1127 ssids = this_ssid;
1128 }
1129
1130 cfg->scan_request = request;
1131 set_bit(WL_STATUS_SCANNING, &cfg->status);
1132 if (escan_req) {
1133 err = brcmf_do_escan(cfg, wiphy, ndev, request);
1134 if (!err)
1135 return err;
1136 else
1137 goto scan_out;
1138 } else {
1139 WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
1140 ssids->ssid, ssids->ssid_len);
1141 memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
1142 SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
1143 sr->ssid_le.SSID_len = cpu_to_le32(0);
1144 spec_scan = false;
1145 if (SSID_len) {
1146 memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
1147 sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
1148 spec_scan = true;
1149 } else
1150 WL_SCAN("Broadcast scan\n");
1151
1152 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
1153 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
1154 &passive_scan, sizeof(passive_scan));
1155 if (err) {
1156 WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
1157 goto scan_out;
1158 }
1159 brcmf_set_mpc(ndev, 0);
1160 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
1161 sizeof(sr->ssid_le));
1162 if (err) {
1163 if (err == -EBUSY)
1164 WL_INFO("BUSY: scan for \"%s\" canceled\n",
1165 sr->ssid_le.SSID);
1166 else
1167 WL_ERR("WLC_SCAN error (%d)\n", err);
1168
1169 brcmf_set_mpc(ndev, 1);
1170 goto scan_out;
1171 }
1172 }
1173
1174 return 0;
1175
1176scan_out:
1177 clear_bit(WL_STATUS_SCANNING, &cfg->status);
1178 if (timer_pending(&cfg->escan_timeout))
1179 del_timer_sync(&cfg->escan_timeout);
1180 cfg->scan_request = NULL;
692 return err; 1181 return err;
693} 1182}
694 1183
@@ -697,6 +1186,7 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
697 struct cfg80211_scan_request *request) 1186 struct cfg80211_scan_request *request)
698{ 1187{
699 struct net_device *ndev = request->wdev->netdev; 1188 struct net_device *ndev = request->wdev->netdev;
1189 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
700 s32 err = 0; 1190 s32 err = 0;
701 1191
702 WL_TRACE("Enter\n"); 1192 WL_TRACE("Enter\n");
@@ -704,7 +1194,11 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
704 if (!check_sys_up(wiphy)) 1194 if (!check_sys_up(wiphy))
705 return -EIO; 1195 return -EIO;
706 1196
707 err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL); 1197 if (cfg->iscan_on)
1198 err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
1199 else if (cfg->escan_on)
1200 err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
1201
708 if (err) 1202 if (err)
709 WL_ERR("scan error (%d)\n", err); 1203 WL_ERR("scan error (%d)\n", err);
710 1204
@@ -749,8 +1243,8 @@ static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
749 1243
750static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) 1244static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
751{ 1245{
752 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1246 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
753 struct net_device *ndev = cfg_to_ndev(cfg_priv); 1247 struct net_device *ndev = cfg_to_ndev(cfg);
754 s32 err = 0; 1248 s32 err = 0;
755 1249
756 WL_TRACE("Enter\n"); 1250 WL_TRACE("Enter\n");
@@ -758,30 +1252,30 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
758 return -EIO; 1252 return -EIO;
759 1253
760 if (changed & WIPHY_PARAM_RTS_THRESHOLD && 1254 if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
761 (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) { 1255 (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
762 cfg_priv->conf->rts_threshold = wiphy->rts_threshold; 1256 cfg->conf->rts_threshold = wiphy->rts_threshold;
763 err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold); 1257 err = brcmf_set_rts(ndev, cfg->conf->rts_threshold);
764 if (!err) 1258 if (!err)
765 goto done; 1259 goto done;
766 } 1260 }
767 if (changed & WIPHY_PARAM_FRAG_THRESHOLD && 1261 if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
768 (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) { 1262 (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
769 cfg_priv->conf->frag_threshold = wiphy->frag_threshold; 1263 cfg->conf->frag_threshold = wiphy->frag_threshold;
770 err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold); 1264 err = brcmf_set_frag(ndev, cfg->conf->frag_threshold);
771 if (!err) 1265 if (!err)
772 goto done; 1266 goto done;
773 } 1267 }
774 if (changed & WIPHY_PARAM_RETRY_LONG 1268 if (changed & WIPHY_PARAM_RETRY_LONG
775 && (cfg_priv->conf->retry_long != wiphy->retry_long)) { 1269 && (cfg->conf->retry_long != wiphy->retry_long)) {
776 cfg_priv->conf->retry_long = wiphy->retry_long; 1270 cfg->conf->retry_long = wiphy->retry_long;
777 err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true); 1271 err = brcmf_set_retry(ndev, cfg->conf->retry_long, true);
778 if (!err) 1272 if (!err)
779 goto done; 1273 goto done;
780 } 1274 }
781 if (changed & WIPHY_PARAM_RETRY_SHORT 1275 if (changed & WIPHY_PARAM_RETRY_SHORT
782 && (cfg_priv->conf->retry_short != wiphy->retry_short)) { 1276 && (cfg->conf->retry_short != wiphy->retry_short)) {
783 cfg_priv->conf->retry_short = wiphy->retry_short; 1277 cfg->conf->retry_short = wiphy->retry_short;
784 err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false); 1278 err = brcmf_set_retry(ndev, cfg->conf->retry_short, false);
785 if (!err) 1279 if (!err)
786 goto done; 1280 goto done;
787 } 1281 }
@@ -791,61 +1285,6 @@ done:
791 return err; 1285 return err;
792} 1286}
793 1287
794static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
795{
796 switch (item) {
797 case WL_PROF_SEC:
798 return &cfg_priv->profile->sec;
799 case WL_PROF_BSSID:
800 return &cfg_priv->profile->bssid;
801 case WL_PROF_SSID:
802 return &cfg_priv->profile->ssid;
803 }
804 WL_ERR("invalid item (%d)\n", item);
805 return NULL;
806}
807
808static s32
809brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
810 const struct brcmf_event_msg *e, void *data, s32 item)
811{
812 s32 err = 0;
813 struct brcmf_ssid *ssid;
814
815 switch (item) {
816 case WL_PROF_SSID:
817 ssid = (struct brcmf_ssid *) data;
818 memset(cfg_priv->profile->ssid.SSID, 0,
819 sizeof(cfg_priv->profile->ssid.SSID));
820 memcpy(cfg_priv->profile->ssid.SSID,
821 ssid->SSID, ssid->SSID_len);
822 cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
823 break;
824 case WL_PROF_BSSID:
825 if (data)
826 memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
827 else
828 memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
829 break;
830 case WL_PROF_SEC:
831 memcpy(&cfg_priv->profile->sec, data,
832 sizeof(cfg_priv->profile->sec));
833 break;
834 case WL_PROF_BEACONINT:
835 cfg_priv->profile->beacon_interval = *(u16 *)data;
836 break;
837 case WL_PROF_DTIMPERIOD:
838 cfg_priv->profile->dtim_period = *(u8 *)data;
839 break;
840 default:
841 WL_ERR("unsupported item (%d)\n", item);
842 err = -EOPNOTSUPP;
843 break;
844 }
845
846 return err;
847}
848
849static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof) 1288static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
850{ 1289{
851 memset(prof, 0, sizeof(*prof)); 1290 memset(prof, 0, sizeof(*prof));
@@ -878,20 +1317,20 @@ static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
878 } 1317 }
879} 1318}
880 1319
881static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv) 1320static void brcmf_link_down(struct brcmf_cfg80211_info *cfg)
882{ 1321{
883 struct net_device *ndev = NULL; 1322 struct net_device *ndev = NULL;
884 s32 err = 0; 1323 s32 err = 0;
885 1324
886 WL_TRACE("Enter\n"); 1325 WL_TRACE("Enter\n");
887 1326
888 if (cfg_priv->link_up) { 1327 if (cfg->link_up) {
889 ndev = cfg_to_ndev(cfg_priv); 1328 ndev = cfg_to_ndev(cfg);
890 WL_INFO("Call WLC_DISASSOC to stop excess roaming\n "); 1329 WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
891 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0); 1330 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
892 if (err) 1331 if (err)
893 WL_ERR("WLC_DISASSOC failed (%d)\n", err); 1332 WL_ERR("WLC_DISASSOC failed (%d)\n", err);
894 cfg_priv->link_up = false; 1333 cfg->link_up = false;
895 } 1334 }
896 WL_TRACE("Exit\n"); 1335 WL_TRACE("Exit\n");
897} 1336}
@@ -900,13 +1339,13 @@ static s32
900brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, 1339brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
901 struct cfg80211_ibss_params *params) 1340 struct cfg80211_ibss_params *params)
902{ 1341{
903 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1342 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1343 struct brcmf_cfg80211_profile *profile = cfg->profile;
904 struct brcmf_join_params join_params; 1344 struct brcmf_join_params join_params;
905 size_t join_params_size = 0; 1345 size_t join_params_size = 0;
906 s32 err = 0; 1346 s32 err = 0;
907 s32 wsec = 0; 1347 s32 wsec = 0;
908 s32 bcnprd; 1348 s32 bcnprd;
909 struct brcmf_ssid ssid;
910 1349
911 WL_TRACE("Enter\n"); 1350 WL_TRACE("Enter\n");
912 if (!check_sys_up(wiphy)) 1351 if (!check_sys_up(wiphy))
@@ -919,7 +1358,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
919 return -EOPNOTSUPP; 1358 return -EOPNOTSUPP;
920 } 1359 }
921 1360
922 set_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1361 set_bit(WL_STATUS_CONNECTING, &cfg->status);
923 1362
924 if (params->bssid) 1363 if (params->bssid)
925 WL_CONN("BSSID: %pM\n", params->bssid); 1364 WL_CONN("BSSID: %pM\n", params->bssid);
@@ -982,40 +1421,38 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
982 memset(&join_params, 0, sizeof(struct brcmf_join_params)); 1421 memset(&join_params, 0, sizeof(struct brcmf_join_params));
983 1422
984 /* SSID */ 1423 /* SSID */
985 ssid.SSID_len = min_t(u32, params->ssid_len, 32); 1424 profile->ssid.SSID_len = min_t(u32, params->ssid_len, 32);
986 memcpy(ssid.SSID, params->ssid, ssid.SSID_len); 1425 memcpy(profile->ssid.SSID, params->ssid, profile->ssid.SSID_len);
987 memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len); 1426 memcpy(join_params.ssid_le.SSID, params->ssid, profile->ssid.SSID_len);
988 join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); 1427 join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
989 join_params_size = sizeof(join_params.ssid_le); 1428 join_params_size = sizeof(join_params.ssid_le);
990 brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
991 1429
992 /* BSSID */ 1430 /* BSSID */
993 if (params->bssid) { 1431 if (params->bssid) {
994 memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN); 1432 memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
995 join_params_size = sizeof(join_params.ssid_le) + 1433 join_params_size = sizeof(join_params.ssid_le) +
996 BRCMF_ASSOC_PARAMS_FIXED_SIZE; 1434 BRCMF_ASSOC_PARAMS_FIXED_SIZE;
1435 memcpy(profile->bssid, params->bssid, ETH_ALEN);
997 } else { 1436 } else {
998 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); 1437 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
1438 memset(profile->bssid, 0, ETH_ALEN);
999 } 1439 }
1000 1440
1001 brcmf_update_prof(cfg_priv, NULL,
1002 &join_params.params_le.bssid, WL_PROF_BSSID);
1003
1004 /* Channel */ 1441 /* Channel */
1005 if (params->channel) { 1442 if (params->channel) {
1006 u32 target_channel; 1443 u32 target_channel;
1007 1444
1008 cfg_priv->channel = 1445 cfg->channel =
1009 ieee80211_frequency_to_channel( 1446 ieee80211_frequency_to_channel(
1010 params->channel->center_freq); 1447 params->channel->center_freq);
1011 if (params->channel_fixed) { 1448 if (params->channel_fixed) {
1012 /* adding chanspec */ 1449 /* adding chanspec */
1013 brcmf_ch_to_chanspec(cfg_priv->channel, 1450 brcmf_ch_to_chanspec(cfg->channel,
1014 &join_params, &join_params_size); 1451 &join_params, &join_params_size);
1015 } 1452 }
1016 1453
1017 /* set channel for starter */ 1454 /* set channel for starter */
1018 target_channel = cfg_priv->channel; 1455 target_channel = cfg->channel;
1019 err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL, 1456 err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
1020 &target_channel); 1457 &target_channel);
1021 if (err) { 1458 if (err) {
@@ -1023,9 +1460,9 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1023 goto done; 1460 goto done;
1024 } 1461 }
1025 } else 1462 } else
1026 cfg_priv->channel = 0; 1463 cfg->channel = 0;
1027 1464
1028 cfg_priv->ibss_starter = false; 1465 cfg->ibss_starter = false;
1029 1466
1030 1467
1031 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, 1468 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
@@ -1037,7 +1474,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1037 1474
1038done: 1475done:
1039 if (err) 1476 if (err)
1040 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1477 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
1041 WL_TRACE("Exit\n"); 1478 WL_TRACE("Exit\n");
1042 return err; 1479 return err;
1043} 1480}
@@ -1045,14 +1482,14 @@ done:
1045static s32 1482static s32
1046brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) 1483brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
1047{ 1484{
1048 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1485 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1049 s32 err = 0; 1486 s32 err = 0;
1050 1487
1051 WL_TRACE("Enter\n"); 1488 WL_TRACE("Enter\n");
1052 if (!check_sys_up(wiphy)) 1489 if (!check_sys_up(wiphy))
1053 return -EIO; 1490 return -EIO;
1054 1491
1055 brcmf_link_down(cfg_priv); 1492 brcmf_link_down(cfg);
1056 1493
1057 WL_TRACE("Exit\n"); 1494 WL_TRACE("Exit\n");
1058 1495
@@ -1062,7 +1499,8 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
1062static s32 brcmf_set_wpa_version(struct net_device *ndev, 1499static s32 brcmf_set_wpa_version(struct net_device *ndev,
1063 struct cfg80211_connect_params *sme) 1500 struct cfg80211_connect_params *sme)
1064{ 1501{
1065 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1502 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1503 struct brcmf_cfg80211_profile *profile = cfg->profile;
1066 struct brcmf_cfg80211_security *sec; 1504 struct brcmf_cfg80211_security *sec;
1067 s32 val = 0; 1505 s32 val = 0;
1068 s32 err = 0; 1506 s32 err = 0;
@@ -1079,7 +1517,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
1079 WL_ERR("set wpa_auth failed (%d)\n", err); 1517 WL_ERR("set wpa_auth failed (%d)\n", err);
1080 return err; 1518 return err;
1081 } 1519 }
1082 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1520 sec = &profile->sec;
1083 sec->wpa_versions = sme->crypto.wpa_versions; 1521 sec->wpa_versions = sme->crypto.wpa_versions;
1084 return err; 1522 return err;
1085} 1523}
@@ -1087,7 +1525,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
1087static s32 brcmf_set_auth_type(struct net_device *ndev, 1525static s32 brcmf_set_auth_type(struct net_device *ndev,
1088 struct cfg80211_connect_params *sme) 1526 struct cfg80211_connect_params *sme)
1089{ 1527{
1090 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1528 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1529 struct brcmf_cfg80211_profile *profile = cfg->profile;
1091 struct brcmf_cfg80211_security *sec; 1530 struct brcmf_cfg80211_security *sec;
1092 s32 val = 0; 1531 s32 val = 0;
1093 s32 err = 0; 1532 s32 err = 0;
@@ -1118,7 +1557,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
1118 WL_ERR("set auth failed (%d)\n", err); 1557 WL_ERR("set auth failed (%d)\n", err);
1119 return err; 1558 return err;
1120 } 1559 }
1121 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1560 sec = &profile->sec;
1122 sec->auth_type = sme->auth_type; 1561 sec->auth_type = sme->auth_type;
1123 return err; 1562 return err;
1124} 1563}
@@ -1127,7 +1566,8 @@ static s32
1127brcmf_set_set_cipher(struct net_device *ndev, 1566brcmf_set_set_cipher(struct net_device *ndev,
1128 struct cfg80211_connect_params *sme) 1567 struct cfg80211_connect_params *sme)
1129{ 1568{
1130 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1569 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1570 struct brcmf_cfg80211_profile *profile = cfg->profile;
1131 struct brcmf_cfg80211_security *sec; 1571 struct brcmf_cfg80211_security *sec;
1132 s32 pval = 0; 1572 s32 pval = 0;
1133 s32 gval = 0; 1573 s32 gval = 0;
@@ -1183,7 +1623,7 @@ brcmf_set_set_cipher(struct net_device *ndev,
1183 return err; 1623 return err;
1184 } 1624 }
1185 1625
1186 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1626 sec = &profile->sec;
1187 sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0]; 1627 sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
1188 sec->cipher_group = sme->crypto.cipher_group; 1628 sec->cipher_group = sme->crypto.cipher_group;
1189 1629
@@ -1193,7 +1633,8 @@ brcmf_set_set_cipher(struct net_device *ndev,
1193static s32 1633static s32
1194brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) 1634brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
1195{ 1635{
1196 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1636 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1637 struct brcmf_cfg80211_profile *profile = cfg->profile;
1197 struct brcmf_cfg80211_security *sec; 1638 struct brcmf_cfg80211_security *sec;
1198 s32 val = 0; 1639 s32 val = 0;
1199 s32 err = 0; 1640 s32 err = 0;
@@ -1239,74 +1680,76 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
1239 return err; 1680 return err;
1240 } 1681 }
1241 } 1682 }
1242 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1683 sec = &profile->sec;
1243 sec->wpa_auth = sme->crypto.akm_suites[0]; 1684 sec->wpa_auth = sme->crypto.akm_suites[0];
1244 1685
1245 return err; 1686 return err;
1246} 1687}
1247 1688
1248static s32 1689static s32
1249brcmf_set_wep_sharedkey(struct net_device *ndev, 1690brcmf_set_sharedkey(struct net_device *ndev,
1250 struct cfg80211_connect_params *sme) 1691 struct cfg80211_connect_params *sme)
1251{ 1692{
1252 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1693 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1694 struct brcmf_cfg80211_profile *profile = cfg->profile;
1253 struct brcmf_cfg80211_security *sec; 1695 struct brcmf_cfg80211_security *sec;
1254 struct brcmf_wsec_key key; 1696 struct brcmf_wsec_key key;
1255 s32 val; 1697 s32 val;
1256 s32 err = 0; 1698 s32 err = 0;
1699 s32 bssidx;
1257 1700
1258 WL_CONN("key len (%d)\n", sme->key_len); 1701 WL_CONN("key len (%d)\n", sme->key_len);
1259 1702
1260 if (sme->key_len == 0) 1703 if (sme->key_len == 0)
1261 return 0; 1704 return 0;
1262 1705
1263 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1706 sec = &profile->sec;
1264 WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n", 1707 WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
1265 sec->wpa_versions, sec->cipher_pairwise); 1708 sec->wpa_versions, sec->cipher_pairwise);
1266 1709
1267 if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) 1710 if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
1268 return 0; 1711 return 0;
1269 1712
1270 if (sec->cipher_pairwise & 1713 if (!(sec->cipher_pairwise &
1271 (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) { 1714 (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)))
1272 memset(&key, 0, sizeof(key)); 1715 return 0;
1273 key.len = (u32) sme->key_len;
1274 key.index = (u32) sme->key_idx;
1275 if (key.len > sizeof(key.data)) {
1276 WL_ERR("Too long key length (%u)\n", key.len);
1277 return -EINVAL;
1278 }
1279 memcpy(key.data, sme->key, key.len);
1280 key.flags = BRCMF_PRIMARY_KEY;
1281 switch (sec->cipher_pairwise) {
1282 case WLAN_CIPHER_SUITE_WEP40:
1283 key.algo = CRYPTO_ALGO_WEP1;
1284 break;
1285 case WLAN_CIPHER_SUITE_WEP104:
1286 key.algo = CRYPTO_ALGO_WEP128;
1287 break;
1288 default:
1289 WL_ERR("Invalid algorithm (%d)\n",
1290 sme->crypto.ciphers_pairwise[0]);
1291 return -EINVAL;
1292 }
1293 /* Set the new key/index */
1294 WL_CONN("key length (%d) key index (%d) algo (%d)\n",
1295 key.len, key.index, key.algo);
1296 WL_CONN("key \"%s\"\n", key.data);
1297 err = send_key_to_dongle(ndev, &key);
1298 if (err)
1299 return err;
1300 1716
1301 if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) { 1717 memset(&key, 0, sizeof(key));
1302 WL_CONN("set auth_type to shared key\n"); 1718 key.len = (u32) sme->key_len;
1303 val = 1; /* shared key */ 1719 key.index = (u32) sme->key_idx;
1304 err = brcmf_dev_intvar_set(ndev, "auth", val); 1720 if (key.len > sizeof(key.data)) {
1305 if (err) { 1721 WL_ERR("Too long key length (%u)\n", key.len);
1306 WL_ERR("set auth failed (%d)\n", err); 1722 return -EINVAL;
1307 return err; 1723 }
1308 } 1724 memcpy(key.data, sme->key, key.len);
1309 } 1725 key.flags = BRCMF_PRIMARY_KEY;
1726 switch (sec->cipher_pairwise) {
1727 case WLAN_CIPHER_SUITE_WEP40:
1728 key.algo = CRYPTO_ALGO_WEP1;
1729 break;
1730 case WLAN_CIPHER_SUITE_WEP104:
1731 key.algo = CRYPTO_ALGO_WEP128;
1732 break;
1733 default:
1734 WL_ERR("Invalid algorithm (%d)\n",
1735 sme->crypto.ciphers_pairwise[0]);
1736 return -EINVAL;
1737 }
1738 /* Set the new key/index */
1739 WL_CONN("key length (%d) key index (%d) algo (%d)\n",
1740 key.len, key.index, key.algo);
1741 WL_CONN("key \"%s\"\n", key.data);
1742 bssidx = brcmf_find_bssidx(cfg, ndev);
1743 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1744 if (err)
1745 return err;
1746
1747 if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
1748 WL_CONN("set auth_type to shared key\n");
1749 val = WL_AUTH_SHARED_KEY; /* shared key */
1750 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
1751 if (err)
1752 WL_ERR("set auth failed (%d)\n", err);
1310 } 1753 }
1311 return err; 1754 return err;
1312} 1755}
@@ -1315,7 +1758,8 @@ static s32
1315brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, 1758brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1316 struct cfg80211_connect_params *sme) 1759 struct cfg80211_connect_params *sme)
1317{ 1760{
1318 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1761 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1762 struct brcmf_cfg80211_profile *profile = cfg->profile;
1319 struct ieee80211_channel *chan = sme->channel; 1763 struct ieee80211_channel *chan = sme->channel;
1320 struct brcmf_join_params join_params; 1764 struct brcmf_join_params join_params;
1321 size_t join_params_size; 1765 size_t join_params_size;
@@ -1332,15 +1776,15 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1332 return -EOPNOTSUPP; 1776 return -EOPNOTSUPP;
1333 } 1777 }
1334 1778
1335 set_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1779 set_bit(WL_STATUS_CONNECTING, &cfg->status);
1336 1780
1337 if (chan) { 1781 if (chan) {
1338 cfg_priv->channel = 1782 cfg->channel =
1339 ieee80211_frequency_to_channel(chan->center_freq); 1783 ieee80211_frequency_to_channel(chan->center_freq);
1340 WL_CONN("channel (%d), center_req (%d)\n", 1784 WL_CONN("channel (%d), center_req (%d)\n",
1341 cfg_priv->channel, chan->center_freq); 1785 cfg->channel, chan->center_freq);
1342 } else 1786 } else
1343 cfg_priv->channel = 0; 1787 cfg->channel = 0;
1344 1788
1345 WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len); 1789 WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
1346 1790
@@ -1368,20 +1812,20 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1368 goto done; 1812 goto done;
1369 } 1813 }
1370 1814
1371 err = brcmf_set_wep_sharedkey(ndev, sme); 1815 err = brcmf_set_sharedkey(ndev, sme);
1372 if (err) { 1816 if (err) {
1373 WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err); 1817 WL_ERR("brcmf_set_sharedkey failed (%d)\n", err);
1374 goto done; 1818 goto done;
1375 } 1819 }
1376 1820
1377 memset(&join_params, 0, sizeof(join_params)); 1821 memset(&join_params, 0, sizeof(join_params));
1378 join_params_size = sizeof(join_params.ssid_le); 1822 join_params_size = sizeof(join_params.ssid_le);
1379 1823
1380 ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len); 1824 profile->ssid.SSID_len = min_t(u32,
1381 memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len); 1825 sizeof(ssid.SSID), (u32)sme->ssid_len);
1382 memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len); 1826 memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
1383 join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); 1827 memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
1384 brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID); 1828 join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
1385 1829
1386 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); 1830 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
1387 1831
@@ -1389,7 +1833,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1389 WL_CONN("ssid \"%s\", len (%d)\n", 1833 WL_CONN("ssid \"%s\", len (%d)\n",
1390 ssid.SSID, ssid.SSID_len); 1834 ssid.SSID, ssid.SSID_len);
1391 1835
1392 brcmf_ch_to_chanspec(cfg_priv->channel, 1836 brcmf_ch_to_chanspec(cfg->channel,
1393 &join_params, &join_params_size); 1837 &join_params, &join_params_size);
1394 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, 1838 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
1395 &join_params, join_params_size); 1839 &join_params, join_params_size);
@@ -1398,7 +1842,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1398 1842
1399done: 1843done:
1400 if (err) 1844 if (err)
1401 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1845 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
1402 WL_TRACE("Exit\n"); 1846 WL_TRACE("Exit\n");
1403 return err; 1847 return err;
1404} 1848}
@@ -1407,7 +1851,8 @@ static s32
1407brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, 1851brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
1408 u16 reason_code) 1852 u16 reason_code)
1409{ 1853{
1410 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1854 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1855 struct brcmf_cfg80211_profile *profile = cfg->profile;
1411 struct brcmf_scb_val_le scbval; 1856 struct brcmf_scb_val_le scbval;
1412 s32 err = 0; 1857 s32 err = 0;
1413 1858
@@ -1415,16 +1860,16 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
1415 if (!check_sys_up(wiphy)) 1860 if (!check_sys_up(wiphy))
1416 return -EIO; 1861 return -EIO;
1417 1862
1418 clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 1863 clear_bit(WL_STATUS_CONNECTED, &cfg->status);
1419 1864
1420 memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN); 1865 memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
1421 scbval.val = cpu_to_le32(reason_code); 1866 scbval.val = cpu_to_le32(reason_code);
1422 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval, 1867 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
1423 sizeof(struct brcmf_scb_val_le)); 1868 sizeof(struct brcmf_scb_val_le));
1424 if (err) 1869 if (err)
1425 WL_ERR("error (%d)\n", err); 1870 WL_ERR("error (%d)\n", err);
1426 1871
1427 cfg_priv->link_up = false; 1872 cfg->link_up = false;
1428 1873
1429 WL_TRACE("Exit\n"); 1874 WL_TRACE("Exit\n");
1430 return err; 1875 return err;
@@ -1435,8 +1880,8 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1435 enum nl80211_tx_power_setting type, s32 mbm) 1880 enum nl80211_tx_power_setting type, s32 mbm)
1436{ 1881{
1437 1882
1438 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1883 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1439 struct net_device *ndev = cfg_to_ndev(cfg_priv); 1884 struct net_device *ndev = cfg_to_ndev(cfg);
1440 u16 txpwrmw; 1885 u16 txpwrmw;
1441 s32 err = 0; 1886 s32 err = 0;
1442 s32 disable = 0; 1887 s32 disable = 0;
@@ -1472,7 +1917,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1472 (s32) (brcmf_mw_to_qdbm(txpwrmw))); 1917 (s32) (brcmf_mw_to_qdbm(txpwrmw)));
1473 if (err) 1918 if (err)
1474 WL_ERR("qtxpower error (%d)\n", err); 1919 WL_ERR("qtxpower error (%d)\n", err);
1475 cfg_priv->conf->tx_power = dbm; 1920 cfg->conf->tx_power = dbm;
1476 1921
1477done: 1922done:
1478 WL_TRACE("Exit\n"); 1923 WL_TRACE("Exit\n");
@@ -1481,8 +1926,8 @@ done:
1481 1926
1482static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm) 1927static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
1483{ 1928{
1484 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1929 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1485 struct net_device *ndev = cfg_to_ndev(cfg_priv); 1930 struct net_device *ndev = cfg_to_ndev(cfg);
1486 s32 txpwrdbm; 1931 s32 txpwrdbm;
1487 u8 result; 1932 u8 result;
1488 s32 err = 0; 1933 s32 err = 0;
@@ -1509,16 +1954,19 @@ static s32
1509brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, 1954brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
1510 u8 key_idx, bool unicast, bool multicast) 1955 u8 key_idx, bool unicast, bool multicast)
1511{ 1956{
1957 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1512 u32 index; 1958 u32 index;
1513 u32 wsec; 1959 u32 wsec;
1514 s32 err = 0; 1960 s32 err = 0;
1961 s32 bssidx;
1515 1962
1516 WL_TRACE("Enter\n"); 1963 WL_TRACE("Enter\n");
1517 WL_CONN("key index (%d)\n", key_idx); 1964 WL_CONN("key index (%d)\n", key_idx);
1518 if (!check_sys_up(wiphy)) 1965 if (!check_sys_up(wiphy))
1519 return -EIO; 1966 return -EIO;
1520 1967
1521 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec); 1968 bssidx = brcmf_find_bssidx(cfg, ndev);
1969 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
1522 if (err) { 1970 if (err) {
1523 WL_ERR("WLC_GET_WSEC error (%d)\n", err); 1971 WL_ERR("WLC_GET_WSEC error (%d)\n", err);
1524 goto done; 1972 goto done;
@@ -1541,9 +1989,11 @@ static s32
1541brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, 1989brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1542 u8 key_idx, const u8 *mac_addr, struct key_params *params) 1990 u8 key_idx, const u8 *mac_addr, struct key_params *params)
1543{ 1991{
1992 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1544 struct brcmf_wsec_key key; 1993 struct brcmf_wsec_key key;
1545 struct brcmf_wsec_key_le key_le; 1994 struct brcmf_wsec_key_le key_le;
1546 s32 err = 0; 1995 s32 err = 0;
1996 s32 bssidx;
1547 1997
1548 memset(&key, 0, sizeof(key)); 1998 memset(&key, 0, sizeof(key));
1549 key.index = (u32) key_idx; 1999 key.index = (u32) key_idx;
@@ -1552,12 +2002,13 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1552 if (!is_multicast_ether_addr(mac_addr)) 2002 if (!is_multicast_ether_addr(mac_addr))
1553 memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN); 2003 memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
1554 key.len = (u32) params->key_len; 2004 key.len = (u32) params->key_len;
2005 bssidx = brcmf_find_bssidx(cfg, ndev);
1555 /* check for key index change */ 2006 /* check for key index change */
1556 if (key.len == 0) { 2007 if (key.len == 0) {
1557 /* key delete */ 2008 /* key delete */
1558 err = send_key_to_dongle(ndev, &key); 2009 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1559 if (err) 2010 if (err)
1560 return err; 2011 WL_ERR("key delete error (%d)\n", err);
1561 } else { 2012 } else {
1562 if (key.len > sizeof(key.data)) { 2013 if (key.len > sizeof(key.data)) {
1563 WL_ERR("Invalid key length (%d)\n", key.len); 2014 WL_ERR("Invalid key length (%d)\n", key.len);
@@ -1613,12 +2064,12 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1613 convert_key_from_CPU(&key, &key_le); 2064 convert_key_from_CPU(&key, &key_le);
1614 2065
1615 brcmf_netdev_wait_pend8021x(ndev); 2066 brcmf_netdev_wait_pend8021x(ndev);
1616 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, 2067 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
1617 sizeof(key_le)); 2068 sizeof(key_le),
1618 if (err) { 2069 cfg->extra_buf,
1619 WL_ERR("WLC_SET_KEY error (%d)\n", err); 2070 WL_EXTRA_BUF_MAX, bssidx);
1620 return err; 2071 if (err)
1621 } 2072 WL_ERR("wsec_key error (%d)\n", err);
1622 } 2073 }
1623 return err; 2074 return err;
1624} 2075}
@@ -1628,11 +2079,13 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
1628 u8 key_idx, bool pairwise, const u8 *mac_addr, 2079 u8 key_idx, bool pairwise, const u8 *mac_addr,
1629 struct key_params *params) 2080 struct key_params *params)
1630{ 2081{
2082 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1631 struct brcmf_wsec_key key; 2083 struct brcmf_wsec_key key;
1632 s32 val; 2084 s32 val;
1633 s32 wsec; 2085 s32 wsec;
1634 s32 err = 0; 2086 s32 err = 0;
1635 u8 keybuf[8]; 2087 u8 keybuf[8];
2088 s32 bssidx;
1636 2089
1637 WL_TRACE("Enter\n"); 2090 WL_TRACE("Enter\n");
1638 WL_CONN("key index (%d)\n", key_idx); 2091 WL_CONN("key index (%d)\n", key_idx);
@@ -1659,25 +2112,33 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
1659 switch (params->cipher) { 2112 switch (params->cipher) {
1660 case WLAN_CIPHER_SUITE_WEP40: 2113 case WLAN_CIPHER_SUITE_WEP40:
1661 key.algo = CRYPTO_ALGO_WEP1; 2114 key.algo = CRYPTO_ALGO_WEP1;
2115 val = WEP_ENABLED;
1662 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); 2116 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
1663 break; 2117 break;
1664 case WLAN_CIPHER_SUITE_WEP104: 2118 case WLAN_CIPHER_SUITE_WEP104:
1665 key.algo = CRYPTO_ALGO_WEP128; 2119 key.algo = CRYPTO_ALGO_WEP128;
2120 val = WEP_ENABLED;
1666 WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); 2121 WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
1667 break; 2122 break;
1668 case WLAN_CIPHER_SUITE_TKIP: 2123 case WLAN_CIPHER_SUITE_TKIP:
1669 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 2124 if (cfg->conf->mode != WL_MODE_AP) {
1670 memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); 2125 WL_CONN("Swapping key\n");
1671 memcpy(&key.data[16], keybuf, sizeof(keybuf)); 2126 memcpy(keybuf, &key.data[24], sizeof(keybuf));
2127 memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
2128 memcpy(&key.data[16], keybuf, sizeof(keybuf));
2129 }
1672 key.algo = CRYPTO_ALGO_TKIP; 2130 key.algo = CRYPTO_ALGO_TKIP;
2131 val = TKIP_ENABLED;
1673 WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); 2132 WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
1674 break; 2133 break;
1675 case WLAN_CIPHER_SUITE_AES_CMAC: 2134 case WLAN_CIPHER_SUITE_AES_CMAC:
1676 key.algo = CRYPTO_ALGO_AES_CCM; 2135 key.algo = CRYPTO_ALGO_AES_CCM;
2136 val = AES_ENABLED;
1677 WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); 2137 WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
1678 break; 2138 break;
1679 case WLAN_CIPHER_SUITE_CCMP: 2139 case WLAN_CIPHER_SUITE_CCMP:
1680 key.algo = CRYPTO_ALGO_AES_CCM; 2140 key.algo = CRYPTO_ALGO_AES_CCM;
2141 val = AES_ENABLED;
1681 WL_CONN("WLAN_CIPHER_SUITE_CCMP\n"); 2142 WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
1682 break; 2143 break;
1683 default: 2144 default:
@@ -1686,28 +2147,23 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
1686 goto done; 2147 goto done;
1687 } 2148 }
1688 2149
1689 err = send_key_to_dongle(ndev, &key); /* Set the new key/index */ 2150 bssidx = brcmf_find_bssidx(cfg, ndev);
2151 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1690 if (err) 2152 if (err)
1691 goto done; 2153 goto done;
1692 2154
1693 val = WEP_ENABLED; 2155 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
1694 err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
1695 if (err) { 2156 if (err) {
1696 WL_ERR("get wsec error (%d)\n", err); 2157 WL_ERR("get wsec error (%d)\n", err);
1697 goto done; 2158 goto done;
1698 } 2159 }
1699 wsec &= ~(WEP_ENABLED);
1700 wsec |= val; 2160 wsec |= val;
1701 err = brcmf_dev_intvar_set(ndev, "wsec", wsec); 2161 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
1702 if (err) { 2162 if (err) {
1703 WL_ERR("set wsec error (%d)\n", err); 2163 WL_ERR("set wsec error (%d)\n", err);
1704 goto done; 2164 goto done;
1705 } 2165 }
1706 2166
1707 val = 1; /* assume shared key. otherwise 0 */
1708 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
1709 if (err)
1710 WL_ERR("WLC_SET_AUTH error (%d)\n", err);
1711done: 2167done:
1712 WL_TRACE("Exit\n"); 2168 WL_TRACE("Exit\n");
1713 return err; 2169 return err;
@@ -1717,10 +2173,10 @@ static s32
1717brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 2173brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
1718 u8 key_idx, bool pairwise, const u8 *mac_addr) 2174 u8 key_idx, bool pairwise, const u8 *mac_addr)
1719{ 2175{
2176 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1720 struct brcmf_wsec_key key; 2177 struct brcmf_wsec_key key;
1721 s32 err = 0; 2178 s32 err = 0;
1722 s32 val; 2179 s32 bssidx;
1723 s32 wsec;
1724 2180
1725 WL_TRACE("Enter\n"); 2181 WL_TRACE("Enter\n");
1726 if (!check_sys_up(wiphy)) 2182 if (!check_sys_up(wiphy))
@@ -1735,7 +2191,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
1735 WL_CONN("key index (%d)\n", key_idx); 2191 WL_CONN("key index (%d)\n", key_idx);
1736 2192
1737 /* Set the new key/index */ 2193 /* Set the new key/index */
1738 err = send_key_to_dongle(ndev, &key); 2194 bssidx = brcmf_find_bssidx(cfg, ndev);
2195 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1739 if (err) { 2196 if (err) {
1740 if (err == -EINVAL) { 2197 if (err == -EINVAL) {
1741 if (key.index >= DOT11_MAX_DEFAULT_KEYS) 2198 if (key.index >= DOT11_MAX_DEFAULT_KEYS)
@@ -1744,35 +2201,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
1744 } 2201 }
1745 /* Ignore this error, may happen during DISASSOC */ 2202 /* Ignore this error, may happen during DISASSOC */
1746 err = -EAGAIN; 2203 err = -EAGAIN;
1747 goto done;
1748 } 2204 }
1749 2205
1750 val = 0;
1751 err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
1752 if (err) {
1753 WL_ERR("get wsec error (%d)\n", err);
1754 /* Ignore this error, may happen during DISASSOC */
1755 err = -EAGAIN;
1756 goto done;
1757 }
1758 wsec &= ~(WEP_ENABLED);
1759 wsec |= val;
1760 err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
1761 if (err) {
1762 WL_ERR("set wsec error (%d)\n", err);
1763 /* Ignore this error, may happen during DISASSOC */
1764 err = -EAGAIN;
1765 goto done;
1766 }
1767
1768 val = 0; /* assume open key. otherwise 1 */
1769 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
1770 if (err) {
1771 WL_ERR("WLC_SET_AUTH error (%d)\n", err);
1772 /* Ignore this error, may happen during DISASSOC */
1773 err = -EAGAIN;
1774 }
1775done:
1776 WL_TRACE("Exit\n"); 2206 WL_TRACE("Exit\n");
1777 return err; 2207 return err;
1778} 2208}
@@ -1783,10 +2213,12 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
1783 void (*callback) (void *cookie, struct key_params * params)) 2213 void (*callback) (void *cookie, struct key_params * params))
1784{ 2214{
1785 struct key_params params; 2215 struct key_params params;
1786 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 2216 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2217 struct brcmf_cfg80211_profile *profile = cfg->profile;
1787 struct brcmf_cfg80211_security *sec; 2218 struct brcmf_cfg80211_security *sec;
1788 s32 wsec; 2219 s32 wsec;
1789 s32 err = 0; 2220 s32 err = 0;
2221 s32 bssidx;
1790 2222
1791 WL_TRACE("Enter\n"); 2223 WL_TRACE("Enter\n");
1792 WL_CONN("key index (%d)\n", key_idx); 2224 WL_CONN("key index (%d)\n", key_idx);
@@ -1795,16 +2227,17 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
1795 2227
1796 memset(&params, 0, sizeof(params)); 2228 memset(&params, 0, sizeof(params));
1797 2229
1798 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec); 2230 bssidx = brcmf_find_bssidx(cfg, ndev);
2231 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
1799 if (err) { 2232 if (err) {
1800 WL_ERR("WLC_GET_WSEC error (%d)\n", err); 2233 WL_ERR("WLC_GET_WSEC error (%d)\n", err);
1801 /* Ignore this error, may happen during DISASSOC */ 2234 /* Ignore this error, may happen during DISASSOC */
1802 err = -EAGAIN; 2235 err = -EAGAIN;
1803 goto done; 2236 goto done;
1804 } 2237 }
1805 switch (wsec) { 2238 switch (wsec & ~SES_OW_ENABLED) {
1806 case WEP_ENABLED: 2239 case WEP_ENABLED:
1807 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 2240 sec = &profile->sec;
1808 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { 2241 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
1809 params.cipher = WLAN_CIPHER_SUITE_WEP40; 2242 params.cipher = WLAN_CIPHER_SUITE_WEP40;
1810 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); 2243 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
@@ -1844,53 +2277,73 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
1844 2277
1845static s32 2278static s32
1846brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, 2279brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
1847 u8 *mac, struct station_info *sinfo) 2280 u8 *mac, struct station_info *sinfo)
1848{ 2281{
1849 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 2282 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2283 struct brcmf_cfg80211_profile *profile = cfg->profile;
1850 struct brcmf_scb_val_le scb_val; 2284 struct brcmf_scb_val_le scb_val;
1851 int rssi; 2285 int rssi;
1852 s32 rate; 2286 s32 rate;
1853 s32 err = 0; 2287 s32 err = 0;
1854 u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID); 2288 u8 *bssid = profile->bssid;
2289 struct brcmf_sta_info_le *sta_info_le;
1855 2290
1856 WL_TRACE("Enter\n"); 2291 WL_TRACE("Enter, MAC %pM\n", mac);
1857 if (!check_sys_up(wiphy)) 2292 if (!check_sys_up(wiphy))
1858 return -EIO; 2293 return -EIO;
1859 2294
1860 if (memcmp(mac, bssid, ETH_ALEN)) { 2295 if (cfg->conf->mode == WL_MODE_AP) {
1861 WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X" 2296 err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
1862 "wl_bssid-%X:%X:%X:%X:%X:%X\n", 2297 cfg->dcmd_buf,
1863 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], 2298 WL_DCMD_LEN_MAX);
1864 bssid[0], bssid[1], bssid[2], bssid[3], 2299 if (err < 0) {
1865 bssid[4], bssid[5]); 2300 WL_ERR("GET STA INFO failed, %d\n", err);
1866 err = -ENOENT; 2301 goto done;
1867 goto done; 2302 }
1868 } 2303 sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
1869
1870 /* Report the current tx rate */
1871 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
1872 if (err) {
1873 WL_ERR("Could not get rate (%d)\n", err);
1874 } else {
1875 sinfo->filled |= STATION_INFO_TX_BITRATE;
1876 sinfo->txrate.legacy = rate * 5;
1877 WL_CONN("Rate %d Mbps\n", rate / 2);
1878 }
1879 2304
1880 if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) { 2305 sinfo->filled = STATION_INFO_INACTIVE_TIME;
1881 memset(&scb_val, 0, sizeof(scb_val)); 2306 sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
1882 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val, 2307 if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
1883 sizeof(struct brcmf_scb_val_le)); 2308 sinfo->filled |= STATION_INFO_CONNECTED_TIME;
2309 sinfo->connected_time = le32_to_cpu(sta_info_le->in);
2310 }
2311 WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
2312 sinfo->inactive_time, sinfo->connected_time);
2313 } else if (cfg->conf->mode == WL_MODE_BSS) {
2314 if (memcmp(mac, bssid, ETH_ALEN)) {
2315 WL_ERR("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
2316 mac, bssid);
2317 err = -ENOENT;
2318 goto done;
2319 }
2320 /* Report the current tx rate */
2321 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
1884 if (err) { 2322 if (err) {
1885 WL_ERR("Could not get rssi (%d)\n", err); 2323 WL_ERR("Could not get rate (%d)\n", err);
2324 goto done;
1886 } else { 2325 } else {
1887 rssi = le32_to_cpu(scb_val.val); 2326 sinfo->filled |= STATION_INFO_TX_BITRATE;
1888 sinfo->filled |= STATION_INFO_SIGNAL; 2327 sinfo->txrate.legacy = rate * 5;
1889 sinfo->signal = rssi; 2328 WL_CONN("Rate %d Mbps\n", rate / 2);
1890 WL_CONN("RSSI %d dBm\n", rssi);
1891 } 2329 }
1892 }
1893 2330
2331 if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
2332 memset(&scb_val, 0, sizeof(scb_val));
2333 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
2334 sizeof(scb_val));
2335 if (err) {
2336 WL_ERR("Could not get rssi (%d)\n", err);
2337 goto done;
2338 } else {
2339 rssi = le32_to_cpu(scb_val.val);
2340 sinfo->filled |= STATION_INFO_SIGNAL;
2341 sinfo->signal = rssi;
2342 WL_CONN("RSSI %d dBm\n", rssi);
2343 }
2344 }
2345 } else
2346 err = -EPERM;
1894done: 2347done:
1895 WL_TRACE("Exit\n"); 2348 WL_TRACE("Exit\n");
1896 return err; 2349 return err;
@@ -1902,7 +2355,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
1902{ 2355{
1903 s32 pm; 2356 s32 pm;
1904 s32 err = 0; 2357 s32 err = 0;
1905 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 2358 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1906 2359
1907 WL_TRACE("Enter\n"); 2360 WL_TRACE("Enter\n");
1908 2361
@@ -1910,14 +2363,13 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
1910 * Powersave enable/disable request is coming from the 2363 * Powersave enable/disable request is coming from the
1911 * cfg80211 even before the interface is up. In that 2364 * cfg80211 even before the interface is up. In that
1912 * scenario, driver will be storing the power save 2365 * scenario, driver will be storing the power save
1913 * preference in cfg_priv struct to apply this to 2366 * preference in cfg struct to apply this to
1914 * FW later while initializing the dongle 2367 * FW later while initializing the dongle
1915 */ 2368 */
1916 cfg_priv->pwr_save = enabled; 2369 cfg->pwr_save = enabled;
1917 if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) { 2370 if (!test_bit(WL_STATUS_READY, &cfg->status)) {
1918 2371
1919 WL_INFO("Device is not ready," 2372 WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
1920 "storing the value in cfg_priv struct\n");
1921 goto done; 2373 goto done;
1922 } 2374 }
1923 2375
@@ -1995,10 +2447,10 @@ done:
1995 return err; 2447 return err;
1996} 2448}
1997 2449
1998static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv, 2450static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
1999 struct brcmf_bss_info_le *bi) 2451 struct brcmf_bss_info_le *bi)
2000{ 2452{
2001 struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); 2453 struct wiphy *wiphy = cfg_to_wiphy(cfg);
2002 struct ieee80211_channel *notify_channel; 2454 struct ieee80211_channel *notify_channel;
2003 struct cfg80211_bss *bss; 2455 struct cfg80211_bss *bss;
2004 struct ieee80211_supported_band *band; 2456 struct ieee80211_supported_band *band;
@@ -2062,14 +2514,14 @@ next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
2062 le32_to_cpu(bss->length)); 2514 le32_to_cpu(bss->length));
2063} 2515}
2064 2516
2065static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv) 2517static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
2066{ 2518{
2067 struct brcmf_scan_results *bss_list; 2519 struct brcmf_scan_results *bss_list;
2068 struct brcmf_bss_info_le *bi = NULL; /* must be initialized */ 2520 struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
2069 s32 err = 0; 2521 s32 err = 0;
2070 int i; 2522 int i;
2071 2523
2072 bss_list = cfg_priv->bss_list; 2524 bss_list = cfg->bss_list;
2073 if (bss_list->version != BRCMF_BSS_INFO_VERSION) { 2525 if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
2074 WL_ERR("Version %d != WL_BSS_INFO_VERSION\n", 2526 WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
2075 bss_list->version); 2527 bss_list->version);
@@ -2078,17 +2530,17 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
2078 WL_SCAN("scanned AP count (%d)\n", bss_list->count); 2530 WL_SCAN("scanned AP count (%d)\n", bss_list->count);
2079 for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) { 2531 for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
2080 bi = next_bss_le(bss_list, bi); 2532 bi = next_bss_le(bss_list, bi);
2081 err = brcmf_inform_single_bss(cfg_priv, bi); 2533 err = brcmf_inform_single_bss(cfg, bi);
2082 if (err) 2534 if (err)
2083 break; 2535 break;
2084 } 2536 }
2085 return err; 2537 return err;
2086} 2538}
2087 2539
2088static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, 2540static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2089 struct net_device *ndev, const u8 *bssid) 2541 struct net_device *ndev, const u8 *bssid)
2090{ 2542{
2091 struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); 2543 struct wiphy *wiphy = cfg_to_wiphy(cfg);
2092 struct ieee80211_channel *notify_channel; 2544 struct ieee80211_channel *notify_channel;
2093 struct brcmf_bss_info_le *bi = NULL; 2545 struct brcmf_bss_info_le *bi = NULL;
2094 struct ieee80211_supported_band *band; 2546 struct ieee80211_supported_band *band;
@@ -2163,9 +2615,9 @@ CleanUp:
2163 return err; 2615 return err;
2164} 2616}
2165 2617
2166static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv) 2618static bool brcmf_is_ibssmode(struct brcmf_cfg80211_info *cfg)
2167{ 2619{
2168 return cfg_priv->conf->mode == WL_MODE_IBSS; 2620 return cfg->conf->mode == WL_MODE_IBSS;
2169} 2621}
2170 2622
2171/* 2623/*
@@ -2182,22 +2634,62 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
2182 totlen = buflen; 2634 totlen = buflen;
2183 2635
2184 /* find tagged parameter */ 2636 /* find tagged parameter */
2185 while (totlen >= 2) { 2637 while (totlen >= TLV_HDR_LEN) {
2186 int len = elt->len; 2638 int len = elt->len;
2187 2639
2188 /* validate remaining totlen */ 2640 /* validate remaining totlen */
2189 if ((elt->id == key) && (totlen >= (len + 2))) 2641 if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
2190 return elt; 2642 return elt;
2191 2643
2192 elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2)); 2644 elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
2193 totlen -= (len + 2); 2645 totlen -= (len + TLV_HDR_LEN);
2194 } 2646 }
2195 2647
2196 return NULL; 2648 return NULL;
2197} 2649}
2198 2650
2199static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) 2651/* Is any of the tlvs the expected entry? If
2652 * not update the tlvs buffer pointer/length.
2653 */
2654static bool
2655brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
2656 u8 *oui, u32 oui_len, u8 type)
2657{
2658 /* If the contents match the OUI and the type */
2659 if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
2660 !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
2661 type == ie[TLV_BODY_OFF + oui_len]) {
2662 return true;
2663 }
2664
2665 if (tlvs == NULL)
2666 return false;
2667 /* point to the next ie */
2668 ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
2669 /* calculate the length of the rest of the buffer */
2670 *tlvs_len -= (int)(ie - *tlvs);
2671 /* update the pointer to the start of the buffer */
2672 *tlvs = ie;
2673
2674 return false;
2675}
2676
2677struct brcmf_vs_tlv *
2678brcmf_find_wpaie(u8 *parse, u32 len)
2679{
2680 struct brcmf_tlv *ie;
2681
2682 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
2683 if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
2684 WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
2685 return (struct brcmf_vs_tlv *)ie;
2686 }
2687 return NULL;
2688}
2689
2690static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
2200{ 2691{
2692 struct brcmf_cfg80211_profile *profile = cfg->profile;
2201 struct brcmf_bss_info_le *bi; 2693 struct brcmf_bss_info_le *bi;
2202 struct brcmf_ssid *ssid; 2694 struct brcmf_ssid *ssid;
2203 struct brcmf_tlv *tim; 2695 struct brcmf_tlv *tim;
@@ -2208,21 +2700,21 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
2208 s32 err = 0; 2700 s32 err = 0;
2209 2701
2210 WL_TRACE("Enter\n"); 2702 WL_TRACE("Enter\n");
2211 if (brcmf_is_ibssmode(cfg_priv)) 2703 if (brcmf_is_ibssmode(cfg))
2212 return err; 2704 return err;
2213 2705
2214 ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID); 2706 ssid = &profile->ssid;
2215 2707
2216 *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX); 2708 *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
2217 err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO, 2709 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
2218 cfg_priv->extra_buf, WL_EXTRA_BUF_MAX); 2710 cfg->extra_buf, WL_EXTRA_BUF_MAX);
2219 if (err) { 2711 if (err) {
2220 WL_ERR("Could not get bss info %d\n", err); 2712 WL_ERR("Could not get bss info %d\n", err);
2221 goto update_bss_info_out; 2713 goto update_bss_info_out;
2222 } 2714 }
2223 2715
2224 bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4); 2716 bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
2225 err = brcmf_inform_single_bss(cfg_priv, bi); 2717 err = brcmf_inform_single_bss(cfg, bi);
2226 if (err) 2718 if (err)
2227 goto update_bss_info_out; 2719 goto update_bss_info_out;
2228 2720
@@ -2240,7 +2732,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
2240 * so we speficially query dtim information to dongle. 2732 * so we speficially query dtim information to dongle.
2241 */ 2733 */
2242 u32 var; 2734 u32 var;
2243 err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv), 2735 err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
2244 "dtim_assoc", &var); 2736 "dtim_assoc", &var);
2245 if (err) { 2737 if (err) {
2246 WL_ERR("wl dtim_assoc failed (%d)\n", err); 2738 WL_ERR("wl dtim_assoc failed (%d)\n", err);
@@ -2249,20 +2741,22 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
2249 dtim_period = (u8)var; 2741 dtim_period = (u8)var;
2250 } 2742 }
2251 2743
2252 brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT); 2744 profile->beacon_interval = beacon_interval;
2253 brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD); 2745 profile->dtim_period = dtim_period;
2254 2746
2255update_bss_info_out: 2747update_bss_info_out:
2256 WL_TRACE("Exit"); 2748 WL_TRACE("Exit");
2257 return err; 2749 return err;
2258} 2750}
2259 2751
2260static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv) 2752static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
2261{ 2753{
2262 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 2754 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2755 struct escan_info *escan = &cfg->escan_info;
2263 struct brcmf_ssid ssid; 2756 struct brcmf_ssid ssid;
2264 2757
2265 if (cfg_priv->iscan_on) { 2758 set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
2759 if (cfg->iscan_on) {
2266 iscan->state = WL_ISCAN_STATE_IDLE; 2760 iscan->state = WL_ISCAN_STATE_IDLE;
2267 2761
2268 if (iscan->timer_on) { 2762 if (iscan->timer_on) {
@@ -2275,27 +2769,40 @@ static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
2275 /* Abort iscan running in FW */ 2769 /* Abort iscan running in FW */
2276 memset(&ssid, 0, sizeof(ssid)); 2770 memset(&ssid, 0, sizeof(ssid));
2277 brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT); 2771 brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
2772
2773 if (cfg->scan_request) {
2774 /* Indidate scan abort to cfg80211 layer */
2775 WL_INFO("Terminating scan in progress\n");
2776 cfg80211_scan_done(cfg->scan_request, true);
2777 cfg->scan_request = NULL;
2778 }
2779 }
2780 if (cfg->escan_on && cfg->scan_request) {
2781 escan->escan_state = WL_ESCAN_STATE_IDLE;
2782 brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
2278 } 2783 }
2784 clear_bit(WL_STATUS_SCANNING, &cfg->status);
2785 clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
2279} 2786}
2280 2787
2281static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan, 2788static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
2282 bool aborted) 2789 bool aborted)
2283{ 2790{
2284 struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan); 2791 struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
2285 struct net_device *ndev = cfg_to_ndev(cfg_priv); 2792 struct net_device *ndev = cfg_to_ndev(cfg);
2286 2793
2287 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { 2794 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
2288 WL_ERR("Scan complete while device not scanning\n"); 2795 WL_ERR("Scan complete while device not scanning\n");
2289 return; 2796 return;
2290 } 2797 }
2291 if (cfg_priv->scan_request) { 2798 if (cfg->scan_request) {
2292 WL_SCAN("ISCAN Completed scan: %s\n", 2799 WL_SCAN("ISCAN Completed scan: %s\n",
2293 aborted ? "Aborted" : "Done"); 2800 aborted ? "Aborted" : "Done");
2294 cfg80211_scan_done(cfg_priv->scan_request, aborted); 2801 cfg80211_scan_done(cfg->scan_request, aborted);
2295 brcmf_set_mpc(ndev, 1); 2802 brcmf_set_mpc(ndev, 1);
2296 cfg_priv->scan_request = NULL; 2803 cfg->scan_request = NULL;
2297 } 2804 }
2298 cfg_priv->iscan_kickstart = false; 2805 cfg->iscan_kickstart = false;
2299} 2806}
2300 2807
2301static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan) 2808static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
@@ -2348,21 +2855,21 @@ brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
2348 return err; 2855 return err;
2349} 2856}
2350 2857
2351static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv) 2858static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
2352{ 2859{
2353 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2860 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2354 s32 err = 0; 2861 s32 err = 0;
2355 2862
2356 iscan->state = WL_ISCAN_STATE_IDLE; 2863 iscan->state = WL_ISCAN_STATE_IDLE;
2357 brcmf_inform_bss(cfg_priv); 2864 brcmf_inform_bss(cfg);
2358 brcmf_notify_iscan_complete(iscan, false); 2865 brcmf_notify_iscan_complete(iscan, false);
2359 2866
2360 return err; 2867 return err;
2361} 2868}
2362 2869
2363static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv) 2870static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
2364{ 2871{
2365 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2872 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2366 s32 err = 0; 2873 s32 err = 0;
2367 2874
2368 /* Reschedule the timer */ 2875 /* Reschedule the timer */
@@ -2372,12 +2879,12 @@ static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
2372 return err; 2879 return err;
2373} 2880}
2374 2881
2375static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv) 2882static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
2376{ 2883{
2377 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2884 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2378 s32 err = 0; 2885 s32 err = 0;
2379 2886
2380 brcmf_inform_bss(cfg_priv); 2887 brcmf_inform_bss(cfg);
2381 brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE); 2888 brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
2382 /* Reschedule the timer */ 2889 /* Reschedule the timer */
2383 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000); 2890 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -2386,9 +2893,9 @@ static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
2386 return err; 2893 return err;
2387} 2894}
2388 2895
2389static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv) 2896static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
2390{ 2897{
2391 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2898 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2392 s32 err = 0; 2899 s32 err = 0;
2393 2900
2394 iscan->state = WL_ISCAN_STATE_IDLE; 2901 iscan->state = WL_ISCAN_STATE_IDLE;
@@ -2402,7 +2909,7 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
2402 struct brcmf_cfg80211_iscan_ctrl *iscan = 2909 struct brcmf_cfg80211_iscan_ctrl *iscan =
2403 container_of(work, struct brcmf_cfg80211_iscan_ctrl, 2910 container_of(work, struct brcmf_cfg80211_iscan_ctrl,
2404 work); 2911 work);
2405 struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan); 2912 struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
2406 struct brcmf_cfg80211_iscan_eloop *el = &iscan->el; 2913 struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
2407 u32 status = BRCMF_SCAN_RESULTS_PARTIAL; 2914 u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
2408 2915
@@ -2411,12 +2918,12 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
2411 iscan->timer_on = 0; 2918 iscan->timer_on = 0;
2412 } 2919 }
2413 2920
2414 if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) { 2921 if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
2415 status = BRCMF_SCAN_RESULTS_ABORTED; 2922 status = BRCMF_SCAN_RESULTS_ABORTED;
2416 WL_ERR("Abort iscan\n"); 2923 WL_ERR("Abort iscan\n");
2417 } 2924 }
2418 2925
2419 el->handler[status](cfg_priv); 2926 el->handler[status](cfg);
2420} 2927}
2421 2928
2422static void brcmf_iscan_timer(unsigned long data) 2929static void brcmf_iscan_timer(unsigned long data)
@@ -2431,11 +2938,11 @@ static void brcmf_iscan_timer(unsigned long data)
2431 } 2938 }
2432} 2939}
2433 2940
2434static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv) 2941static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
2435{ 2942{
2436 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 2943 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2437 2944
2438 if (cfg_priv->iscan_on) { 2945 if (cfg->iscan_on) {
2439 iscan->state = WL_ISCAN_STATE_IDLE; 2946 iscan->state = WL_ISCAN_STATE_IDLE;
2440 INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler); 2947 INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
2441 } 2948 }
@@ -2453,26 +2960,192 @@ static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
2453 el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted; 2960 el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
2454} 2961}
2455 2962
2456static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv) 2963static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
2457{ 2964{
2458 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 2965 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2459 int err = 0; 2966 int err = 0;
2460 2967
2461 if (cfg_priv->iscan_on) { 2968 if (cfg->iscan_on) {
2462 iscan->ndev = cfg_to_ndev(cfg_priv); 2969 iscan->ndev = cfg_to_ndev(cfg);
2463 brcmf_init_iscan_eloop(&iscan->el); 2970 brcmf_init_iscan_eloop(&iscan->el);
2464 iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS; 2971 iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
2465 init_timer(&iscan->timer); 2972 init_timer(&iscan->timer);
2466 iscan->timer.data = (unsigned long) iscan; 2973 iscan->timer.data = (unsigned long) iscan;
2467 iscan->timer.function = brcmf_iscan_timer; 2974 iscan->timer.function = brcmf_iscan_timer;
2468 err = brcmf_invoke_iscan(cfg_priv); 2975 err = brcmf_invoke_iscan(cfg);
2469 if (!err) 2976 if (!err)
2470 iscan->data = cfg_priv; 2977 iscan->data = cfg;
2471 } 2978 }
2472 2979
2473 return err; 2980 return err;
2474} 2981}
2475 2982
2983static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
2984{
2985 struct brcmf_cfg80211_info *cfg =
2986 container_of(work, struct brcmf_cfg80211_info,
2987 escan_timeout_work);
2988
2989 brcmf_notify_escan_complete(cfg,
2990 cfg->escan_info.ndev, true, true);
2991}
2992
2993static void brcmf_escan_timeout(unsigned long data)
2994{
2995 struct brcmf_cfg80211_info *cfg =
2996 (struct brcmf_cfg80211_info *)data;
2997
2998 if (cfg->scan_request) {
2999 WL_ERR("timer expired\n");
3000 if (cfg->escan_on)
3001 schedule_work(&cfg->escan_timeout_work);
3002 }
3003}
3004
3005static s32
3006brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss,
3007 struct brcmf_bss_info_le *bss_info_le)
3008{
3009 if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) &&
3010 (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) ==
3011 CHSPEC_BAND(le16_to_cpu(bss->chanspec))) &&
3012 bss_info_le->SSID_len == bss->SSID_len &&
3013 !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
3014 if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
3015 (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
3016 s16 bss_rssi = le16_to_cpu(bss->RSSI);
3017 s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
3018
3019 /* preserve max RSSI if the measurements are
3020 * both on-channel or both off-channel
3021 */
3022 if (bss_info_rssi > bss_rssi)
3023 bss->RSSI = bss_info_le->RSSI;
3024 } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
3025 (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
3026 /* preserve the on-channel rssi measurement
3027 * if the new measurement is off channel
3028 */
3029 bss->RSSI = bss_info_le->RSSI;
3030 bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
3031 }
3032 return 1;
3033 }
3034 return 0;
3035}
3036
3037static s32
3038brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
3039 struct net_device *ndev,
3040 const struct brcmf_event_msg *e, void *data)
3041{
3042 s32 status;
3043 s32 err = 0;
3044 struct brcmf_escan_result_le *escan_result_le;
3045 struct brcmf_bss_info_le *bss_info_le;
3046 struct brcmf_bss_info_le *bss = NULL;
3047 u32 bi_length;
3048 struct brcmf_scan_results *list;
3049 u32 i;
3050 bool aborted;
3051
3052 status = be32_to_cpu(e->status);
3053
3054 if (!ndev || !cfg->escan_on ||
3055 !test_bit(WL_STATUS_SCANNING, &cfg->status)) {
3056 WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
3057 ndev, cfg->escan_on,
3058 !test_bit(WL_STATUS_SCANNING, &cfg->status));
3059 return -EPERM;
3060 }
3061
3062 if (status == BRCMF_E_STATUS_PARTIAL) {
3063 WL_SCAN("ESCAN Partial result\n");
3064 escan_result_le = (struct brcmf_escan_result_le *) data;
3065 if (!escan_result_le) {
3066 WL_ERR("Invalid escan result (NULL pointer)\n");
3067 goto exit;
3068 }
3069 if (!cfg->scan_request) {
3070 WL_SCAN("result without cfg80211 request\n");
3071 goto exit;
3072 }
3073
3074 if (le16_to_cpu(escan_result_le->bss_count) != 1) {
3075 WL_ERR("Invalid bss_count %d: ignoring\n",
3076 escan_result_le->bss_count);
3077 goto exit;
3078 }
3079 bss_info_le = &escan_result_le->bss_info_le;
3080
3081 bi_length = le32_to_cpu(bss_info_le->length);
3082 if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
3083 WL_ESCAN_RESULTS_FIXED_SIZE)) {
3084 WL_ERR("Invalid bss_info length %d: ignoring\n",
3085 bi_length);
3086 goto exit;
3087 }
3088
3089 if (!(cfg_to_wiphy(cfg)->interface_modes &
3090 BIT(NL80211_IFTYPE_ADHOC))) {
3091 if (le16_to_cpu(bss_info_le->capability) &
3092 WLAN_CAPABILITY_IBSS) {
3093 WL_ERR("Ignoring IBSS result\n");
3094 goto exit;
3095 }
3096 }
3097
3098 list = (struct brcmf_scan_results *)
3099 cfg->escan_info.escan_buf;
3100 if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
3101 WL_ERR("Buffer is too small: ignoring\n");
3102 goto exit;
3103 }
3104
3105 for (i = 0; i < list->count; i++) {
3106 bss = bss ? (struct brcmf_bss_info_le *)
3107 ((unsigned char *)bss +
3108 le32_to_cpu(bss->length)) : list->bss_info_le;
3109 if (brcmf_compare_update_same_bss(bss, bss_info_le))
3110 goto exit;
3111 }
3112 memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
3113 bss_info_le, bi_length);
3114 list->version = le32_to_cpu(bss_info_le->version);
3115 list->buflen += bi_length;
3116 list->count++;
3117 } else {
3118 cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
3119 if (cfg->scan_request) {
3120 cfg->bss_list = (struct brcmf_scan_results *)
3121 cfg->escan_info.escan_buf;
3122 brcmf_inform_bss(cfg);
3123 aborted = status != BRCMF_E_STATUS_SUCCESS;
3124 brcmf_notify_escan_complete(cfg, ndev, aborted,
3125 false);
3126 } else
3127 WL_ERR("Unexpected scan result 0x%x\n", status);
3128 }
3129exit:
3130 return err;
3131}
3132
3133static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
3134{
3135
3136 if (cfg->escan_on) {
3137 cfg->el.handler[BRCMF_E_ESCAN_RESULT] =
3138 brcmf_cfg80211_escan_handler;
3139 cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
3140 /* Init scan_timeout timer */
3141 init_timer(&cfg->escan_timeout);
3142 cfg->escan_timeout.data = (unsigned long) cfg;
3143 cfg->escan_timeout.function = brcmf_escan_timeout;
3144 INIT_WORK(&cfg->escan_timeout_work,
3145 brcmf_cfg80211_escan_timeout_worker);
3146 }
3147}
3148
2476static __always_inline void brcmf_delay(u32 ms) 3149static __always_inline void brcmf_delay(u32 ms)
2477{ 3150{
2478 if (ms < 1000 / HZ) { 3151 if (ms < 1000 / HZ) {
@@ -2485,7 +3158,7 @@ static __always_inline void brcmf_delay(u32 ms)
2485 3158
2486static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) 3159static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
2487{ 3160{
2488 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3161 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2489 3162
2490 /* 3163 /*
2491 * Check for WL_STATUS_READY before any function call which 3164 * Check for WL_STATUS_READY before any function call which
@@ -2494,7 +3167,7 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
2494 */ 3167 */
2495 WL_TRACE("Enter\n"); 3168 WL_TRACE("Enter\n");
2496 3169
2497 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) 3170 if (test_bit(WL_STATUS_READY, &cfg->status))
2498 brcmf_invoke_iscan(wiphy_to_cfg(wiphy)); 3171 brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
2499 3172
2500 WL_TRACE("Exit\n"); 3173 WL_TRACE("Exit\n");
@@ -2504,8 +3177,8 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
2504static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, 3177static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2505 struct cfg80211_wowlan *wow) 3178 struct cfg80211_wowlan *wow)
2506{ 3179{
2507 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3180 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2508 struct net_device *ndev = cfg_to_ndev(cfg_priv); 3181 struct net_device *ndev = cfg_to_ndev(cfg);
2509 3182
2510 WL_TRACE("Enter\n"); 3183 WL_TRACE("Enter\n");
2511 3184
@@ -2519,12 +3192,12 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2519 * While going to suspend if associated with AP disassociate 3192 * While going to suspend if associated with AP disassociate
2520 * from AP to save power while system is in suspended state 3193 * from AP to save power while system is in suspended state
2521 */ 3194 */
2522 if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || 3195 if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
2523 test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && 3196 test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
2524 test_bit(WL_STATUS_READY, &cfg_priv->status)) { 3197 test_bit(WL_STATUS_READY, &cfg->status)) {
2525 WL_INFO("Disassociating from AP" 3198 WL_INFO("Disassociating from AP"
2526 " while entering suspend state\n"); 3199 " while entering suspend state\n");
2527 brcmf_link_down(cfg_priv); 3200 brcmf_link_down(cfg);
2528 3201
2529 /* 3202 /*
2530 * Make sure WPA_Supplicant receives all the event 3203 * Make sure WPA_Supplicant receives all the event
@@ -2534,24 +3207,14 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2534 brcmf_delay(500); 3207 brcmf_delay(500);
2535 } 3208 }
2536 3209
2537 set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); 3210 if (test_bit(WL_STATUS_READY, &cfg->status))
2538 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) 3211 brcmf_abort_scanning(cfg);
2539 brcmf_term_iscan(cfg_priv); 3212 else
2540 3213 clear_bit(WL_STATUS_SCANNING, &cfg->status);
2541 if (cfg_priv->scan_request) {
2542 /* Indidate scan abort to cfg80211 layer */
2543 WL_INFO("Terminating scan in progress\n");
2544 cfg80211_scan_done(cfg_priv->scan_request, true);
2545 cfg_priv->scan_request = NULL;
2546 }
2547 clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
2548 clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
2549 3214
2550 /* Turn off watchdog timer */ 3215 /* Turn off watchdog timer */
2551 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { 3216 if (test_bit(WL_STATUS_READY, &cfg->status))
2552 WL_INFO("Enable MPC\n");
2553 brcmf_set_mpc(ndev, 1); 3217 brcmf_set_mpc(ndev, 1);
2554 }
2555 3218
2556 WL_TRACE("Exit\n"); 3219 WL_TRACE("Exit\n");
2557 3220
@@ -2561,14 +3224,14 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2561static __used s32 3224static __used s32
2562brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len) 3225brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
2563{ 3226{
2564 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 3227 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
2565 u32 buflen; 3228 u32 buflen;
2566 3229
2567 buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf, 3230 buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
2568 WL_DCMD_LEN_MAX); 3231 WL_DCMD_LEN_MAX);
2569 BUG_ON(!buflen); 3232 BUG_ON(!buflen);
2570 3233
2571 return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf, 3234 return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
2572 buflen); 3235 buflen);
2573} 3236}
2574 3237
@@ -2576,20 +3239,20 @@ static s32
2576brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf, 3239brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
2577 s32 buf_len) 3240 s32 buf_len)
2578{ 3241{
2579 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 3242 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
2580 u32 len; 3243 u32 len;
2581 s32 err = 0; 3244 s32 err = 0;
2582 3245
2583 len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf, 3246 len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
2584 WL_DCMD_LEN_MAX); 3247 WL_DCMD_LEN_MAX);
2585 BUG_ON(!len); 3248 BUG_ON(!len);
2586 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf, 3249 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
2587 WL_DCMD_LEN_MAX); 3250 WL_DCMD_LEN_MAX);
2588 if (err) { 3251 if (err) {
2589 WL_ERR("error (%d)\n", err); 3252 WL_ERR("error (%d)\n", err);
2590 return err; 3253 return err;
2591 } 3254 }
2592 memcpy(buf, cfg_priv->dcmd_buf, buf_len); 3255 memcpy(buf, cfg->dcmd_buf, buf_len);
2593 3256
2594 return err; 3257 return err;
2595} 3258}
@@ -2622,8 +3285,8 @@ static s32
2622brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, 3285brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2623 struct cfg80211_pmksa *pmksa) 3286 struct cfg80211_pmksa *pmksa)
2624{ 3287{
2625 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3288 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2626 struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids; 3289 struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
2627 s32 err = 0; 3290 s32 err = 0;
2628 int i; 3291 int i;
2629 int pmkid_len; 3292 int pmkid_len;
@@ -2651,7 +3314,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2651 for (i = 0; i < WLAN_PMKID_LEN; i++) 3314 for (i = 0; i < WLAN_PMKID_LEN; i++)
2652 WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]); 3315 WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
2653 3316
2654 err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); 3317 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
2655 3318
2656 WL_TRACE("Exit\n"); 3319 WL_TRACE("Exit\n");
2657 return err; 3320 return err;
@@ -2661,7 +3324,7 @@ static s32
2661brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, 3324brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2662 struct cfg80211_pmksa *pmksa) 3325 struct cfg80211_pmksa *pmksa)
2663{ 3326{
2664 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3327 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2665 struct pmkid_list pmkid; 3328 struct pmkid_list pmkid;
2666 s32 err = 0; 3329 s32 err = 0;
2667 int i, pmkid_len; 3330 int i, pmkid_len;
@@ -2678,30 +3341,30 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2678 for (i = 0; i < WLAN_PMKID_LEN; i++) 3341 for (i = 0; i < WLAN_PMKID_LEN; i++)
2679 WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]); 3342 WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
2680 3343
2681 pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid); 3344 pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
2682 for (i = 0; i < pmkid_len; i++) 3345 for (i = 0; i < pmkid_len; i++)
2683 if (!memcmp 3346 if (!memcmp
2684 (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, 3347 (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
2685 ETH_ALEN)) 3348 ETH_ALEN))
2686 break; 3349 break;
2687 3350
2688 if ((pmkid_len > 0) 3351 if ((pmkid_len > 0)
2689 && (i < pmkid_len)) { 3352 && (i < pmkid_len)) {
2690 memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0, 3353 memset(&cfg->pmk_list->pmkids.pmkid[i], 0,
2691 sizeof(struct pmkid)); 3354 sizeof(struct pmkid));
2692 for (; i < (pmkid_len - 1); i++) { 3355 for (; i < (pmkid_len - 1); i++) {
2693 memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, 3356 memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
2694 &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID, 3357 &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
2695 ETH_ALEN); 3358 ETH_ALEN);
2696 memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID, 3359 memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
2697 &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID, 3360 &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
2698 WLAN_PMKID_LEN); 3361 WLAN_PMKID_LEN);
2699 } 3362 }
2700 cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1); 3363 cfg->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
2701 } else 3364 } else
2702 err = -EINVAL; 3365 err = -EINVAL;
2703 3366
2704 err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); 3367 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
2705 3368
2706 WL_TRACE("Exit\n"); 3369 WL_TRACE("Exit\n");
2707 return err; 3370 return err;
@@ -2711,21 +3374,979 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2711static s32 3374static s32
2712brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) 3375brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
2713{ 3376{
2714 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3377 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2715 s32 err = 0; 3378 s32 err = 0;
2716 3379
2717 WL_TRACE("Enter\n"); 3380 WL_TRACE("Enter\n");
2718 if (!check_sys_up(wiphy)) 3381 if (!check_sys_up(wiphy))
2719 return -EIO; 3382 return -EIO;
2720 3383
2721 memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list)); 3384 memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
2722 err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); 3385 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
2723 3386
2724 WL_TRACE("Exit\n"); 3387 WL_TRACE("Exit\n");
2725 return err; 3388 return err;
2726 3389
2727} 3390}
2728 3391
3392/*
3393 * PFN result doesn't have all the info which are
3394 * required by the supplicant
3395 * (For e.g IEs) Do a target Escan so that sched scan results are reported
3396 * via wl_inform_single_bss in the required format. Escan does require the
3397 * scan request in the form of cfg80211_scan_request. For timebeing, create
3398 * cfg80211_scan_request one out of the received PNO event.
3399 */
3400static s32
3401brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
3402 struct net_device *ndev,
3403 const struct brcmf_event_msg *e, void *data)
3404{
3405 struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
3406 struct cfg80211_scan_request *request = NULL;
3407 struct cfg80211_ssid *ssid = NULL;
3408 struct ieee80211_channel *channel = NULL;
3409 struct wiphy *wiphy = cfg_to_wiphy(cfg);
3410 int err = 0;
3411 int channel_req = 0;
3412 int band = 0;
3413 struct brcmf_pno_scanresults_le *pfn_result;
3414 u32 result_count;
3415 u32 status;
3416
3417 WL_SCAN("Enter\n");
3418
3419 if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) {
3420 WL_SCAN("PFN NET LOST event. Do Nothing\n");
3421 return 0;
3422 }
3423
3424 pfn_result = (struct brcmf_pno_scanresults_le *)data;
3425 result_count = le32_to_cpu(pfn_result->count);
3426 status = le32_to_cpu(pfn_result->status);
3427
3428 /*
3429 * PFN event is limited to fit 512 bytes so we may get
3430 * multiple NET_FOUND events. For now place a warning here.
3431 */
3432 WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
3433 WL_SCAN("PFN NET FOUND event. count: %d\n", result_count);
3434 if (result_count > 0) {
3435 int i;
3436
3437 request = kzalloc(sizeof(*request), GFP_KERNEL);
3438 ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
3439 channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
3440 if (!request || !ssid || !channel) {
3441 err = -ENOMEM;
3442 goto out_err;
3443 }
3444
3445 request->wiphy = wiphy;
3446 data += sizeof(struct brcmf_pno_scanresults_le);
3447 netinfo_start = (struct brcmf_pno_net_info_le *)data;
3448
3449 for (i = 0; i < result_count; i++) {
3450 netinfo = &netinfo_start[i];
3451 if (!netinfo) {
3452 WL_ERR("Invalid netinfo ptr. index: %d\n", i);
3453 err = -EINVAL;
3454 goto out_err;
3455 }
3456
3457 WL_SCAN("SSID:%s Channel:%d\n",
3458 netinfo->SSID, netinfo->channel);
3459 memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
3460 ssid[i].ssid_len = netinfo->SSID_len;
3461 request->n_ssids++;
3462
3463 channel_req = netinfo->channel;
3464 if (channel_req <= CH_MAX_2G_CHANNEL)
3465 band = NL80211_BAND_2GHZ;
3466 else
3467 band = NL80211_BAND_5GHZ;
3468 channel[i].center_freq =
3469 ieee80211_channel_to_frequency(channel_req,
3470 band);
3471 channel[i].band = band;
3472 channel[i].flags |= IEEE80211_CHAN_NO_HT40;
3473 request->channels[i] = &channel[i];
3474 request->n_channels++;
3475 }
3476
3477 /* assign parsed ssid array */
3478 if (request->n_ssids)
3479 request->ssids = &ssid[0];
3480
3481 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
3482 /* Abort any on-going scan */
3483 brcmf_abort_scanning(cfg);
3484 }
3485
3486 set_bit(WL_STATUS_SCANNING, &cfg->status);
3487 err = brcmf_do_escan(cfg, wiphy, ndev, request);
3488 if (err) {
3489 clear_bit(WL_STATUS_SCANNING, &cfg->status);
3490 goto out_err;
3491 }
3492 cfg->sched_escan = true;
3493 cfg->scan_request = request;
3494 } else {
3495 WL_ERR("FALSE PNO Event. (pfn_count == 0)\n");
3496 goto out_err;
3497 }
3498
3499 kfree(ssid);
3500 kfree(channel);
3501 kfree(request);
3502 return 0;
3503
3504out_err:
3505 kfree(ssid);
3506 kfree(channel);
3507 kfree(request);
3508 cfg80211_sched_scan_stopped(wiphy);
3509 return err;
3510}
3511
3512#ifndef CONFIG_BRCMISCAN
3513static int brcmf_dev_pno_clean(struct net_device *ndev)
3514{
3515 char iovbuf[128];
3516 int ret;
3517
3518 /* Disable pfn */
3519 ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
3520 if (ret == 0) {
3521 /* clear pfn */
3522 ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
3523 iovbuf, sizeof(iovbuf));
3524 }
3525 if (ret < 0)
3526 WL_ERR("failed code %d\n", ret);
3527
3528 return ret;
3529}
3530
3531static int brcmf_dev_pno_config(struct net_device *ndev)
3532{
3533 struct brcmf_pno_param_le pfn_param;
3534 char iovbuf[128];
3535
3536 memset(&pfn_param, 0, sizeof(pfn_param));
3537 pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
3538
3539 /* set extra pno params */
3540 pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
3541 pfn_param.repeat = BRCMF_PNO_REPEAT;
3542 pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
3543
3544 /* set up pno scan fr */
3545 pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
3546
3547 return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
3548 &pfn_param, sizeof(pfn_param),
3549 iovbuf, sizeof(iovbuf));
3550}
3551
3552static int
3553brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3554 struct net_device *ndev,
3555 struct cfg80211_sched_scan_request *request)
3556{
3557 char iovbuf[128];
3558 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
3559 struct brcmf_pno_net_param_le pfn;
3560 int i;
3561 int ret = 0;
3562
3563 WL_SCAN("Enter n_match_sets:%d n_ssids:%d\n",
3564 request->n_match_sets, request->n_ssids);
3565 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
3566 WL_ERR("Scanning already : status (%lu)\n", cfg->status);
3567 return -EAGAIN;
3568 }
3569
3570 if (!request || !request->n_ssids || !request->n_match_sets) {
3571 WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
3572 request->n_ssids);
3573 return -EINVAL;
3574 }
3575
3576 if (request->n_ssids > 0) {
3577 for (i = 0; i < request->n_ssids; i++) {
3578 /* Active scan req for ssids */
3579 WL_SCAN(">>> Active scan req for ssid (%s)\n",
3580 request->ssids[i].ssid);
3581
3582 /*
3583 * match_set ssids is a supert set of n_ssid list,
3584 * so we need not add these set seperately.
3585 */
3586 }
3587 }
3588
3589 if (request->n_match_sets > 0) {
3590 /* clean up everything */
3591 ret = brcmf_dev_pno_clean(ndev);
3592 if (ret < 0) {
3593 WL_ERR("failed error=%d\n", ret);
3594 return ret;
3595 }
3596
3597 /* configure pno */
3598 ret = brcmf_dev_pno_config(ndev);
3599 if (ret < 0) {
3600 WL_ERR("PNO setup failed!! ret=%d\n", ret);
3601 return -EINVAL;
3602 }
3603
3604 /* configure each match set */
3605 for (i = 0; i < request->n_match_sets; i++) {
3606 struct cfg80211_ssid *ssid;
3607 u32 ssid_len;
3608
3609 ssid = &request->match_sets[i].ssid;
3610 ssid_len = ssid->ssid_len;
3611
3612 if (!ssid_len) {
3613 WL_ERR("skip broadcast ssid\n");
3614 continue;
3615 }
3616 pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
3617 pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
3618 pfn.wsec = cpu_to_le32(0);
3619 pfn.infra = cpu_to_le32(1);
3620 pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
3621 pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
3622 memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
3623 ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
3624 &pfn, sizeof(pfn),
3625 iovbuf, sizeof(iovbuf));
3626 WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
3627 ret == 0 ? "set" : "failed",
3628 ssid->ssid);
3629 }
3630 /* Enable the PNO */
3631 if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
3632 WL_ERR("PNO enable failed!! ret=%d\n", ret);
3633 return -EINVAL;
3634 }
3635 } else {
3636 return -EINVAL;
3637 }
3638
3639 return 0;
3640}
3641
3642static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
3643 struct net_device *ndev)
3644{
3645 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3646
3647 WL_SCAN("enter\n");
3648 brcmf_dev_pno_clean(ndev);
3649 if (cfg->sched_escan)
3650 brcmf_notify_escan_complete(cfg, ndev, true, true);
3651 return 0;
3652}
3653#endif /* CONFIG_BRCMISCAN */
3654
3655#ifdef CONFIG_NL80211_TESTMODE
3656static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
3657{
3658 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3659 struct net_device *ndev = cfg->wdev->netdev;
3660 struct brcmf_dcmd *dcmd = data;
3661 struct sk_buff *reply;
3662 int ret;
3663
3664 ret = brcmf_netlink_dcmd(ndev, dcmd);
3665 if (ret == 0) {
3666 reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
3667 nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
3668 ret = cfg80211_testmode_reply(reply);
3669 }
3670 return ret;
3671}
3672#endif
3673
3674static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
3675{
3676 s32 err;
3677
3678 /* set auth */
3679 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
3680 if (err < 0) {
3681 WL_ERR("auth error %d\n", err);
3682 return err;
3683 }
3684 /* set wsec */
3685 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
3686 if (err < 0) {
3687 WL_ERR("wsec error %d\n", err);
3688 return err;
3689 }
3690 /* set upper-layer auth */
3691 err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
3692 WPA_AUTH_NONE, bssidx);
3693 if (err < 0) {
3694 WL_ERR("wpa_auth error %d\n", err);
3695 return err;
3696 }
3697
3698 return 0;
3699}
3700
3701static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
3702{
3703 if (is_rsn_ie)
3704 return (memcmp(oui, RSN_OUI, TLV_OUI_LEN) == 0);
3705
3706 return (memcmp(oui, WPA_OUI, TLV_OUI_LEN) == 0);
3707}
3708
3709static s32
3710brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3711 bool is_rsn_ie, s32 bssidx)
3712{
3713 u32 auth = 0; /* d11 open authentication */
3714 u16 count;
3715 s32 err = 0;
3716 s32 len = 0;
3717 u32 i;
3718 u32 wsec;
3719 u32 pval = 0;
3720 u32 gval = 0;
3721 u32 wpa_auth = 0;
3722 u32 offset;
3723 u8 *data;
3724 u16 rsn_cap;
3725 u32 wme_bss_disable;
3726
3727 WL_TRACE("Enter\n");
3728 if (wpa_ie == NULL)
3729 goto exit;
3730
3731 len = wpa_ie->len + TLV_HDR_LEN;
3732 data = (u8 *)wpa_ie;
3733 offset = 0;
3734 if (!is_rsn_ie)
3735 offset += VS_IE_FIXED_HDR_LEN;
3736 offset += WPA_IE_VERSION_LEN;
3737
3738 /* check for multicast cipher suite */
3739 if (offset + WPA_IE_MIN_OUI_LEN > len) {
3740 err = -EINVAL;
3741 WL_ERR("no multicast cipher suite\n");
3742 goto exit;
3743 }
3744
3745 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3746 err = -EINVAL;
3747 WL_ERR("ivalid OUI\n");
3748 goto exit;
3749 }
3750 offset += TLV_OUI_LEN;
3751
3752 /* pick up multicast cipher */
3753 switch (data[offset]) {
3754 case WPA_CIPHER_NONE:
3755 gval = 0;
3756 break;
3757 case WPA_CIPHER_WEP_40:
3758 case WPA_CIPHER_WEP_104:
3759 gval = WEP_ENABLED;
3760 break;
3761 case WPA_CIPHER_TKIP:
3762 gval = TKIP_ENABLED;
3763 break;
3764 case WPA_CIPHER_AES_CCM:
3765 gval = AES_ENABLED;
3766 break;
3767 default:
3768 err = -EINVAL;
3769 WL_ERR("Invalid multi cast cipher info\n");
3770 goto exit;
3771 }
3772
3773 offset++;
3774 /* walk thru unicast cipher list and pick up what we recognize */
3775 count = data[offset] + (data[offset + 1] << 8);
3776 offset += WPA_IE_SUITE_COUNT_LEN;
3777 /* Check for unicast suite(s) */
3778 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
3779 err = -EINVAL;
3780 WL_ERR("no unicast cipher suite\n");
3781 goto exit;
3782 }
3783 for (i = 0; i < count; i++) {
3784 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3785 err = -EINVAL;
3786 WL_ERR("ivalid OUI\n");
3787 goto exit;
3788 }
3789 offset += TLV_OUI_LEN;
3790 switch (data[offset]) {
3791 case WPA_CIPHER_NONE:
3792 break;
3793 case WPA_CIPHER_WEP_40:
3794 case WPA_CIPHER_WEP_104:
3795 pval |= WEP_ENABLED;
3796 break;
3797 case WPA_CIPHER_TKIP:
3798 pval |= TKIP_ENABLED;
3799 break;
3800 case WPA_CIPHER_AES_CCM:
3801 pval |= AES_ENABLED;
3802 break;
3803 default:
3804 WL_ERR("Ivalid unicast security info\n");
3805 }
3806 offset++;
3807 }
3808 /* walk thru auth management suite list and pick up what we recognize */
3809 count = data[offset] + (data[offset + 1] << 8);
3810 offset += WPA_IE_SUITE_COUNT_LEN;
3811 /* Check for auth key management suite(s) */
3812 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
3813 err = -EINVAL;
3814 WL_ERR("no auth key mgmt suite\n");
3815 goto exit;
3816 }
3817 for (i = 0; i < count; i++) {
3818 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3819 err = -EINVAL;
3820 WL_ERR("ivalid OUI\n");
3821 goto exit;
3822 }
3823 offset += TLV_OUI_LEN;
3824 switch (data[offset]) {
3825 case RSN_AKM_NONE:
3826 WL_TRACE("RSN_AKM_NONE\n");
3827 wpa_auth |= WPA_AUTH_NONE;
3828 break;
3829 case RSN_AKM_UNSPECIFIED:
3830 WL_TRACE("RSN_AKM_UNSPECIFIED\n");
3831 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
3832 (wpa_auth |= WPA_AUTH_UNSPECIFIED);
3833 break;
3834 case RSN_AKM_PSK:
3835 WL_TRACE("RSN_AKM_PSK\n");
3836 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
3837 (wpa_auth |= WPA_AUTH_PSK);
3838 break;
3839 default:
3840 WL_ERR("Ivalid key mgmt info\n");
3841 }
3842 offset++;
3843 }
3844
3845 if (is_rsn_ie) {
3846 wme_bss_disable = 1;
3847 if ((offset + RSN_CAP_LEN) <= len) {
3848 rsn_cap = data[offset] + (data[offset + 1] << 8);
3849 if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
3850 wme_bss_disable = 0;
3851 }
3852 /* set wme_bss_disable to sync RSN Capabilities */
3853 err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
3854 wme_bss_disable, bssidx);
3855 if (err < 0) {
3856 WL_ERR("wme_bss_disable error %d\n", err);
3857 goto exit;
3858 }
3859 }
3860 /* FOR WPS , set SES_OW_ENABLED */
3861 wsec = (pval | gval | SES_OW_ENABLED);
3862
3863 /* set auth */
3864 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
3865 if (err < 0) {
3866 WL_ERR("auth error %d\n", err);
3867 goto exit;
3868 }
3869 /* set wsec */
3870 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
3871 if (err < 0) {
3872 WL_ERR("wsec error %d\n", err);
3873 goto exit;
3874 }
3875 /* set upper-layer auth */
3876 err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
3877 if (err < 0) {
3878 WL_ERR("wpa_auth error %d\n", err);
3879 goto exit;
3880 }
3881
3882exit:
3883 return err;
3884}
3885
3886static s32
3887brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
3888 struct parsed_vndr_ies *vndr_ies)
3889{
3890 s32 err = 0;
3891 struct brcmf_vs_tlv *vndrie;
3892 struct brcmf_tlv *ie;
3893 struct parsed_vndr_ie_info *parsed_info;
3894 s32 remaining_len;
3895
3896 remaining_len = (s32)vndr_ie_len;
3897 memset(vndr_ies, 0, sizeof(*vndr_ies));
3898
3899 ie = (struct brcmf_tlv *)vndr_ie_buf;
3900 while (ie) {
3901 if (ie->id != WLAN_EID_VENDOR_SPECIFIC)
3902 goto next;
3903 vndrie = (struct brcmf_vs_tlv *)ie;
3904 /* len should be bigger than OUI length + one */
3905 if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
3906 WL_ERR("invalid vndr ie. length is too small %d\n",
3907 vndrie->len);
3908 goto next;
3909 }
3910 /* if wpa or wme ie, do not add ie */
3911 if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
3912 ((vndrie->oui_type == WPA_OUI_TYPE) ||
3913 (vndrie->oui_type == WME_OUI_TYPE))) {
3914 WL_TRACE("Found WPA/WME oui. Do not add it\n");
3915 goto next;
3916 }
3917
3918 parsed_info = &vndr_ies->ie_info[vndr_ies->count];
3919
3920 /* save vndr ie information */
3921 parsed_info->ie_ptr = (char *)vndrie;
3922 parsed_info->ie_len = vndrie->len + TLV_HDR_LEN;
3923 memcpy(&parsed_info->vndrie, vndrie, sizeof(*vndrie));
3924
3925 vndr_ies->count++;
3926
3927 WL_TRACE("** OUI %02x %02x %02x, type 0x%02x\n",
3928 parsed_info->vndrie.oui[0],
3929 parsed_info->vndrie.oui[1],
3930 parsed_info->vndrie.oui[2],
3931 parsed_info->vndrie.oui_type);
3932
3933 if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
3934 break;
3935next:
3936 remaining_len -= ie->len;
3937 if (remaining_len <= 2)
3938 ie = NULL;
3939 else
3940 ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len);
3941 }
3942 return err;
3943}
3944
3945static u32
3946brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
3947{
3948
3949 __le32 iecount_le;
3950 __le32 pktflag_le;
3951
3952 strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
3953 iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
3954
3955 iecount_le = cpu_to_le32(1);
3956 memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &iecount_le, sizeof(iecount_le));
3957
3958 pktflag_le = cpu_to_le32(pktflag);
3959 memcpy(&iebuf[VNDR_IE_PKTFLAG_OFFSET], &pktflag_le, sizeof(pktflag_le));
3960
3961 memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
3962
3963 return ie_len + VNDR_IE_HDR_SIZE;
3964}
3965
3966s32
3967brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
3968 struct net_device *ndev, s32 bssidx, s32 pktflag,
3969 u8 *vndr_ie_buf, u32 vndr_ie_len)
3970{
3971 s32 err = 0;
3972 u8 *iovar_ie_buf;
3973 u8 *curr_ie_buf;
3974 u8 *mgmt_ie_buf = NULL;
3975 u32 mgmt_ie_buf_len = 0;
3976 u32 *mgmt_ie_len = 0;
3977 u32 del_add_ie_buf_len = 0;
3978 u32 total_ie_buf_len = 0;
3979 u32 parsed_ie_buf_len = 0;
3980 struct parsed_vndr_ies old_vndr_ies;
3981 struct parsed_vndr_ies new_vndr_ies;
3982 struct parsed_vndr_ie_info *vndrie_info;
3983 s32 i;
3984 u8 *ptr;
3985 u32 remained_buf_len;
3986
3987 WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
3988 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
3989 if (!iovar_ie_buf)
3990 return -ENOMEM;
3991 curr_ie_buf = iovar_ie_buf;
3992 if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
3993 test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
3994 switch (pktflag) {
3995 case VNDR_IE_PRBRSP_FLAG:
3996 mgmt_ie_buf = cfg->ap_info->probe_res_ie;
3997 mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
3998 mgmt_ie_buf_len =
3999 sizeof(cfg->ap_info->probe_res_ie);
4000 break;
4001 case VNDR_IE_BEACON_FLAG:
4002 mgmt_ie_buf = cfg->ap_info->beacon_ie;
4003 mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
4004 mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
4005 break;
4006 default:
4007 err = -EPERM;
4008 WL_ERR("not suitable type\n");
4009 goto exit;
4010 }
4011 bssidx = 0;
4012 } else {
4013 err = -EPERM;
4014 WL_ERR("not suitable type\n");
4015 goto exit;
4016 }
4017
4018 if (vndr_ie_len > mgmt_ie_buf_len) {
4019 err = -ENOMEM;
4020 WL_ERR("extra IE size too big\n");
4021 goto exit;
4022 }
4023
4024 /* parse and save new vndr_ie in curr_ie_buff before comparing it */
4025 if (vndr_ie_buf && vndr_ie_len && curr_ie_buf) {
4026 ptr = curr_ie_buf;
4027 brcmf_parse_vndr_ies(vndr_ie_buf, vndr_ie_len, &new_vndr_ies);
4028 for (i = 0; i < new_vndr_ies.count; i++) {
4029 vndrie_info = &new_vndr_ies.ie_info[i];
4030 memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
4031 vndrie_info->ie_len);
4032 parsed_ie_buf_len += vndrie_info->ie_len;
4033 }
4034 }
4035
4036 if (mgmt_ie_buf != NULL) {
4037 if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
4038 (memcmp(mgmt_ie_buf, curr_ie_buf,
4039 parsed_ie_buf_len) == 0)) {
4040 WL_TRACE("Previous mgmt IE is equals to current IE");
4041 goto exit;
4042 }
4043
4044 /* parse old vndr_ie */
4045 brcmf_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, &old_vndr_ies);
4046
4047 /* make a command to delete old ie */
4048 for (i = 0; i < old_vndr_ies.count; i++) {
4049 vndrie_info = &old_vndr_ies.ie_info[i];
4050
4051 WL_TRACE("DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
4052 vndrie_info->vndrie.id,
4053 vndrie_info->vndrie.len,
4054 vndrie_info->vndrie.oui[0],
4055 vndrie_info->vndrie.oui[1],
4056 vndrie_info->vndrie.oui[2]);
4057
4058 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
4059 vndrie_info->ie_ptr,
4060 vndrie_info->ie_len,
4061 "del");
4062 curr_ie_buf += del_add_ie_buf_len;
4063 total_ie_buf_len += del_add_ie_buf_len;
4064 }
4065 }
4066
4067 *mgmt_ie_len = 0;
4068 /* Add if there is any extra IE */
4069 if (mgmt_ie_buf && parsed_ie_buf_len) {
4070 ptr = mgmt_ie_buf;
4071
4072 remained_buf_len = mgmt_ie_buf_len;
4073
4074 /* make a command to add new ie */
4075 for (i = 0; i < new_vndr_ies.count; i++) {
4076 vndrie_info = &new_vndr_ies.ie_info[i];
4077
4078 WL_TRACE("ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
4079 vndrie_info->vndrie.id,
4080 vndrie_info->vndrie.len,
4081 vndrie_info->vndrie.oui[0],
4082 vndrie_info->vndrie.oui[1],
4083 vndrie_info->vndrie.oui[2]);
4084
4085 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
4086 vndrie_info->ie_ptr,
4087 vndrie_info->ie_len,
4088 "add");
4089 /* verify remained buf size before copy data */
4090 remained_buf_len -= vndrie_info->ie_len;
4091 if (remained_buf_len < 0) {
4092 WL_ERR("no space in mgmt_ie_buf: len left %d",
4093 remained_buf_len);
4094 break;
4095 }
4096
4097 /* save the parsed IE in wl struct */
4098 memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
4099 vndrie_info->ie_len);
4100 *mgmt_ie_len += vndrie_info->ie_len;
4101
4102 curr_ie_buf += del_add_ie_buf_len;
4103 total_ie_buf_len += del_add_ie_buf_len;
4104 }
4105 }
4106 if (total_ie_buf_len) {
4107 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
4108 iovar_ie_buf,
4109 total_ie_buf_len,
4110 cfg->extra_buf,
4111 WL_EXTRA_BUF_MAX, bssidx);
4112 if (err)
4113 WL_ERR("vndr ie set error : %d\n", err);
4114 }
4115
4116exit:
4117 kfree(iovar_ie_buf);
4118 return err;
4119}
4120
4121static s32
4122brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4123 struct cfg80211_ap_settings *settings)
4124{
4125 s32 ie_offset;
4126 struct brcmf_tlv *ssid_ie;
4127 struct brcmf_ssid_le ssid_le;
4128 s32 ioctl_value;
4129 s32 err = -EPERM;
4130 struct brcmf_tlv *rsn_ie;
4131 struct brcmf_vs_tlv *wpa_ie;
4132 struct brcmf_join_params join_params;
4133 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4134 s32 bssidx = 0;
4135
4136 WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
4137 settings->channel_type, settings->beacon_interval,
4138 settings->dtim_period);
4139 WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n",
4140 settings->ssid, settings->ssid_len, settings->auth_type,
4141 settings->inactivity_timeout);
4142
4143 if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
4144 WL_ERR("Not in AP creation mode\n");
4145 return -EPERM;
4146 }
4147
4148 memset(&ssid_le, 0, sizeof(ssid_le));
4149 if (settings->ssid == NULL || settings->ssid_len == 0) {
4150 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
4151 ssid_ie = brcmf_parse_tlvs(
4152 (u8 *)&settings->beacon.head[ie_offset],
4153 settings->beacon.head_len - ie_offset,
4154 WLAN_EID_SSID);
4155 if (!ssid_ie)
4156 return -EINVAL;
4157
4158 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
4159 ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
4160 WL_TRACE("SSID is (%s) in Head\n", ssid_le.SSID);
4161 } else {
4162 memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
4163 ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
4164 }
4165
4166 brcmf_set_mpc(ndev, 0);
4167 ioctl_value = 1;
4168 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
4169 if (err < 0) {
4170 WL_ERR("BRCMF_C_DOWN error %d\n", err);
4171 goto exit;
4172 }
4173 ioctl_value = 1;
4174 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
4175 if (err < 0) {
4176 WL_ERR("SET INFRA error %d\n", err);
4177 goto exit;
4178 }
4179 ioctl_value = 1;
4180 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
4181 if (err < 0) {
4182 WL_ERR("setting AP mode failed %d\n", err);
4183 goto exit;
4184 }
4185
4186 /* find the RSN_IE */
4187 rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
4188 settings->beacon.tail_len, WLAN_EID_RSN);
4189
4190 /* find the WPA_IE */
4191 wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
4192 settings->beacon.tail_len);
4193
4194 kfree(cfg->ap_info->rsn_ie);
4195 cfg->ap_info->rsn_ie = NULL;
4196 kfree(cfg->ap_info->wpa_ie);
4197 cfg->ap_info->wpa_ie = NULL;
4198
4199 if ((wpa_ie != NULL || rsn_ie != NULL)) {
4200 WL_TRACE("WPA(2) IE is found\n");
4201 if (wpa_ie != NULL) {
4202 /* WPA IE */
4203 err = brcmf_configure_wpaie(ndev, wpa_ie, false,
4204 bssidx);
4205 if (err < 0)
4206 goto exit;
4207 cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
4208 wpa_ie->len +
4209 TLV_HDR_LEN,
4210 GFP_KERNEL);
4211 } else {
4212 /* RSN IE */
4213 err = brcmf_configure_wpaie(ndev,
4214 (struct brcmf_vs_tlv *)rsn_ie, true, bssidx);
4215 if (err < 0)
4216 goto exit;
4217 cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
4218 rsn_ie->len +
4219 TLV_HDR_LEN,
4220 GFP_KERNEL);
4221 }
4222 cfg->ap_info->security_mode = true;
4223 } else {
4224 WL_TRACE("No WPA(2) IEs found\n");
4225 brcmf_configure_opensecurity(ndev, bssidx);
4226 cfg->ap_info->security_mode = false;
4227 }
4228 /* Set Beacon IEs to FW */
4229 err = brcmf_set_management_ie(cfg, ndev, bssidx,
4230 VNDR_IE_BEACON_FLAG,
4231 (u8 *)settings->beacon.tail,
4232 settings->beacon.tail_len);
4233 if (err)
4234 WL_ERR("Set Beacon IE Failed\n");
4235 else
4236 WL_TRACE("Applied Vndr IEs for Beacon\n");
4237
4238 /* Set Probe Response IEs to FW */
4239 err = brcmf_set_management_ie(cfg, ndev, bssidx,
4240 VNDR_IE_PRBRSP_FLAG,
4241 (u8 *)settings->beacon.proberesp_ies,
4242 settings->beacon.proberesp_ies_len);
4243 if (err)
4244 WL_ERR("Set Probe Resp IE Failed\n");
4245 else
4246 WL_TRACE("Applied Vndr IEs for Probe Resp\n");
4247
4248 if (settings->beacon_interval) {
4249 ioctl_value = settings->beacon_interval;
4250 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
4251 &ioctl_value);
4252 if (err < 0) {
4253 WL_ERR("Beacon Interval Set Error, %d\n", err);
4254 goto exit;
4255 }
4256 }
4257 if (settings->dtim_period) {
4258 ioctl_value = settings->dtim_period;
4259 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
4260 &ioctl_value);
4261 if (err < 0) {
4262 WL_ERR("DTIM Interval Set Error, %d\n", err);
4263 goto exit;
4264 }
4265 }
4266 ioctl_value = 1;
4267 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
4268 if (err < 0) {
4269 WL_ERR("BRCMF_C_UP error (%d)\n", err);
4270 goto exit;
4271 }
4272
4273 memset(&join_params, 0, sizeof(join_params));
4274 /* join parameters starts with ssid */
4275 memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
4276 /* create softap */
4277 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
4278 sizeof(join_params));
4279 if (err < 0) {
4280 WL_ERR("SET SSID error (%d)\n", err);
4281 goto exit;
4282 }
4283 clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
4284 set_bit(WL_STATUS_AP_CREATED, &cfg->status);
4285
4286exit:
4287 if (err)
4288 brcmf_set_mpc(ndev, 1);
4289 return err;
4290}
4291
4292static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
4293{
4294 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4295 s32 ioctl_value;
4296 s32 err = -EPERM;
4297
4298 WL_TRACE("Enter\n");
4299
4300 if (cfg->conf->mode == WL_MODE_AP) {
4301 /* Due to most likely deauths outstanding we sleep */
4302 /* first to make sure they get processed by fw. */
4303 msleep(400);
4304 ioctl_value = 0;
4305 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
4306 if (err < 0) {
4307 WL_ERR("setting AP mode failed %d\n", err);
4308 goto exit;
4309 }
4310 ioctl_value = 0;
4311 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
4312 if (err < 0) {
4313 WL_ERR("BRCMF_C_UP error %d\n", err);
4314 goto exit;
4315 }
4316 brcmf_set_mpc(ndev, 1);
4317 clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
4318 clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
4319 }
4320exit:
4321 return err;
4322}
4323
4324static int
4325brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
4326 u8 *mac)
4327{
4328 struct brcmf_scb_val_le scbval;
4329 s32 err;
4330
4331 if (!mac)
4332 return -EFAULT;
4333
4334 WL_TRACE("Enter %pM\n", mac);
4335
4336 if (!check_sys_up(wiphy))
4337 return -EIO;
4338
4339 memcpy(&scbval.ea, mac, ETH_ALEN);
4340 scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
4341 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
4342 &scbval, sizeof(scbval));
4343 if (err)
4344 WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
4345
4346 WL_TRACE("Exit\n");
4347 return err;
4348}
4349
2729static struct cfg80211_ops wl_cfg80211_ops = { 4350static struct cfg80211_ops wl_cfg80211_ops = {
2730 .change_virtual_intf = brcmf_cfg80211_change_iface, 4351 .change_virtual_intf = brcmf_cfg80211_change_iface,
2731 .scan = brcmf_cfg80211_scan, 4352 .scan = brcmf_cfg80211_scan,
@@ -2748,7 +4369,18 @@ static struct cfg80211_ops wl_cfg80211_ops = {
2748 .resume = brcmf_cfg80211_resume, 4369 .resume = brcmf_cfg80211_resume,
2749 .set_pmksa = brcmf_cfg80211_set_pmksa, 4370 .set_pmksa = brcmf_cfg80211_set_pmksa,
2750 .del_pmksa = brcmf_cfg80211_del_pmksa, 4371 .del_pmksa = brcmf_cfg80211_del_pmksa,
2751 .flush_pmksa = brcmf_cfg80211_flush_pmksa 4372 .flush_pmksa = brcmf_cfg80211_flush_pmksa,
4373 .start_ap = brcmf_cfg80211_start_ap,
4374 .stop_ap = brcmf_cfg80211_stop_ap,
4375 .del_station = brcmf_cfg80211_del_station,
4376#ifndef CONFIG_BRCMISCAN
4377 /* scheduled scan need e-scan, which is mutual exclusive with i-scan */
4378 .sched_scan_start = brcmf_cfg80211_sched_scan_start,
4379 .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
4380#endif
4381#ifdef CONFIG_NL80211_TESTMODE
4382 .testmode_cmd = brcmf_cfg80211_testmode
4383#endif
2752}; 4384};
2753 4385
2754static s32 brcmf_mode_to_nl80211_iftype(s32 mode) 4386static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
@@ -2767,8 +4399,18 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
2767 return err; 4399 return err;
2768} 4400}
2769 4401
2770static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface, 4402static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
2771 struct device *ndev) 4403{
4404#ifndef CONFIG_BRCMFISCAN
4405 /* scheduled scan settings */
4406 wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
4407 wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
4408 wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
4409 wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
4410#endif
4411}
4412
4413static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
2772{ 4414{
2773 struct wireless_dev *wdev; 4415 struct wireless_dev *wdev;
2774 s32 err = 0; 4416 s32 err = 0;
@@ -2777,9 +4419,8 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
2777 if (!wdev) 4419 if (!wdev)
2778 return ERR_PTR(-ENOMEM); 4420 return ERR_PTR(-ENOMEM);
2779 4421
2780 wdev->wiphy = 4422 wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
2781 wiphy_new(&wl_cfg80211_ops, 4423 sizeof(struct brcmf_cfg80211_info));
2782 sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
2783 if (!wdev->wiphy) { 4424 if (!wdev->wiphy) {
2784 WL_ERR("Could not allocate wiphy device\n"); 4425 WL_ERR("Could not allocate wiphy device\n");
2785 err = -ENOMEM; 4426 err = -ENOMEM;
@@ -2788,8 +4429,9 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
2788 set_wiphy_dev(wdev->wiphy, ndev); 4429 set_wiphy_dev(wdev->wiphy, ndev);
2789 wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; 4430 wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
2790 wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; 4431 wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
2791 wdev->wiphy->interface_modes = 4432 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2792 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); 4433 BIT(NL80211_IFTYPE_ADHOC) |
4434 BIT(NL80211_IFTYPE_AP);
2793 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; 4435 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
2794 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set 4436 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
2795 * it as 11a by default. 4437 * it as 11a by default.
@@ -2805,6 +4447,7 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
2805 * save mode 4447 * save mode
2806 * by default 4448 * by default
2807 */ 4449 */
4450 brcmf_wiphy_pno_params(wdev->wiphy);
2808 err = wiphy_register(wdev->wiphy); 4451 err = wiphy_register(wdev->wiphy);
2809 if (err < 0) { 4452 if (err < 0) {
2810 WL_ERR("Could not register wiphy device (%d)\n", err); 4453 WL_ERR("Could not register wiphy device (%d)\n", err);
@@ -2821,9 +4464,9 @@ wiphy_new_out:
2821 return ERR_PTR(err); 4464 return ERR_PTR(err);
2822} 4465}
2823 4466
2824static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv) 4467static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
2825{ 4468{
2826 struct wireless_dev *wdev = cfg_priv->wdev; 4469 struct wireless_dev *wdev = cfg->wdev;
2827 4470
2828 if (!wdev) { 4471 if (!wdev) {
2829 WL_ERR("wdev is invalid\n"); 4472 WL_ERR("wdev is invalid\n");
@@ -2832,10 +4475,10 @@ static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
2832 wiphy_unregister(wdev->wiphy); 4475 wiphy_unregister(wdev->wiphy);
2833 wiphy_free(wdev->wiphy); 4476 wiphy_free(wdev->wiphy);
2834 kfree(wdev); 4477 kfree(wdev);
2835 cfg_priv->wdev = NULL; 4478 cfg->wdev = NULL;
2836} 4479}
2837 4480
2838static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv, 4481static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
2839 const struct brcmf_event_msg *e) 4482 const struct brcmf_event_msg *e)
2840{ 4483{
2841 u32 event = be32_to_cpu(e->event_type); 4484 u32 event = be32_to_cpu(e->event_type);
@@ -2843,14 +4486,14 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
2843 4486
2844 if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) { 4487 if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
2845 WL_CONN("Processing set ssid\n"); 4488 WL_CONN("Processing set ssid\n");
2846 cfg_priv->link_up = true; 4489 cfg->link_up = true;
2847 return true; 4490 return true;
2848 } 4491 }
2849 4492
2850 return false; 4493 return false;
2851} 4494}
2852 4495
2853static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv, 4496static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
2854 const struct brcmf_event_msg *e) 4497 const struct brcmf_event_msg *e)
2855{ 4498{
2856 u32 event = be32_to_cpu(e->event_type); 4499 u32 event = be32_to_cpu(e->event_type);
@@ -2863,7 +4506,7 @@ static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
2863 return false; 4506 return false;
2864} 4507}
2865 4508
2866static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv, 4509static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
2867 const struct brcmf_event_msg *e) 4510 const struct brcmf_event_msg *e)
2868{ 4511{
2869 u32 event = be32_to_cpu(e->event_type); 4512 u32 event = be32_to_cpu(e->event_type);
@@ -2884,9 +4527,9 @@ static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
2884 return false; 4527 return false;
2885} 4528}
2886 4529
2887static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) 4530static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
2888{ 4531{
2889 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4532 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
2890 4533
2891 kfree(conn_info->req_ie); 4534 kfree(conn_info->req_ie);
2892 conn_info->req_ie = NULL; 4535 conn_info->req_ie = NULL;
@@ -2896,30 +4539,30 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2896 conn_info->resp_ie_len = 0; 4539 conn_info->resp_ie_len = 0;
2897} 4540}
2898 4541
2899static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) 4542static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
2900{ 4543{
2901 struct net_device *ndev = cfg_to_ndev(cfg_priv); 4544 struct net_device *ndev = cfg_to_ndev(cfg);
2902 struct brcmf_cfg80211_assoc_ielen_le *assoc_info; 4545 struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
2903 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4546 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
2904 u32 req_len; 4547 u32 req_len;
2905 u32 resp_len; 4548 u32 resp_len;
2906 s32 err = 0; 4549 s32 err = 0;
2907 4550
2908 brcmf_clear_assoc_ies(cfg_priv); 4551 brcmf_clear_assoc_ies(cfg);
2909 4552
2910 err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf, 4553 err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
2911 WL_ASSOC_INFO_MAX); 4554 WL_ASSOC_INFO_MAX);
2912 if (err) { 4555 if (err) {
2913 WL_ERR("could not get assoc info (%d)\n", err); 4556 WL_ERR("could not get assoc info (%d)\n", err);
2914 return err; 4557 return err;
2915 } 4558 }
2916 assoc_info = 4559 assoc_info =
2917 (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf; 4560 (struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
2918 req_len = le32_to_cpu(assoc_info->req_len); 4561 req_len = le32_to_cpu(assoc_info->req_len);
2919 resp_len = le32_to_cpu(assoc_info->resp_len); 4562 resp_len = le32_to_cpu(assoc_info->resp_len);
2920 if (req_len) { 4563 if (req_len) {
2921 err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies", 4564 err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
2922 cfg_priv->extra_buf, 4565 cfg->extra_buf,
2923 WL_ASSOC_INFO_MAX); 4566 WL_ASSOC_INFO_MAX);
2924 if (err) { 4567 if (err) {
2925 WL_ERR("could not get assoc req (%d)\n", err); 4568 WL_ERR("could not get assoc req (%d)\n", err);
@@ -2927,7 +4570,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2927 } 4570 }
2928 conn_info->req_ie_len = req_len; 4571 conn_info->req_ie_len = req_len;
2929 conn_info->req_ie = 4572 conn_info->req_ie =
2930 kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len, 4573 kmemdup(cfg->extra_buf, conn_info->req_ie_len,
2931 GFP_KERNEL); 4574 GFP_KERNEL);
2932 } else { 4575 } else {
2933 conn_info->req_ie_len = 0; 4576 conn_info->req_ie_len = 0;
@@ -2935,7 +4578,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2935 } 4578 }
2936 if (resp_len) { 4579 if (resp_len) {
2937 err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies", 4580 err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
2938 cfg_priv->extra_buf, 4581 cfg->extra_buf,
2939 WL_ASSOC_INFO_MAX); 4582 WL_ASSOC_INFO_MAX);
2940 if (err) { 4583 if (err) {
2941 WL_ERR("could not get assoc resp (%d)\n", err); 4584 WL_ERR("could not get assoc resp (%d)\n", err);
@@ -2943,7 +4586,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2943 } 4586 }
2944 conn_info->resp_ie_len = resp_len; 4587 conn_info->resp_ie_len = resp_len;
2945 conn_info->resp_ie = 4588 conn_info->resp_ie =
2946 kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len, 4589 kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
2947 GFP_KERNEL); 4590 GFP_KERNEL);
2948 } else { 4591 } else {
2949 conn_info->resp_ie_len = 0; 4592 conn_info->resp_ie_len = 0;
@@ -2956,12 +4599,13 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2956} 4599}
2957 4600
2958static s32 4601static s32
2959brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv, 4602brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
2960 struct net_device *ndev, 4603 struct net_device *ndev,
2961 const struct brcmf_event_msg *e) 4604 const struct brcmf_event_msg *e)
2962{ 4605{
2963 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4606 struct brcmf_cfg80211_profile *profile = cfg->profile;
2964 struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); 4607 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
4608 struct wiphy *wiphy = cfg_to_wiphy(cfg);
2965 struct brcmf_channel_info_le channel_le; 4609 struct brcmf_channel_info_le channel_le;
2966 struct ieee80211_channel *notify_channel; 4610 struct ieee80211_channel *notify_channel;
2967 struct ieee80211_supported_band *band; 4611 struct ieee80211_supported_band *band;
@@ -2971,9 +4615,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
2971 4615
2972 WL_TRACE("Enter\n"); 4616 WL_TRACE("Enter\n");
2973 4617
2974 brcmf_get_assoc_ies(cfg_priv); 4618 brcmf_get_assoc_ies(cfg);
2975 brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID); 4619 memcpy(profile->bssid, e->addr, ETH_ALEN);
2976 brcmf_update_bss_info(cfg_priv); 4620 brcmf_update_bss_info(cfg);
2977 4621
2978 brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le, 4622 brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
2979 sizeof(channel_le)); 4623 sizeof(channel_le));
@@ -2989,37 +4633,35 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
2989 freq = ieee80211_channel_to_frequency(target_channel, band->band); 4633 freq = ieee80211_channel_to_frequency(target_channel, band->band);
2990 notify_channel = ieee80211_get_channel(wiphy, freq); 4634 notify_channel = ieee80211_get_channel(wiphy, freq);
2991 4635
2992 cfg80211_roamed(ndev, notify_channel, 4636 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
2993 (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
2994 conn_info->req_ie, conn_info->req_ie_len, 4637 conn_info->req_ie, conn_info->req_ie_len,
2995 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); 4638 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
2996 WL_CONN("Report roaming result\n"); 4639 WL_CONN("Report roaming result\n");
2997 4640
2998 set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 4641 set_bit(WL_STATUS_CONNECTED, &cfg->status);
2999 WL_TRACE("Exit\n"); 4642 WL_TRACE("Exit\n");
3000 return err; 4643 return err;
3001} 4644}
3002 4645
3003static s32 4646static s32
3004brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv, 4647brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
3005 struct net_device *ndev, const struct brcmf_event_msg *e, 4648 struct net_device *ndev, const struct brcmf_event_msg *e,
3006 bool completed) 4649 bool completed)
3007{ 4650{
3008 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4651 struct brcmf_cfg80211_profile *profile = cfg->profile;
4652 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
3009 s32 err = 0; 4653 s32 err = 0;
3010 4654
3011 WL_TRACE("Enter\n"); 4655 WL_TRACE("Enter\n");
3012 4656
3013 if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { 4657 if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
3014 if (completed) { 4658 if (completed) {
3015 brcmf_get_assoc_ies(cfg_priv); 4659 brcmf_get_assoc_ies(cfg);
3016 brcmf_update_prof(cfg_priv, NULL, &e->addr, 4660 memcpy(profile->bssid, e->addr, ETH_ALEN);
3017 WL_PROF_BSSID); 4661 brcmf_update_bss_info(cfg);
3018 brcmf_update_bss_info(cfg_priv);
3019 } 4662 }
3020 cfg80211_connect_result(ndev, 4663 cfg80211_connect_result(ndev,
3021 (u8 *)brcmf_read_prof(cfg_priv, 4664 (u8 *)profile->bssid,
3022 WL_PROF_BSSID),
3023 conn_info->req_ie, 4665 conn_info->req_ie,
3024 conn_info->req_ie_len, 4666 conn_info->req_ie_len,
3025 conn_info->resp_ie, 4667 conn_info->resp_ie,
@@ -3028,7 +4670,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
3028 WLAN_STATUS_AUTH_TIMEOUT, 4670 WLAN_STATUS_AUTH_TIMEOUT,
3029 GFP_KERNEL); 4671 GFP_KERNEL);
3030 if (completed) 4672 if (completed)
3031 set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 4673 set_bit(WL_STATUS_CONNECTED, &cfg->status);
3032 WL_CONN("Report connect result - connection %s\n", 4674 WL_CONN("Report connect result - connection %s\n",
3033 completed ? "succeeded" : "failed"); 4675 completed ? "succeeded" : "failed");
3034 } 4676 }
@@ -3037,52 +4679,93 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
3037} 4679}
3038 4680
3039static s32 4681static s32
3040brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv, 4682brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
4683 struct net_device *ndev,
4684 const struct brcmf_event_msg *e, void *data)
4685{
4686 s32 err = 0;
4687 u32 event = be32_to_cpu(e->event_type);
4688 u32 reason = be32_to_cpu(e->reason);
4689 u32 len = be32_to_cpu(e->datalen);
4690 static int generation;
4691
4692 struct station_info sinfo;
4693
4694 WL_CONN("event %d, reason %d\n", event, reason);
4695 memset(&sinfo, 0, sizeof(sinfo));
4696
4697 sinfo.filled = 0;
4698 if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
4699 reason == BRCMF_E_STATUS_SUCCESS) {
4700 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
4701 if (!data) {
4702 WL_ERR("No IEs present in ASSOC/REASSOC_IND");
4703 return -EINVAL;
4704 }
4705 sinfo.assoc_req_ies = data;
4706 sinfo.assoc_req_ies_len = len;
4707 generation++;
4708 sinfo.generation = generation;
4709 cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
4710 } else if ((event == BRCMF_E_DISASSOC_IND) ||
4711 (event == BRCMF_E_DEAUTH_IND) ||
4712 (event == BRCMF_E_DEAUTH)) {
4713 generation++;
4714 sinfo.generation = generation;
4715 cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
4716 }
4717 return err;
4718}
4719
4720static s32
4721brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
3041 struct net_device *ndev, 4722 struct net_device *ndev,
3042 const struct brcmf_event_msg *e, void *data) 4723 const struct brcmf_event_msg *e, void *data)
3043{ 4724{
4725 struct brcmf_cfg80211_profile *profile = cfg->profile;
3044 s32 err = 0; 4726 s32 err = 0;
3045 4727
3046 if (brcmf_is_linkup(cfg_priv, e)) { 4728 if (cfg->conf->mode == WL_MODE_AP) {
4729 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
4730 } else if (brcmf_is_linkup(cfg, e)) {
3047 WL_CONN("Linkup\n"); 4731 WL_CONN("Linkup\n");
3048 if (brcmf_is_ibssmode(cfg_priv)) { 4732 if (brcmf_is_ibssmode(cfg)) {
3049 brcmf_update_prof(cfg_priv, NULL, (void *)e->addr, 4733 memcpy(profile->bssid, e->addr, ETH_ALEN);
3050 WL_PROF_BSSID); 4734 wl_inform_ibss(cfg, ndev, e->addr);
3051 wl_inform_ibss(cfg_priv, ndev, e->addr);
3052 cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL); 4735 cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
3053 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 4736 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
3054 set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 4737 set_bit(WL_STATUS_CONNECTED, &cfg->status);
3055 } else 4738 } else
3056 brcmf_bss_connect_done(cfg_priv, ndev, e, true); 4739 brcmf_bss_connect_done(cfg, ndev, e, true);
3057 } else if (brcmf_is_linkdown(cfg_priv, e)) { 4740 } else if (brcmf_is_linkdown(cfg, e)) {
3058 WL_CONN("Linkdown\n"); 4741 WL_CONN("Linkdown\n");
3059 if (brcmf_is_ibssmode(cfg_priv)) { 4742 if (brcmf_is_ibssmode(cfg)) {
3060 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 4743 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
3061 if (test_and_clear_bit(WL_STATUS_CONNECTED, 4744 if (test_and_clear_bit(WL_STATUS_CONNECTED,
3062 &cfg_priv->status)) 4745 &cfg->status))
3063 brcmf_link_down(cfg_priv); 4746 brcmf_link_down(cfg);
3064 } else { 4747 } else {
3065 brcmf_bss_connect_done(cfg_priv, ndev, e, false); 4748 brcmf_bss_connect_done(cfg, ndev, e, false);
3066 if (test_and_clear_bit(WL_STATUS_CONNECTED, 4749 if (test_and_clear_bit(WL_STATUS_CONNECTED,
3067 &cfg_priv->status)) { 4750 &cfg->status)) {
3068 cfg80211_disconnected(ndev, 0, NULL, 0, 4751 cfg80211_disconnected(ndev, 0, NULL, 0,
3069 GFP_KERNEL); 4752 GFP_KERNEL);
3070 brcmf_link_down(cfg_priv); 4753 brcmf_link_down(cfg);
3071 } 4754 }
3072 } 4755 }
3073 brcmf_init_prof(cfg_priv->profile); 4756 brcmf_init_prof(cfg->profile);
3074 } else if (brcmf_is_nonetwork(cfg_priv, e)) { 4757 } else if (brcmf_is_nonetwork(cfg, e)) {
3075 if (brcmf_is_ibssmode(cfg_priv)) 4758 if (brcmf_is_ibssmode(cfg))
3076 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 4759 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
3077 else 4760 else
3078 brcmf_bss_connect_done(cfg_priv, ndev, e, false); 4761 brcmf_bss_connect_done(cfg, ndev, e, false);
3079 } 4762 }
3080 4763
3081 return err; 4764 return err;
3082} 4765}
3083 4766
3084static s32 4767static s32
3085brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv, 4768brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
3086 struct net_device *ndev, 4769 struct net_device *ndev,
3087 const struct brcmf_event_msg *e, void *data) 4770 const struct brcmf_event_msg *e, void *data)
3088{ 4771{
@@ -3091,17 +4774,17 @@ brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
3091 u32 status = be32_to_cpu(e->status); 4774 u32 status = be32_to_cpu(e->status);
3092 4775
3093 if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { 4776 if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
3094 if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) 4777 if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
3095 brcmf_bss_roaming_done(cfg_priv, ndev, e); 4778 brcmf_bss_roaming_done(cfg, ndev, e);
3096 else 4779 else
3097 brcmf_bss_connect_done(cfg_priv, ndev, e, true); 4780 brcmf_bss_connect_done(cfg, ndev, e, true);
3098 } 4781 }
3099 4782
3100 return err; 4783 return err;
3101} 4784}
3102 4785
3103static s32 4786static s32
3104brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv, 4787brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
3105 struct net_device *ndev, 4788 struct net_device *ndev,
3106 const struct brcmf_event_msg *e, void *data) 4789 const struct brcmf_event_msg *e, void *data)
3107{ 4790{
@@ -3120,7 +4803,7 @@ brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
3120} 4803}
3121 4804
3122static s32 4805static s32
3123brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv, 4806brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
3124 struct net_device *ndev, 4807 struct net_device *ndev,
3125 const struct brcmf_event_msg *e, void *data) 4808 const struct brcmf_event_msg *e, void *data)
3126{ 4809{
@@ -3133,12 +4816,12 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
3133 4816
3134 WL_TRACE("Enter\n"); 4817 WL_TRACE("Enter\n");
3135 4818
3136 if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) { 4819 if (cfg->iscan_on && cfg->iscan_kickstart) {
3137 WL_TRACE("Exit\n"); 4820 WL_TRACE("Exit\n");
3138 return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv)); 4821 return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
3139 } 4822 }
3140 4823
3141 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { 4824 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
3142 WL_ERR("Scan complete while device not scanning\n"); 4825 WL_ERR("Scan complete while device not scanning\n");
3143 scan_abort = true; 4826 scan_abort = true;
3144 err = -EINVAL; 4827 err = -EINVAL;
@@ -3155,35 +4838,33 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
3155 scan_channel = le32_to_cpu(channel_inform_le.scan_channel); 4838 scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
3156 if (scan_channel) 4839 if (scan_channel)
3157 WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel); 4840 WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
3158 cfg_priv->bss_list = cfg_priv->scan_results; 4841 cfg->bss_list = cfg->scan_results;
3159 bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list; 4842 bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
3160 4843
3161 memset(cfg_priv->scan_results, 0, len); 4844 memset(cfg->scan_results, 0, len);
3162 bss_list_le->buflen = cpu_to_le32(len); 4845 bss_list_le->buflen = cpu_to_le32(len);
3163 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS, 4846 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
3164 cfg_priv->scan_results, len); 4847 cfg->scan_results, len);
3165 if (err) { 4848 if (err) {
3166 WL_ERR("%s Scan_results error (%d)\n", ndev->name, err); 4849 WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
3167 err = -EINVAL; 4850 err = -EINVAL;
3168 scan_abort = true; 4851 scan_abort = true;
3169 goto scan_done_out; 4852 goto scan_done_out;
3170 } 4853 }
3171 cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen); 4854 cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
3172 cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version); 4855 cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
3173 cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count); 4856 cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
3174 4857
3175 err = brcmf_inform_bss(cfg_priv); 4858 err = brcmf_inform_bss(cfg);
3176 if (err) { 4859 if (err)
3177 scan_abort = true; 4860 scan_abort = true;
3178 goto scan_done_out;
3179 }
3180 4861
3181scan_done_out: 4862scan_done_out:
3182 if (cfg_priv->scan_request) { 4863 if (cfg->scan_request) {
3183 WL_SCAN("calling cfg80211_scan_done\n"); 4864 WL_SCAN("calling cfg80211_scan_done\n");
3184 cfg80211_scan_done(cfg_priv->scan_request, scan_abort); 4865 cfg80211_scan_done(cfg->scan_request, scan_abort);
3185 brcmf_set_mpc(ndev, 1); 4866 brcmf_set_mpc(ndev, 1);
3186 cfg_priv->scan_request = NULL; 4867 cfg->scan_request = NULL;
3187 } 4868 }
3188 4869
3189 WL_TRACE("Exit\n"); 4870 WL_TRACE("Exit\n");
@@ -3206,68 +4887,85 @@ static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
3206 memset(el, 0, sizeof(*el)); 4887 memset(el, 0, sizeof(*el));
3207 el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status; 4888 el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
3208 el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status; 4889 el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
4890 el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status;
4891 el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status;
4892 el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status;
4893 el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status;
4894 el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status;
3209 el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status; 4895 el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
3210 el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status; 4896 el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
3211 el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status; 4897 el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
4898 el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results;
4899}
4900
4901static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
4902{
4903 kfree(cfg->scan_results);
4904 cfg->scan_results = NULL;
4905 kfree(cfg->bss_info);
4906 cfg->bss_info = NULL;
4907 kfree(cfg->conf);
4908 cfg->conf = NULL;
4909 kfree(cfg->profile);
4910 cfg->profile = NULL;
4911 kfree(cfg->scan_req_int);
4912 cfg->scan_req_int = NULL;
4913 kfree(cfg->escan_ioctl_buf);
4914 cfg->escan_ioctl_buf = NULL;
4915 kfree(cfg->dcmd_buf);
4916 cfg->dcmd_buf = NULL;
4917 kfree(cfg->extra_buf);
4918 cfg->extra_buf = NULL;
4919 kfree(cfg->iscan);
4920 cfg->iscan = NULL;
4921 kfree(cfg->pmk_list);
4922 cfg->pmk_list = NULL;
4923 if (cfg->ap_info) {
4924 kfree(cfg->ap_info->wpa_ie);
4925 kfree(cfg->ap_info->rsn_ie);
4926 kfree(cfg->ap_info);
4927 cfg->ap_info = NULL;
4928 }
3212} 4929}
3213 4930
3214static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv) 4931static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
3215{ 4932{
3216 kfree(cfg_priv->scan_results); 4933 cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
3217 cfg_priv->scan_results = NULL; 4934 if (!cfg->scan_results)
3218 kfree(cfg_priv->bss_info);
3219 cfg_priv->bss_info = NULL;
3220 kfree(cfg_priv->conf);
3221 cfg_priv->conf = NULL;
3222 kfree(cfg_priv->profile);
3223 cfg_priv->profile = NULL;
3224 kfree(cfg_priv->scan_req_int);
3225 cfg_priv->scan_req_int = NULL;
3226 kfree(cfg_priv->dcmd_buf);
3227 cfg_priv->dcmd_buf = NULL;
3228 kfree(cfg_priv->extra_buf);
3229 cfg_priv->extra_buf = NULL;
3230 kfree(cfg_priv->iscan);
3231 cfg_priv->iscan = NULL;
3232 kfree(cfg_priv->pmk_list);
3233 cfg_priv->pmk_list = NULL;
3234}
3235
3236static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
3237{
3238 cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
3239 if (!cfg_priv->scan_results)
3240 goto init_priv_mem_out; 4935 goto init_priv_mem_out;
3241 cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL); 4936 cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
3242 if (!cfg_priv->conf) 4937 if (!cfg->conf)
3243 goto init_priv_mem_out; 4938 goto init_priv_mem_out;
3244 cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL); 4939 cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
3245 if (!cfg_priv->profile) 4940 if (!cfg->profile)
3246 goto init_priv_mem_out; 4941 goto init_priv_mem_out;
3247 cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); 4942 cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
3248 if (!cfg_priv->bss_info) 4943 if (!cfg->bss_info)
3249 goto init_priv_mem_out; 4944 goto init_priv_mem_out;
3250 cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int), 4945 cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
3251 GFP_KERNEL); 4946 GFP_KERNEL);
3252 if (!cfg_priv->scan_req_int) 4947 if (!cfg->scan_req_int)
4948 goto init_priv_mem_out;
4949 cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
4950 if (!cfg->escan_ioctl_buf)
3253 goto init_priv_mem_out; 4951 goto init_priv_mem_out;
3254 cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL); 4952 cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
3255 if (!cfg_priv->dcmd_buf) 4953 if (!cfg->dcmd_buf)
3256 goto init_priv_mem_out; 4954 goto init_priv_mem_out;
3257 cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); 4955 cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
3258 if (!cfg_priv->extra_buf) 4956 if (!cfg->extra_buf)
3259 goto init_priv_mem_out; 4957 goto init_priv_mem_out;
3260 cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL); 4958 cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
3261 if (!cfg_priv->iscan) 4959 if (!cfg->iscan)
3262 goto init_priv_mem_out; 4960 goto init_priv_mem_out;
3263 cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL); 4961 cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
3264 if (!cfg_priv->pmk_list) 4962 if (!cfg->pmk_list)
3265 goto init_priv_mem_out; 4963 goto init_priv_mem_out;
3266 4964
3267 return 0; 4965 return 0;
3268 4966
3269init_priv_mem_out: 4967init_priv_mem_out:
3270 brcmf_deinit_priv_mem(cfg_priv); 4968 brcmf_deinit_priv_mem(cfg);
3271 4969
3272 return -ENOMEM; 4970 return -ENOMEM;
3273} 4971}
@@ -3277,17 +4975,17 @@ init_priv_mem_out:
3277*/ 4975*/
3278 4976
3279static struct brcmf_cfg80211_event_q *brcmf_deq_event( 4977static struct brcmf_cfg80211_event_q *brcmf_deq_event(
3280 struct brcmf_cfg80211_priv *cfg_priv) 4978 struct brcmf_cfg80211_info *cfg)
3281{ 4979{
3282 struct brcmf_cfg80211_event_q *e = NULL; 4980 struct brcmf_cfg80211_event_q *e = NULL;
3283 4981
3284 spin_lock_irq(&cfg_priv->evt_q_lock); 4982 spin_lock_irq(&cfg->evt_q_lock);
3285 if (!list_empty(&cfg_priv->evt_q_list)) { 4983 if (!list_empty(&cfg->evt_q_list)) {
3286 e = list_first_entry(&cfg_priv->evt_q_list, 4984 e = list_first_entry(&cfg->evt_q_list,
3287 struct brcmf_cfg80211_event_q, evt_q_list); 4985 struct brcmf_cfg80211_event_q, evt_q_list);
3288 list_del(&e->evt_q_list); 4986 list_del(&e->evt_q_list);
3289 } 4987 }
3290 spin_unlock_irq(&cfg_priv->evt_q_lock); 4988 spin_unlock_irq(&cfg->evt_q_lock);
3291 4989
3292 return e; 4990 return e;
3293} 4991}
@@ -3299,23 +4997,33 @@ static struct brcmf_cfg80211_event_q *brcmf_deq_event(
3299*/ 4997*/
3300 4998
3301static s32 4999static s32
3302brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event, 5000brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
3303 const struct brcmf_event_msg *msg) 5001 const struct brcmf_event_msg *msg, void *data)
3304{ 5002{
3305 struct brcmf_cfg80211_event_q *e; 5003 struct brcmf_cfg80211_event_q *e;
3306 s32 err = 0; 5004 s32 err = 0;
3307 ulong flags; 5005 ulong flags;
5006 u32 data_len;
5007 u32 total_len;
3308 5008
3309 e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC); 5009 total_len = sizeof(struct brcmf_cfg80211_event_q);
5010 if (data)
5011 data_len = be32_to_cpu(msg->datalen);
5012 else
5013 data_len = 0;
5014 total_len += data_len;
5015 e = kzalloc(total_len, GFP_ATOMIC);
3310 if (!e) 5016 if (!e)
3311 return -ENOMEM; 5017 return -ENOMEM;
3312 5018
3313 e->etype = event; 5019 e->etype = event;
3314 memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg)); 5020 memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
5021 if (data)
5022 memcpy(&e->edata, data, data_len);
3315 5023
3316 spin_lock_irqsave(&cfg_priv->evt_q_lock, flags); 5024 spin_lock_irqsave(&cfg->evt_q_lock, flags);
3317 list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list); 5025 list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
3318 spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags); 5026 spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
3319 5027
3320 return err; 5028 return err;
3321} 5029}
@@ -3327,12 +5035,12 @@ static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
3327 5035
3328static void brcmf_cfg80211_event_handler(struct work_struct *work) 5036static void brcmf_cfg80211_event_handler(struct work_struct *work)
3329{ 5037{
3330 struct brcmf_cfg80211_priv *cfg_priv = 5038 struct brcmf_cfg80211_info *cfg =
3331 container_of(work, struct brcmf_cfg80211_priv, 5039 container_of(work, struct brcmf_cfg80211_info,
3332 event_work); 5040 event_work);
3333 struct brcmf_cfg80211_event_q *e; 5041 struct brcmf_cfg80211_event_q *e;
3334 5042
3335 e = brcmf_deq_event(cfg_priv); 5043 e = brcmf_deq_event(cfg);
3336 if (unlikely(!e)) { 5044 if (unlikely(!e)) {
3337 WL_ERR("event queue empty...\n"); 5045 WL_ERR("event queue empty...\n");
3338 return; 5046 return;
@@ -3340,137 +5048,131 @@ static void brcmf_cfg80211_event_handler(struct work_struct *work)
3340 5048
3341 do { 5049 do {
3342 WL_INFO("event type (%d)\n", e->etype); 5050 WL_INFO("event type (%d)\n", e->etype);
3343 if (cfg_priv->el.handler[e->etype]) 5051 if (cfg->el.handler[e->etype])
3344 cfg_priv->el.handler[e->etype](cfg_priv, 5052 cfg->el.handler[e->etype](cfg,
3345 cfg_to_ndev(cfg_priv), 5053 cfg_to_ndev(cfg),
3346 &e->emsg, e->edata); 5054 &e->emsg, e->edata);
3347 else 5055 else
3348 WL_INFO("Unknown Event (%d): ignoring\n", e->etype); 5056 WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
3349 brcmf_put_event(e); 5057 brcmf_put_event(e);
3350 } while ((e = brcmf_deq_event(cfg_priv))); 5058 } while ((e = brcmf_deq_event(cfg)));
3351 5059
3352} 5060}
3353 5061
3354static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv) 5062static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
3355{ 5063{
3356 spin_lock_init(&cfg_priv->evt_q_lock); 5064 spin_lock_init(&cfg->evt_q_lock);
3357 INIT_LIST_HEAD(&cfg_priv->evt_q_list); 5065 INIT_LIST_HEAD(&cfg->evt_q_list);
3358} 5066}
3359 5067
3360static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv) 5068static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
3361{ 5069{
3362 struct brcmf_cfg80211_event_q *e; 5070 struct brcmf_cfg80211_event_q *e;
3363 5071
3364 spin_lock_irq(&cfg_priv->evt_q_lock); 5072 spin_lock_irq(&cfg->evt_q_lock);
3365 while (!list_empty(&cfg_priv->evt_q_list)) { 5073 while (!list_empty(&cfg->evt_q_list)) {
3366 e = list_first_entry(&cfg_priv->evt_q_list, 5074 e = list_first_entry(&cfg->evt_q_list,
3367 struct brcmf_cfg80211_event_q, evt_q_list); 5075 struct brcmf_cfg80211_event_q, evt_q_list);
3368 list_del(&e->evt_q_list); 5076 list_del(&e->evt_q_list);
3369 kfree(e); 5077 kfree(e);
3370 } 5078 }
3371 spin_unlock_irq(&cfg_priv->evt_q_lock); 5079 spin_unlock_irq(&cfg->evt_q_lock);
3372} 5080}
3373 5081
3374static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv) 5082static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
3375{ 5083{
3376 s32 err = 0; 5084 s32 err = 0;
3377 5085
3378 cfg_priv->scan_request = NULL; 5086 cfg->scan_request = NULL;
3379 cfg_priv->pwr_save = true; 5087 cfg->pwr_save = true;
3380 cfg_priv->iscan_on = true; /* iscan on & off switch. 5088#ifdef CONFIG_BRCMISCAN
5089 cfg->iscan_on = true; /* iscan on & off switch.
3381 we enable iscan per default */ 5090 we enable iscan per default */
3382 cfg_priv->roam_on = true; /* roam on & off switch. 5091 cfg->escan_on = false; /* escan on & off switch.
5092 we disable escan per default */
5093#else
5094 cfg->iscan_on = false; /* iscan on & off switch.
5095 we disable iscan per default */
5096 cfg->escan_on = true; /* escan on & off switch.
5097 we enable escan per default */
5098#endif
5099 cfg->roam_on = true; /* roam on & off switch.
3383 we enable roam per default */ 5100 we enable roam per default */
3384 5101
3385 cfg_priv->iscan_kickstart = false; 5102 cfg->iscan_kickstart = false;
3386 cfg_priv->active_scan = true; /* we do active scan for 5103 cfg->active_scan = true; /* we do active scan for
3387 specific scan per default */ 5104 specific scan per default */
3388 cfg_priv->dongle_up = false; /* dongle is not up yet */ 5105 cfg->dongle_up = false; /* dongle is not up yet */
3389 brcmf_init_eq(cfg_priv); 5106 brcmf_init_eq(cfg);
3390 err = brcmf_init_priv_mem(cfg_priv); 5107 err = brcmf_init_priv_mem(cfg);
3391 if (err) 5108 if (err)
3392 return err; 5109 return err;
3393 INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler); 5110 INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler);
3394 brcmf_init_eloop_handler(&cfg_priv->el); 5111 brcmf_init_eloop_handler(&cfg->el);
3395 mutex_init(&cfg_priv->usr_sync); 5112 mutex_init(&cfg->usr_sync);
3396 err = brcmf_init_iscan(cfg_priv); 5113 err = brcmf_init_iscan(cfg);
3397 if (err) 5114 if (err)
3398 return err; 5115 return err;
3399 brcmf_init_conf(cfg_priv->conf); 5116 brcmf_init_escan(cfg);
3400 brcmf_init_prof(cfg_priv->profile); 5117 brcmf_init_conf(cfg->conf);
3401 brcmf_link_down(cfg_priv); 5118 brcmf_init_prof(cfg->profile);
5119 brcmf_link_down(cfg);
3402 5120
3403 return err; 5121 return err;
3404} 5122}
3405 5123
3406static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv) 5124static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
3407{ 5125{
3408 cancel_work_sync(&cfg_priv->event_work); 5126 cancel_work_sync(&cfg->event_work);
3409 cfg_priv->dongle_up = false; /* dongle down */ 5127 cfg->dongle_up = false; /* dongle down */
3410 brcmf_flush_eq(cfg_priv); 5128 brcmf_flush_eq(cfg);
3411 brcmf_link_down(cfg_priv); 5129 brcmf_link_down(cfg);
3412 brcmf_term_iscan(cfg_priv); 5130 brcmf_abort_scanning(cfg);
3413 brcmf_deinit_priv_mem(cfg_priv); 5131 brcmf_deinit_priv_mem(cfg);
3414} 5132}
3415 5133
3416struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, 5134struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
3417 struct device *busdev, 5135 struct device *busdev,
3418 void *data) 5136 struct brcmf_pub *drvr)
3419{ 5137{
3420 struct wireless_dev *wdev; 5138 struct wireless_dev *wdev;
3421 struct brcmf_cfg80211_priv *cfg_priv; 5139 struct brcmf_cfg80211_info *cfg;
3422 struct brcmf_cfg80211_iface *ci;
3423 struct brcmf_cfg80211_dev *cfg_dev;
3424 s32 err = 0; 5140 s32 err = 0;
3425 5141
3426 if (!ndev) { 5142 if (!ndev) {
3427 WL_ERR("ndev is invalid\n"); 5143 WL_ERR("ndev is invalid\n");
3428 return NULL; 5144 return NULL;
3429 } 5145 }
3430 cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
3431 if (!cfg_dev)
3432 return NULL;
3433 5146
3434 wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev); 5147 wdev = brcmf_alloc_wdev(busdev);
3435 if (IS_ERR(wdev)) { 5148 if (IS_ERR(wdev)) {
3436 kfree(cfg_dev);
3437 return NULL; 5149 return NULL;
3438 } 5150 }
3439 5151
3440 wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS); 5152 wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
3441 cfg_priv = wdev_to_cfg(wdev); 5153 cfg = wdev_to_cfg(wdev);
3442 cfg_priv->wdev = wdev; 5154 cfg->wdev = wdev;
3443 cfg_priv->pub = data; 5155 cfg->pub = drvr;
3444 ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
3445 ci->cfg_priv = cfg_priv;
3446 ndev->ieee80211_ptr = wdev; 5156 ndev->ieee80211_ptr = wdev;
3447 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 5157 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
3448 wdev->netdev = ndev; 5158 wdev->netdev = ndev;
3449 err = wl_init_priv(cfg_priv); 5159 err = wl_init_priv(cfg);
3450 if (err) { 5160 if (err) {
3451 WL_ERR("Failed to init iwm_priv (%d)\n", err); 5161 WL_ERR("Failed to init iwm_priv (%d)\n", err);
3452 goto cfg80211_attach_out; 5162 goto cfg80211_attach_out;
3453 } 5163 }
3454 brcmf_set_drvdata(cfg_dev, ci);
3455 5164
3456 return cfg_dev; 5165 return cfg;
3457 5166
3458cfg80211_attach_out: 5167cfg80211_attach_out:
3459 brcmf_free_wdev(cfg_priv); 5168 brcmf_free_wdev(cfg);
3460 kfree(cfg_dev);
3461 return NULL; 5169 return NULL;
3462} 5170}
3463 5171
3464void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg_dev) 5172void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
3465{ 5173{
3466 struct brcmf_cfg80211_priv *cfg_priv; 5174 wl_deinit_priv(cfg);
3467 5175 brcmf_free_wdev(cfg);
3468 cfg_priv = brcmf_priv_get(cfg_dev);
3469
3470 wl_deinit_priv(cfg_priv);
3471 brcmf_free_wdev(cfg_priv);
3472 brcmf_set_drvdata(cfg_dev, NULL);
3473 kfree(cfg_dev);
3474} 5176}
3475 5177
3476void 5178void
@@ -3478,10 +5180,10 @@ brcmf_cfg80211_event(struct net_device *ndev,
3478 const struct brcmf_event_msg *e, void *data) 5180 const struct brcmf_event_msg *e, void *data)
3479{ 5181{
3480 u32 event_type = be32_to_cpu(e->event_type); 5182 u32 event_type = be32_to_cpu(e->event_type);
3481 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 5183 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
3482 5184
3483 if (!brcmf_enq_event(cfg_priv, event_type, e)) 5185 if (!brcmf_enq_event(cfg, event_type, e, data))
3484 schedule_work(&cfg_priv->event_work); 5186 schedule_work(&cfg->event_work);
3485} 5187}
3486 5188
3487static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype) 5189static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
@@ -3502,6 +5204,9 @@ static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
3502 case NL80211_IFTYPE_STATION: 5204 case NL80211_IFTYPE_STATION:
3503 infra = 1; 5205 infra = 1;
3504 break; 5206 break;
5207 case NL80211_IFTYPE_AP:
5208 infra = 1;
5209 break;
3505 default: 5210 default:
3506 err = -EINVAL; 5211 err = -EINVAL;
3507 WL_ERR("invalid type (%d)\n", iftype); 5212 WL_ERR("invalid type (%d)\n", iftype);
@@ -3554,6 +5259,8 @@ static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
3554 setbit(eventmask, BRCMF_E_TXFAIL); 5259 setbit(eventmask, BRCMF_E_TXFAIL);
3555 setbit(eventmask, BRCMF_E_JOIN_START); 5260 setbit(eventmask, BRCMF_E_JOIN_START);
3556 setbit(eventmask, BRCMF_E_SCAN_COMPLETE); 5261 setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
5262 setbit(eventmask, BRCMF_E_ESCAN_RESULT);
5263 setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
3557 5264
3558 brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, 5265 brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
3559 iovbuf, sizeof(iovbuf)); 5266 iovbuf, sizeof(iovbuf));
@@ -3672,46 +5379,46 @@ dongle_scantime_out:
3672 return err; 5379 return err;
3673} 5380}
3674 5381
3675static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv) 5382static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
3676{ 5383{
3677 struct wiphy *wiphy; 5384 struct wiphy *wiphy;
3678 s32 phy_list; 5385 s32 phy_list;
3679 s8 phy; 5386 s8 phy;
3680 s32 err = 0; 5387 s32 err = 0;
3681 5388
3682 err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST, 5389 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
3683 &phy_list, sizeof(phy_list)); 5390 &phy_list, sizeof(phy_list));
3684 if (err) { 5391 if (err) {
3685 WL_ERR("error (%d)\n", err); 5392 WL_ERR("error (%d)\n", err);
3686 return err; 5393 return err;
3687 } 5394 }
3688 5395
3689 phy = ((char *)&phy_list)[1]; 5396 phy = ((char *)&phy_list)[0];
3690 WL_INFO("%c phy\n", phy); 5397 WL_INFO("%c phy\n", phy);
3691 if (phy == 'n' || phy == 'a') { 5398 if (phy == 'n' || phy == 'a') {
3692 wiphy = cfg_to_wiphy(cfg_priv); 5399 wiphy = cfg_to_wiphy(cfg);
3693 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n; 5400 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
3694 } 5401 }
3695 5402
3696 return err; 5403 return err;
3697} 5404}
3698 5405
3699static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv) 5406static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
3700{ 5407{
3701 return wl_update_wiphybands(cfg_priv); 5408 return wl_update_wiphybands(cfg);
3702} 5409}
3703 5410
3704static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv) 5411static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
3705{ 5412{
3706 struct net_device *ndev; 5413 struct net_device *ndev;
3707 struct wireless_dev *wdev; 5414 struct wireless_dev *wdev;
3708 s32 power_mode; 5415 s32 power_mode;
3709 s32 err = 0; 5416 s32 err = 0;
3710 5417
3711 if (cfg_priv->dongle_up) 5418 if (cfg->dongle_up)
3712 return err; 5419 return err;
3713 5420
3714 ndev = cfg_to_ndev(cfg_priv); 5421 ndev = cfg_to_ndev(cfg);
3715 wdev = ndev->ieee80211_ptr; 5422 wdev = ndev->ieee80211_ptr;
3716 5423
3717 brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME, 5424 brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
@@ -3721,21 +5428,21 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
3721 if (err) 5428 if (err)
3722 goto default_conf_out; 5429 goto default_conf_out;
3723 5430
3724 power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF; 5431 power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
3725 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode); 5432 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
3726 if (err) 5433 if (err)
3727 goto default_conf_out; 5434 goto default_conf_out;
3728 WL_INFO("power save set to %s\n", 5435 WL_INFO("power save set to %s\n",
3729 (power_mode ? "enabled" : "disabled")); 5436 (power_mode ? "enabled" : "disabled"));
3730 5437
3731 err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1), 5438 err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
3732 WL_BEACON_TIMEOUT); 5439 WL_BEACON_TIMEOUT);
3733 if (err) 5440 if (err)
3734 goto default_conf_out; 5441 goto default_conf_out;
3735 err = brcmf_dongle_mode(ndev, wdev->iftype); 5442 err = brcmf_dongle_mode(ndev, wdev->iftype);
3736 if (err && err != -EINPROGRESS) 5443 if (err && err != -EINPROGRESS)
3737 goto default_conf_out; 5444 goto default_conf_out;
3738 err = brcmf_dongle_probecap(cfg_priv); 5445 err = brcmf_dongle_probecap(cfg);
3739 if (err) 5446 if (err)
3740 goto default_conf_out; 5447 goto default_conf_out;
3741 5448
@@ -3743,31 +5450,31 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
3743 5450
3744default_conf_out: 5451default_conf_out:
3745 5452
3746 cfg_priv->dongle_up = true; 5453 cfg->dongle_up = true;
3747 5454
3748 return err; 5455 return err;
3749 5456
3750} 5457}
3751 5458
3752static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv) 5459static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
3753{ 5460{
3754 char buf[10+IFNAMSIZ]; 5461 char buf[10+IFNAMSIZ];
3755 struct dentry *fd; 5462 struct dentry *fd;
3756 s32 err = 0; 5463 s32 err = 0;
3757 5464
3758 sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name); 5465 sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
3759 cfg_priv->debugfsdir = debugfs_create_dir(buf, 5466 cfg->debugfsdir = debugfs_create_dir(buf,
3760 cfg_to_wiphy(cfg_priv)->debugfsdir); 5467 cfg_to_wiphy(cfg)->debugfsdir);
3761 5468
3762 fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir, 5469 fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
3763 (u16 *)&cfg_priv->profile->beacon_interval); 5470 (u16 *)&cfg->profile->beacon_interval);
3764 if (!fd) { 5471 if (!fd) {
3765 err = -ENOMEM; 5472 err = -ENOMEM;
3766 goto err_out; 5473 goto err_out;
3767 } 5474 }
3768 5475
3769 fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir, 5476 fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
3770 (u8 *)&cfg_priv->profile->dtim_period); 5477 (u8 *)&cfg->profile->dtim_period);
3771 if (!fd) { 5478 if (!fd) {
3772 err = -ENOMEM; 5479 err = -ENOMEM;
3773 goto err_out; 5480 goto err_out;
@@ -3777,40 +5484,40 @@ err_out:
3777 return err; 5484 return err;
3778} 5485}
3779 5486
3780static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv) 5487static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
3781{ 5488{
3782 debugfs_remove_recursive(cfg_priv->debugfsdir); 5489 debugfs_remove_recursive(cfg->debugfsdir);
3783 cfg_priv->debugfsdir = NULL; 5490 cfg->debugfsdir = NULL;
3784} 5491}
3785 5492
3786static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv) 5493static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
3787{ 5494{
3788 s32 err = 0; 5495 s32 err = 0;
3789 5496
3790 set_bit(WL_STATUS_READY, &cfg_priv->status); 5497 set_bit(WL_STATUS_READY, &cfg->status);
3791 5498
3792 brcmf_debugfs_add_netdev_params(cfg_priv); 5499 brcmf_debugfs_add_netdev_params(cfg);
3793 5500
3794 err = brcmf_config_dongle(cfg_priv); 5501 err = brcmf_config_dongle(cfg);
3795 if (err) 5502 if (err)
3796 return err; 5503 return err;
3797 5504
3798 brcmf_invoke_iscan(cfg_priv); 5505 brcmf_invoke_iscan(cfg);
3799 5506
3800 return err; 5507 return err;
3801} 5508}
3802 5509
3803static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv) 5510static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
3804{ 5511{
3805 /* 5512 /*
3806 * While going down, if associated with AP disassociate 5513 * While going down, if associated with AP disassociate
3807 * from AP to save power 5514 * from AP to save power
3808 */ 5515 */
3809 if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || 5516 if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
3810 test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && 5517 test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
3811 test_bit(WL_STATUS_READY, &cfg_priv->status)) { 5518 test_bit(WL_STATUS_READY, &cfg->status)) {
3812 WL_INFO("Disassociating from AP"); 5519 WL_INFO("Disassociating from AP");
3813 brcmf_link_down(cfg_priv); 5520 brcmf_link_down(cfg);
3814 5521
3815 /* Make sure WPA_Supplicant receives all the event 5522 /* Make sure WPA_Supplicant receives all the event
3816 generated due to DISASSOC call to the fw to keep 5523 generated due to DISASSOC call to the fw to keep
@@ -3819,63 +5526,33 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
3819 brcmf_delay(500); 5526 brcmf_delay(500);
3820 } 5527 }
3821 5528
3822 set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); 5529 brcmf_abort_scanning(cfg);
3823 brcmf_term_iscan(cfg_priv); 5530 clear_bit(WL_STATUS_READY, &cfg->status);
3824 if (cfg_priv->scan_request) {
3825 cfg80211_scan_done(cfg_priv->scan_request, true);
3826 /* May need to perform this to cover rmmod */
3827 /* wl_set_mpc(cfg_to_ndev(wl), 1); */
3828 cfg_priv->scan_request = NULL;
3829 }
3830 clear_bit(WL_STATUS_READY, &cfg_priv->status);
3831 clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
3832 clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
3833 5531
3834 brcmf_debugfs_remove_netdev(cfg_priv); 5532 brcmf_debugfs_remove_netdev(cfg);
3835 5533
3836 return 0; 5534 return 0;
3837} 5535}
3838 5536
3839s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev) 5537s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
3840{ 5538{
3841 struct brcmf_cfg80211_priv *cfg_priv;
3842 s32 err = 0; 5539 s32 err = 0;
3843 5540
3844 cfg_priv = brcmf_priv_get(cfg_dev); 5541 mutex_lock(&cfg->usr_sync);
3845 mutex_lock(&cfg_priv->usr_sync); 5542 err = __brcmf_cfg80211_up(cfg);
3846 err = __brcmf_cfg80211_up(cfg_priv); 5543 mutex_unlock(&cfg->usr_sync);
3847 mutex_unlock(&cfg_priv->usr_sync);
3848 5544
3849 return err; 5545 return err;
3850} 5546}
3851 5547
3852s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev) 5548s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
3853{ 5549{
3854 struct brcmf_cfg80211_priv *cfg_priv;
3855 s32 err = 0; 5550 s32 err = 0;
3856 5551
3857 cfg_priv = brcmf_priv_get(cfg_dev); 5552 mutex_lock(&cfg->usr_sync);
3858 mutex_lock(&cfg_priv->usr_sync); 5553 err = __brcmf_cfg80211_down(cfg);
3859 err = __brcmf_cfg80211_down(cfg_priv); 5554 mutex_unlock(&cfg->usr_sync);
3860 mutex_unlock(&cfg_priv->usr_sync);
3861 5555
3862 return err; 5556 return err;
3863} 5557}
3864 5558
3865static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
3866 u8 t, u8 l, u8 *v)
3867{
3868 struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
3869 s32 err = 0;
3870
3871 if (ie->offset + l + 2 > WL_TLV_INFO_MAX) {
3872 WL_ERR("ei crosses buffer boundary\n");
3873 return -ENOSPC;
3874 }
3875 ie->buf[ie->offset] = t;
3876 ie->buf[ie->offset + 1] = l;
3877 memcpy(&ie->buf[ie->offset + 2], v, l);
3878 ie->offset += l + 2;
3879
3880 return err;
3881}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index b5d9b36df3d0..71ced174748a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -17,12 +17,6 @@
17#ifndef _wl_cfg80211_h_ 17#ifndef _wl_cfg80211_h_
18#define _wl_cfg80211_h_ 18#define _wl_cfg80211_h_
19 19
20struct brcmf_cfg80211_conf;
21struct brcmf_cfg80211_iface;
22struct brcmf_cfg80211_priv;
23struct brcmf_cfg80211_security;
24struct brcmf_cfg80211_ibss;
25
26#define WL_DBG_NONE 0 20#define WL_DBG_NONE 0
27#define WL_DBG_CONN (1 << 5) 21#define WL_DBG_CONN (1 << 5)
28#define WL_DBG_SCAN (1 << 4) 22#define WL_DBG_SCAN (1 << 4)
@@ -123,13 +117,25 @@ do { \
123#define WL_SCAN_UNASSOC_TIME 40 117#define WL_SCAN_UNASSOC_TIME 40
124#define WL_SCAN_PASSIVE_TIME 120 118#define WL_SCAN_PASSIVE_TIME 120
125 119
120#define WL_ESCAN_BUF_SIZE (1024 * 64)
121#define WL_ESCAN_TIMER_INTERVAL_MS 8000 /* E-Scan timeout */
122
123#define WL_ESCAN_ACTION_START 1
124#define WL_ESCAN_ACTION_CONTINUE 2
125#define WL_ESCAN_ACTION_ABORT 3
126
127#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
128#define IE_MAX_LEN 512
129
126/* dongle status */ 130/* dongle status */
127enum wl_status { 131enum wl_status {
128 WL_STATUS_READY, 132 WL_STATUS_READY,
129 WL_STATUS_SCANNING, 133 WL_STATUS_SCANNING,
130 WL_STATUS_SCAN_ABORTING, 134 WL_STATUS_SCAN_ABORTING,
131 WL_STATUS_CONNECTING, 135 WL_STATUS_CONNECTING,
132 WL_STATUS_CONNECTED 136 WL_STATUS_CONNECTED,
137 WL_STATUS_AP_CREATING,
138 WL_STATUS_AP_CREATED
133}; 139};
134 140
135/* wi-fi mode */ 141/* wi-fi mode */
@@ -169,23 +175,17 @@ struct brcmf_cfg80211_conf {
169 struct ieee80211_channel channel; 175 struct ieee80211_channel channel;
170}; 176};
171 177
178/* forward declaration */
179struct brcmf_cfg80211_info;
180
172/* cfg80211 main event loop */ 181/* cfg80211 main event loop */
173struct brcmf_cfg80211_event_loop { 182struct brcmf_cfg80211_event_loop {
174 s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_priv *cfg_priv, 183 s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
175 struct net_device *ndev, 184 struct net_device *ndev,
176 const struct brcmf_event_msg *e, 185 const struct brcmf_event_msg *e,
177 void *data); 186 void *data);
178}; 187};
179 188
180/* representing interface of cfg80211 plane */
181struct brcmf_cfg80211_iface {
182 struct brcmf_cfg80211_priv *cfg_priv;
183};
184
185struct brcmf_cfg80211_dev {
186 void *driver_data; /* to store cfg80211 object information */
187};
188
189/* basic structure of scan request */ 189/* basic structure of scan request */
190struct brcmf_cfg80211_scan_req { 190struct brcmf_cfg80211_scan_req {
191 struct brcmf_ssid_le ssid_le; 191 struct brcmf_ssid_le ssid_le;
@@ -238,7 +238,7 @@ struct brcmf_cfg80211_profile {
238/* dongle iscan event loop */ 238/* dongle iscan event loop */
239struct brcmf_cfg80211_iscan_eloop { 239struct brcmf_cfg80211_iscan_eloop {
240 s32 (*handler[WL_SCAN_ERSULTS_LAST]) 240 s32 (*handler[WL_SCAN_ERSULTS_LAST])
241 (struct brcmf_cfg80211_priv *cfg_priv); 241 (struct brcmf_cfg80211_info *cfg);
242}; 242};
243 243
244/* dongle iscan controller */ 244/* dongle iscan controller */
@@ -275,92 +275,240 @@ struct brcmf_cfg80211_pmk_list {
275 struct pmkid foo[MAXPMKID - 1]; 275 struct pmkid foo[MAXPMKID - 1];
276}; 276};
277 277
278/* dongle private data of cfg80211 interface */ 278/* dongle escan state */
279struct brcmf_cfg80211_priv { 279enum wl_escan_state {
280 struct wireless_dev *wdev; /* representing wl cfg80211 device */ 280 WL_ESCAN_STATE_IDLE,
281 struct brcmf_cfg80211_conf *conf; /* dongle configuration */ 281 WL_ESCAN_STATE_SCANNING
282 struct cfg80211_scan_request *scan_request; /* scan request 282};
283 object */ 283
284 struct brcmf_cfg80211_event_loop el; /* main event loop */ 284struct escan_info {
285 struct list_head evt_q_list; /* used for event queue */ 285 u32 escan_state;
286 spinlock_t evt_q_lock; /* for event queue synchronization */ 286 u8 escan_buf[WL_ESCAN_BUF_SIZE];
287 struct mutex usr_sync; /* maily for dongle up/down synchronization */ 287 struct wiphy *wiphy;
288 struct brcmf_scan_results *bss_list; /* bss_list holding scanned 288 struct net_device *ndev;
289 ap information */ 289};
290
291/* Structure to hold WPS, WPA IEs for a AP */
292struct ap_info {
293 u8 probe_res_ie[IE_MAX_LEN];
294 u8 beacon_ie[IE_MAX_LEN];
295 u32 probe_res_ie_len;
296 u32 beacon_ie_len;
297 u8 *wpa_ie;
298 u8 *rsn_ie;
299 bool security_mode;
300};
301
302/**
303 * struct brcmf_pno_param_le - PNO scan configuration parameters
304 *
305 * @version: PNO parameters version.
306 * @scan_freq: scan frequency.
307 * @lost_network_timeout: #sec. to declare discovered network as lost.
308 * @flags: Bit field to control features of PFN such as sort criteria auto
309 * enable switch and background scan.
310 * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
311 * criteria.
312 * @bestn: number of best networks in each scan.
313 * @mscan: number of scans recorded.
314 * @repeat: minimum number of scan intervals before scan frequency changes
315 * in adaptive scan.
316 * @exp: exponent of 2 for maximum scan interval.
317 * @slow_freq: slow scan period.
318 */
319struct brcmf_pno_param_le {
320 __le32 version;
321 __le32 scan_freq;
322 __le32 lost_network_timeout;
323 __le16 flags;
324 __le16 rssi_margin;
325 u8 bestn;
326 u8 mscan;
327 u8 repeat;
328 u8 exp;
329 __le32 slow_freq;
330};
331
332/**
333 * struct brcmf_pno_net_param_le - scan parameters per preferred network.
334 *
335 * @ssid: ssid name and its length.
336 * @flags: bit2: hidden.
337 * @infra: BSS vs IBSS.
338 * @auth: Open vs Closed.
339 * @wpa_auth: WPA type.
340 * @wsec: wsec value.
341 */
342struct brcmf_pno_net_param_le {
343 struct brcmf_ssid_le ssid;
344 __le32 flags;
345 __le32 infra;
346 __le32 auth;
347 __le32 wpa_auth;
348 __le32 wsec;
349};
350
351/**
352 * struct brcmf_pno_net_info_le - information per found network.
353 *
354 * @bssid: BSS network identifier.
355 * @channel: channel number only.
356 * @SSID_len: length of ssid.
357 * @SSID: ssid characters.
358 * @RSSI: receive signal strength (in dBm).
359 * @timestamp: age in seconds.
360 */
361struct brcmf_pno_net_info_le {
362 u8 bssid[ETH_ALEN];
363 u8 channel;
364 u8 SSID_len;
365 u8 SSID[32];
366 __le16 RSSI;
367 __le16 timestamp;
368};
369
370/**
371 * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event.
372 *
373 * @version: PNO version identifier.
374 * @status: indicates completion status of PNO scan.
375 * @count: amount of brcmf_pno_net_info_le entries appended.
376 */
377struct brcmf_pno_scanresults_le {
378 __le32 version;
379 __le32 status;
380 __le32 count;
381};
382
383/**
384 * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
385 *
386 * @wdev: representing wl cfg80211 device.
387 * @conf: dongle configuration.
388 * @scan_request: cfg80211 scan request object.
389 * @el: main event loop.
390 * @evt_q_list: used for event queue.
391 * @evt_q_lock: for event queue synchronization.
392 * @usr_sync: mainly for dongle up/down synchronization.
393 * @bss_list: bss_list holding scanned ap information.
394 * @scan_results: results of the last scan.
395 * @scan_req_int: internal scan request object.
396 * @bss_info: bss information for cfg80211 layer.
397 * @ie: information element object for internal purpose.
398 * @profile: holding dongle profile.
399 * @iscan: iscan controller information.
400 * @conn_info: association info.
401 * @pmk_list: wpa2 pmk list.
402 * @event_work: event handler work struct.
403 * @status: current dongle status.
404 * @pub: common driver information.
405 * @channel: current channel.
406 * @iscan_on: iscan on/off switch.
407 * @iscan_kickstart: indicate iscan already started.
408 * @active_scan: current scan mode.
409 * @sched_escan: e-scan for scheduled scan support running.
410 * @ibss_starter: indicates this sta is ibss starter.
411 * @link_up: link/connection up flag.
412 * @pwr_save: indicate whether dongle to support power save mode.
413 * @dongle_up: indicate whether dongle up or not.
414 * @roam_on: on/off switch for dongle self-roaming.
415 * @scan_tried: indicates if first scan attempted.
416 * @dcmd_buf: dcmd buffer.
417 * @extra_buf: mainly to grab assoc information.
418 * @debugfsdir: debugfs folder for this device.
419 * @escan_on: escan on/off switch.
420 * @escan_info: escan information.
421 * @escan_timeout: Timer for catch scan timeout.
422 * @escan_timeout_work: scan timeout worker.
423 * @escan_ioctl_buf: dongle command buffer for escan commands.
424 * @ap_info: host ap information.
425 * @ci: used to link this structure to netdev private data.
426 */
427struct brcmf_cfg80211_info {
428 struct wireless_dev *wdev;
429 struct brcmf_cfg80211_conf *conf;
430 struct cfg80211_scan_request *scan_request;
431 struct brcmf_cfg80211_event_loop el;
432 struct list_head evt_q_list;
433 spinlock_t evt_q_lock;
434 struct mutex usr_sync;
435 struct brcmf_scan_results *bss_list;
290 struct brcmf_scan_results *scan_results; 436 struct brcmf_scan_results *scan_results;
291 struct brcmf_cfg80211_scan_req *scan_req_int; /* scan request object 437 struct brcmf_cfg80211_scan_req *scan_req_int;
292 for internal purpose */ 438 struct wl_cfg80211_bss_info *bss_info;
293 struct wl_cfg80211_bss_info *bss_info; /* bss information for 439 struct brcmf_cfg80211_ie ie;
294 cfg80211 layer */ 440 struct brcmf_cfg80211_profile *profile;
295 struct brcmf_cfg80211_ie ie; /* information element object for 441 struct brcmf_cfg80211_iscan_ctrl *iscan;
296 internal purpose */ 442 struct brcmf_cfg80211_connect_info conn_info;
297 struct brcmf_cfg80211_profile *profile; /* holding dongle profile */ 443 struct brcmf_cfg80211_pmk_list *pmk_list;
298 struct brcmf_cfg80211_iscan_ctrl *iscan; /* iscan controller */ 444 struct work_struct event_work;
299 struct brcmf_cfg80211_connect_info conn_info; /* association info */ 445 unsigned long status;
300 struct brcmf_cfg80211_pmk_list *pmk_list; /* wpa2 pmk list */ 446 struct brcmf_pub *pub;
301 struct work_struct event_work; /* event handler work struct */ 447 u32 channel;
302 unsigned long status; /* current dongle status */ 448 bool iscan_on;
303 void *pub; 449 bool iscan_kickstart;
304 u32 channel; /* current channel */ 450 bool active_scan;
305 bool iscan_on; /* iscan on/off switch */ 451 bool sched_escan;
306 bool iscan_kickstart; /* indicate iscan already started */ 452 bool ibss_starter;
307 bool active_scan; /* current scan mode */ 453 bool link_up;
308 bool ibss_starter; /* indicates this sta is ibss starter */ 454 bool pwr_save;
309 bool link_up; /* link/connection up flag */ 455 bool dongle_up;
310 bool pwr_save; /* indicate whether dongle to support 456 bool roam_on;
311 power save mode */ 457 bool scan_tried;
312 bool dongle_up; /* indicate whether dongle up or not */ 458 u8 *dcmd_buf;
313 bool roam_on; /* on/off switch for dongle self-roaming */ 459 u8 *extra_buf;
314 bool scan_tried; /* indicates if first scan attempted */
315 u8 *dcmd_buf; /* dcmd buffer */
316 u8 *extra_buf; /* maily to grab assoc information */
317 struct dentry *debugfsdir; 460 struct dentry *debugfsdir;
318 u8 ci[0] __aligned(NETDEV_ALIGN); 461 bool escan_on;
462 struct escan_info escan_info;
463 struct timer_list escan_timeout;
464 struct work_struct escan_timeout_work;
465 u8 *escan_ioctl_buf;
466 struct ap_info *ap_info;
319}; 467};
320 468
321static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_priv *w) 469static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
322{ 470{
323 return w->wdev->wiphy; 471 return w->wdev->wiphy;
324} 472}
325 473
326static inline struct brcmf_cfg80211_priv *wiphy_to_cfg(struct wiphy *w) 474static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
327{ 475{
328 return (struct brcmf_cfg80211_priv *)(wiphy_priv(w)); 476 return (struct brcmf_cfg80211_info *)(wiphy_priv(w));
329} 477}
330 478
331static inline struct brcmf_cfg80211_priv *wdev_to_cfg(struct wireless_dev *wd) 479static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
332{ 480{
333 return (struct brcmf_cfg80211_priv *)(wdev_priv(wd)); 481 return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
334} 482}
335 483
336static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_priv *cfg) 484static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
337{ 485{
338 return cfg->wdev->netdev; 486 return cfg->wdev->netdev;
339} 487}
340 488
341static inline struct brcmf_cfg80211_priv *ndev_to_cfg(struct net_device *ndev) 489static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
342{ 490{
343 return wdev_to_cfg(ndev->ieee80211_ptr); 491 return wdev_to_cfg(ndev->ieee80211_ptr);
344} 492}
345 493
346#define iscan_to_cfg(i) ((struct brcmf_cfg80211_priv *)(i->data)) 494#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
347#define cfg_to_iscan(w) (w->iscan) 495#define cfg_to_iscan(w) (w->iscan)
348 496
349static inline struct 497static inline struct
350brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg) 498brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
351{ 499{
352 return &cfg->conn_info; 500 return &cfg->conn_info;
353} 501}
354 502
355extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, 503struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
356 struct device *busdev, 504 struct device *busdev,
357 void *data); 505 struct brcmf_pub *drvr);
358extern void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg); 506void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
359 507
360/* event handler from dongle */ 508/* event handler from dongle */
361extern void brcmf_cfg80211_event(struct net_device *ndev, 509void brcmf_cfg80211_event(struct net_device *ndev,
362 const struct brcmf_event_msg *e, void *data); 510 const struct brcmf_event_msg *e, void *data);
363extern s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev); 511s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
364extern s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev); 512s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
365 513
366#endif /* _wl_cfg80211_h_ */ 514#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 8c9345dd37d2..b89f1272b93f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -535,9 +535,6 @@ void ai_detach(struct si_pub *sih)
535{ 535{
536 struct si_info *sii; 536 struct si_info *sii;
537 537
538 struct si_pub *si_local = NULL;
539 memcpy(&si_local, &sih, sizeof(struct si_pub **));
540
541 sii = container_of(sih, struct si_info, pub); 538 sii = container_of(sih, struct si_info, pub);
542 539
543 if (sii == NULL) 540 if (sii == NULL)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index a5edebeb0b4f..a744ea5a9559 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -86,7 +86,9 @@ MODULE_AUTHOR("Broadcom Corporation");
86MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); 86MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
87MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); 87MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
88MODULE_LICENSE("Dual BSD/GPL"); 88MODULE_LICENSE("Dual BSD/GPL");
89 89/* This needs to be adjusted when brcms_firmwares changes */
90MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
91MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
90 92
91/* recognized BCMA Core IDs */ 93/* recognized BCMA Core IDs */
92static struct bcma_device_id brcms_coreid_table[] = { 94static struct bcma_device_id brcms_coreid_table[] = {
@@ -265,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
265 } 267 }
266} 268}
267 269
268static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 270static void brcms_ops_tx(struct ieee80211_hw *hw,
271 struct ieee80211_tx_control *control,
272 struct sk_buff *skb)
269{ 273{
270 struct brcms_info *wl = hw->priv; 274 struct brcms_info *wl = hw->priv;
271 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 275 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -277,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
277 goto done; 281 goto done;
278 } 282 }
279 brcms_c_sendpkt_mac80211(wl->wlc, skb, hw); 283 brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
280 tx_info->rate_driver_data[0] = tx_info->control.sta; 284 tx_info->rate_driver_data[0] = control->sta;
281 done: 285 done:
282 spin_unlock_bh(&wl->lock); 286 spin_unlock_bh(&wl->lock);
283} 287}
@@ -300,7 +304,10 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
300 wl->mute_tx = true; 304 wl->mute_tx = true;
301 305
302 if (!wl->pub->up) 306 if (!wl->pub->up)
303 err = brcms_up(wl); 307 if (!blocked)
308 err = brcms_up(wl);
309 else
310 err = -ERFKILL;
304 else 311 else
305 err = -ENODEV; 312 err = -ENODEV;
306 spin_unlock_bh(&wl->lock); 313 spin_unlock_bh(&wl->lock);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 03ca65324845..75086b37c817 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7512,15 +7512,10 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7512 7512
7513 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan); 7513 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
7514 7514
7515 if (channel > 14) { 7515 rx_status->band =
7516 rx_status->band = IEEE80211_BAND_5GHZ; 7516 channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
7517 rx_status->freq = ieee80211_ofdm_chan_to_freq( 7517 rx_status->freq =
7518 WF_CHAN_FACTOR_5_G/2, channel); 7518 ieee80211_channel_to_frequency(channel, rx_status->band);
7519
7520 } else {
7521 rx_status->band = IEEE80211_BAND_2GHZ;
7522 rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
7523 }
7524 7519
7525 rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh); 7520 rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
7526 7521
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index bcc79b4e3267..e8682855b73a 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -34,6 +34,7 @@
34#define BCM43235_CHIP_ID 43235 34#define BCM43235_CHIP_ID 43235
35#define BCM43236_CHIP_ID 43236 35#define BCM43236_CHIP_ID 43236
36#define BCM43238_CHIP_ID 43238 36#define BCM43238_CHIP_ID 43238
37#define BCM43241_CHIP_ID 0x4324
37#define BCM4329_CHIP_ID 0x4329 38#define BCM4329_CHIP_ID 0x4329
38#define BCM4330_CHIP_ID 0x4330 39#define BCM4330_CHIP_ID 0x4330
39#define BCM4331_CHIP_ID 0x4331 40#define BCM4331_CHIP_ID 0x4331
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index f10d30274c23..c11a290a1edf 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -67,11 +67,6 @@
67#define WL_CHANSPEC_BAND_2G 0x2000 67#define WL_CHANSPEC_BAND_2G 0x2000
68#define INVCHANSPEC 255 68#define INVCHANSPEC 255
69 69
70/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
71#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */
72#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */
73#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */
74
75#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK)) 70#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
76#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) 71#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
77 72
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 47932b28aac1..970a48baaf80 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -4,6 +4,7 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/export.h> 6#include <linux/export.h>
7#include <linux/etherdevice.h>
7#include "hostap_wlan.h" 8#include "hostap_wlan.h"
8#include "hostap.h" 9#include "hostap.h"
9#include "hostap_ap.h" 10#include "hostap_ap.h"
@@ -463,8 +464,7 @@ static void handle_info_queue_scanresults(local_info_t *local)
463 prism2_host_roaming(local); 464 prism2_host_roaming(local);
464 465
465 if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA && 466 if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
466 memcmp(local->preferred_ap, "\x00\x00\x00\x00\x00\x00", 467 !is_zero_ether_addr(local->preferred_ap)) {
467 ETH_ALEN) != 0) {
468 /* 468 /*
469 * Firmware seems to be getting into odd state in host_roaming 469 * Firmware seems to be getting into odd state in host_roaming
470 * mode 2 when hostscan is used without join command, so try 470 * mode 2 when hostscan is used without join command, so try
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 18054d9c6688..ac074731335a 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -6,6 +6,7 @@
6#include <linux/ethtool.h> 6#include <linux/ethtool.h>
7#include <linux/if_arp.h> 7#include <linux/if_arp.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/etherdevice.h>
9#include <net/lib80211.h> 10#include <net/lib80211.h>
10 11
11#include "hostap_wlan.h" 12#include "hostap_wlan.h"
@@ -3221,8 +3222,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3221 return -EINVAL; 3222 return -EINVAL;
3222 3223
3223 addr = ext->addr.sa_data; 3224 addr = ext->addr.sa_data;
3224 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff && 3225 if (is_broadcast_ether_addr(addr)) {
3225 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3226 sta_ptr = NULL; 3226 sta_ptr = NULL;
3227 crypt = &local->crypt_info.crypt[i]; 3227 crypt = &local->crypt_info.crypt[i];
3228 } else { 3228 } else {
@@ -3394,8 +3394,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
3394 i--; 3394 i--;
3395 3395
3396 addr = ext->addr.sa_data; 3396 addr = ext->addr.sa_data;
3397 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff && 3397 if (is_broadcast_ether_addr(addr)) {
3398 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3399 sta_ptr = NULL; 3398 sta_ptr = NULL;
3400 crypt = &local->crypt_info.crypt[i]; 3399 crypt = &local->crypt_info.crypt[i];
3401 } else { 3400 } else {
@@ -3458,9 +3457,7 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
3458 param->u.crypt.key_len) 3457 param->u.crypt.key_len)
3459 return -EINVAL; 3458 return -EINVAL;
3460 3459
3461 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && 3460 if (is_broadcast_ether_addr(param->sta_addr)) {
3462 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3463 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3464 if (param->u.crypt.idx >= WEP_KEYS) 3461 if (param->u.crypt.idx >= WEP_KEYS)
3465 return -EINVAL; 3462 return -EINVAL;
3466 sta_ptr = NULL; 3463 sta_ptr = NULL;
@@ -3593,9 +3590,7 @@ static int prism2_ioctl_get_encryption(local_info_t *local,
3593 if (max_key_len < 0) 3590 if (max_key_len < 0)
3594 return -EINVAL; 3591 return -EINVAL;
3595 3592
3596 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && 3593 if (is_broadcast_ether_addr(param->sta_addr)) {
3597 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3598 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3599 sta_ptr = NULL; 3594 sta_ptr = NULL;
3600 if (param->u.crypt.idx >= WEP_KEYS) 3595 if (param->u.crypt.idx >= WEP_KEYS)
3601 param->u.crypt.idx = local->crypt_info.tx_keyidx; 3596 param->u.crypt.idx = local->crypt_info.tx_keyidx;
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 627bc12074c7..15f0fad39add 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -1084,7 +1084,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
1084 __le16 val = cpu_to_le16(reason); 1084 __le16 val = cpu_to_le16(reason);
1085 1085
1086 if (local->iw_mode != IW_MODE_INFRA || 1086 if (local->iw_mode != IW_MODE_INFRA ||
1087 memcmp(local->bssid, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0 || 1087 is_zero_ether_addr(local->bssid) ||
1088 memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0) 1088 memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
1089 return 0; 1089 return 0;
1090 1090
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 534e6557e7e6..29b8fa1adefd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6962,13 +6962,6 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6962 struct ipw2100_priv *priv = libipw_priv(dev); 6962 struct ipw2100_priv *priv = libipw_priv(dev);
6963 int err = 0; 6963 int err = 0;
6964 6964
6965 static const unsigned char any[] = {
6966 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6967 };
6968 static const unsigned char off[] = {
6969 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
6970 };
6971
6972 // sanity checks 6965 // sanity checks
6973 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 6966 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
6974 return -EINVAL; 6967 return -EINVAL;
@@ -6979,8 +6972,8 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6979 goto done; 6972 goto done;
6980 } 6973 }
6981 6974
6982 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || 6975 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
6983 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { 6976 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
6984 /* we disable mandatory BSSID association */ 6977 /* we disable mandatory BSSID association */
6985 IPW_DEBUG_WX("exit - disable mandatory BSSID\n"); 6978 IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
6986 priv->config &= ~CFG_STATIC_BSSID; 6979 priv->config &= ~CFG_STATIC_BSSID;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0df459147394..935120fc8c93 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -9037,18 +9037,11 @@ static int ipw_wx_set_wap(struct net_device *dev,
9037{ 9037{
9038 struct ipw_priv *priv = libipw_priv(dev); 9038 struct ipw_priv *priv = libipw_priv(dev);
9039 9039
9040 static const unsigned char any[] = {
9041 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9042 };
9043 static const unsigned char off[] = {
9044 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9045 };
9046
9047 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 9040 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9048 return -EINVAL; 9041 return -EINVAL;
9049 mutex_lock(&priv->mutex); 9042 mutex_lock(&priv->mutex);
9050 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || 9043 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
9051 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { 9044 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
9052 /* we disable mandatory BSSID association */ 9045 /* we disable mandatory BSSID association */
9053 IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); 9046 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9054 priv->config &= ~CFG_STATIC_BSSID; 9047 priv->config &= ~CFG_STATIC_BSSID;
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 1571505b1a38..54aba4744438 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -675,7 +675,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
675 } 675 }
676 done: 676 done:
677 if (ieee->set_security) 677 if (ieee->set_security)
678 ieee->set_security(ieee->dev, &sec); 678 ieee->set_security(dev, &sec);
679 679
680 return ret; 680 return ret;
681} 681}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index faec40467208..e252acb9c862 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
460 * start C_TX command process 460 * start C_TX command process
461 */ 461 */
462static int 462static int
463il3945_tx_skb(struct il_priv *il, struct sk_buff *skb) 463il3945_tx_skb(struct il_priv *il,
464 struct ieee80211_sta *sta,
465 struct sk_buff *skb)
464{ 466{
465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 467 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
466 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
512 hdr_len = ieee80211_hdrlen(fc); 514 hdr_len = ieee80211_hdrlen(fc);
513 515
514 /* Find idx into station table for destination station */ 516 /* Find idx into station table for destination station */
515 sta_id = il_sta_id_or_broadcast(il, info->control.sta); 517 sta_id = il_sta_id_or_broadcast(il, sta);
516 if (sta_id == IL_INVALID_STATION) { 518 if (sta_id == IL_INVALID_STATION) {
517 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); 519 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
518 goto drop; 520 goto drop;
@@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw)
2859} 2861}
2860 2862
2861static void 2863static void
2862il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2864il3945_mac_tx(struct ieee80211_hw *hw,
2865 struct ieee80211_tx_control *control,
2866 struct sk_buff *skb)
2863{ 2867{
2864 struct il_priv *il = hw->priv; 2868 struct il_priv *il = hw->priv;
2865 2869
@@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2868 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2872 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2869 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2873 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2870 2874
2871 if (il3945_tx_skb(il, skb)) 2875 if (il3945_tx_skb(il, control->sta, skb))
2872 dev_kfree_skb_any(skb); 2876 dev_kfree_skb_any(skb);
2873 2877
2874 D_MAC80211("leave\n"); 2878 D_MAC80211("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 34f61a0581a2..eac4dc8bc879 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
1526} 1526}
1527 1527
1528static void 1528static void
1529il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd, 1529il4965_tx_cmd_build_rate(struct il_priv *il,
1530 struct ieee80211_tx_info *info, __le16 fc) 1530 struct il_tx_cmd *tx_cmd,
1531 struct ieee80211_tx_info *info,
1532 struct ieee80211_sta *sta,
1533 __le16 fc)
1531{ 1534{
1532 const u8 rts_retry_limit = 60; 1535 const u8 rts_retry_limit = 60;
1533 u32 rate_flags; 1536 u32 rate_flags;
@@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
1561 rate_idx = info->control.rates[0].idx; 1564 rate_idx = info->control.rates[0].idx;
1562 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0 1565 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
1563 || rate_idx > RATE_COUNT_LEGACY) 1566 || rate_idx > RATE_COUNT_LEGACY)
1564 rate_idx = 1567 rate_idx = rate_lowest_index(&il->bands[info->band], sta);
1565 rate_lowest_index(&il->bands[info->band],
1566 info->control.sta);
1567 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 1568 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1568 if (info->band == IEEE80211_BAND_5GHZ) 1569 if (info->band == IEEE80211_BAND_5GHZ)
1569 rate_idx += IL_FIRST_OFDM_RATE; 1570 rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
1630 * start C_TX command process 1631 * start C_TX command process
1631 */ 1632 */
1632int 1633int
1633il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) 1634il4965_tx_skb(struct il_priv *il,
1635 struct ieee80211_sta *sta,
1636 struct sk_buff *skb)
1634{ 1637{
1635 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1638 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1636 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1639 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1637 struct ieee80211_sta *sta = info->control.sta;
1638 struct il_station_priv *sta_priv = NULL; 1640 struct il_station_priv *sta_priv = NULL;
1639 struct il_tx_queue *txq; 1641 struct il_tx_queue *txq;
1640 struct il_queue *q; 1642 struct il_queue *q;
@@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1680 sta_id = il->hw_params.bcast_id; 1682 sta_id = il->hw_params.bcast_id;
1681 else { 1683 else {
1682 /* Find idx into station table for destination station */ 1684 /* Find idx into station table for destination station */
1683 sta_id = il_sta_id_or_broadcast(il, info->control.sta); 1685 sta_id = il_sta_id_or_broadcast(il, sta);
1684 1686
1685 if (sta_id == IL_INVALID_STATION) { 1687 if (sta_id == IL_INVALID_STATION) {
1686 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); 1688 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1786 /* TODO need this for burst mode later on */ 1788 /* TODO need this for burst mode later on */
1787 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id); 1789 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1788 1790
1789 il4965_tx_cmd_build_rate(il, tx_cmd, info, fc); 1791 il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
1790 1792
1791 il_update_stats(il, true, fc, len); 1793 il_update_stats(il, true, fc, len);
1792 /* 1794 /*
@@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw)
5828} 5830}
5829 5831
5830void 5832void
5831il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 5833il4965_mac_tx(struct ieee80211_hw *hw,
5834 struct ieee80211_tx_control *control,
5835 struct sk_buff *skb)
5832{ 5836{
5833 struct il_priv *il = hw->priv; 5837 struct il_priv *il = hw->priv;
5834 5838
@@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
5837 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 5841 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
5838 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 5842 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5839 5843
5840 if (il4965_tx_skb(il, skb)) 5844 if (il4965_tx_skb(il, control->sta, skb))
5841 dev_kfree_skb_any(skb); 5845 dev_kfree_skb_any(skb);
5842 5846
5843 D_MACDUMP("leave\n"); 5847 D_MACDUMP("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1db677689cfe..2d092f328547 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
78int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq); 78int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
79void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, 79void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
80 struct ieee80211_tx_info *info); 80 struct ieee80211_tx_info *info);
81int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb); 81int il4965_tx_skb(struct il_priv *il,
82 struct ieee80211_sta *sta,
83 struct sk_buff *skb);
82int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, 84int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
83 struct ieee80211_sta *sta, u16 tid, u16 * ssn); 85 struct ieee80211_sta *sta, u16 tid, u16 * ssn);
84int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, 86int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il);
163int il4965_eeprom_check_version(struct il_priv *il); 165int il4965_eeprom_check_version(struct il_priv *il);
164 166
165/* mac80211 handlers (for 4965) */ 167/* mac80211 handlers (for 4965) */
166void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 168void il4965_mac_tx(struct ieee80211_hw *hw,
169 struct ieee80211_tx_control *control,
170 struct sk_buff *skb);
167int il4965_mac_start(struct ieee80211_hw *hw); 171int il4965_mac_start(struct ieee80211_hw *hw);
168void il4965_mac_stop(struct ieee80211_hw *hw); 172void il4965_mac_stop(struct ieee80211_hw *hw);
169void il4965_configure_filter(struct ieee80211_hw *hw, 173void il4965_configure_filter(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 0370403fd0bd..318ed3c9fe74 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1586,9 +1586,9 @@ il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1586 return 0; 1586 return 0;
1587 1587
1588 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 1588 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1589 memcpy(frame->da, il_bcast_addr, ETH_ALEN); 1589 eth_broadcast_addr(frame->da);
1590 memcpy(frame->sa, ta, ETH_ALEN); 1590 memcpy(frame->sa, ta, ETH_ALEN);
1591 memcpy(frame->bssid, il_bcast_addr, ETH_ALEN); 1591 eth_broadcast_addr(frame->bssid);
1592 frame->seq_ctrl = 0; 1592 frame->seq_ctrl = 0;
1593 1593
1594 len += 24; 1594 len += 24;
@@ -4860,7 +4860,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
4860 4860
4861#ifdef CONFIG_PM 4861#ifdef CONFIG_PM
4862 4862
4863int 4863static int
4864il_pci_suspend(struct device *device) 4864il_pci_suspend(struct device *device)
4865{ 4865{
4866 struct pci_dev *pdev = to_pci_dev(device); 4866 struct pci_dev *pdev = to_pci_dev(device);
@@ -4877,9 +4877,8 @@ il_pci_suspend(struct device *device)
4877 4877
4878 return 0; 4878 return 0;
4879} 4879}
4880EXPORT_SYMBOL(il_pci_suspend);
4881 4880
4882int 4881static int
4883il_pci_resume(struct device *device) 4882il_pci_resume(struct device *device)
4884{ 4883{
4885 struct pci_dev *pdev = to_pci_dev(device); 4884 struct pci_dev *pdev = to_pci_dev(device);
@@ -4906,16 +4905,8 @@ il_pci_resume(struct device *device)
4906 4905
4907 return 0; 4906 return 0;
4908} 4907}
4909EXPORT_SYMBOL(il_pci_resume);
4910 4908
4911const struct dev_pm_ops il_pm_ops = { 4909SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4912 .suspend = il_pci_suspend,
4913 .resume = il_pci_resume,
4914 .freeze = il_pci_suspend,
4915 .thaw = il_pci_resume,
4916 .poweroff = il_pci_suspend,
4917 .restore = il_pci_resume,
4918};
4919EXPORT_SYMBOL(il_pm_ops); 4910EXPORT_SYMBOL(il_pm_ops);
4920 4911
4921#endif /* CONFIG_PM */ 4912#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 724682669060..b4bb813362bd 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1843,8 +1843,6 @@ __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
1843 u32 beacon_interval); 1843 u32 beacon_interval);
1844 1844
1845#ifdef CONFIG_PM 1845#ifdef CONFIG_PM
1846int il_pci_suspend(struct device *device);
1847int il_pci_resume(struct device *device);
1848extern const struct dev_pm_ops il_pm_ops; 1846extern const struct dev_pm_ops il_pm_ops;
1849 1847
1850#define IL_LEGACY_PM_OPS (&il_pm_ops) 1848#define IL_LEGACY_PM_OPS (&il_pm_ops)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 9bb16bdf6d26..75e12f29d9eb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
201 201
202 202
203/* tx */ 203/* tx */
204int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 204int iwlagn_tx_skb(struct iwl_priv *priv,
205 struct ieee80211_sta *sta,
206 struct sk_buff *skb);
205int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, 207int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
206 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 208 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
207int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, 209int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
485} 487}
486 488
487#ifdef CONFIG_IWLWIFI_DEBUGFS 489#ifdef CONFIG_IWLWIFI_DEBUGFS
488int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); 490int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
489void iwl_dbgfs_unregister(struct iwl_priv *priv);
490#else 491#else
491static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 492static inline int iwl_dbgfs_register(struct iwl_priv *priv,
493 struct dentry *dbgfs_dir)
492{ 494{
493 return 0; 495 return 0;
494} 496}
495static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
496{
497}
498#endif /* CONFIG_IWLWIFI_DEBUGFS */ 497#endif /* CONFIG_IWLWIFI_DEBUGFS */
499 498
500#ifdef CONFIG_IWLWIFI_DEBUG 499#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 4a361c55c543..01128c96b5d8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -1055,8 +1055,9 @@ struct iwl_wep_cmd {
1055#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) 1055#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1056#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) 1056#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1057#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) 1057#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1058#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 1058#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70
1059#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 1059#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1060#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7)
1060 1061
1061#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) 1062#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1062#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) 1063#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index a47b306b522c..1a98fa3ab06d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2352,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
2352 * Create the debugfs files and directories 2352 * Create the debugfs files and directories
2353 * 2353 *
2354 */ 2354 */
2355int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 2355int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
2356{ 2356{
2357 struct dentry *phyd = priv->hw->wiphy->debugfsdir; 2357 struct dentry *dir_data, *dir_rf, *dir_debug;
2358 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
2359 2358
2360 dir_drv = debugfs_create_dir(name, phyd); 2359 priv->debugfs_dir = dbgfs_dir;
2361 if (!dir_drv)
2362 return -ENOMEM;
2363
2364 priv->debugfs_dir = dir_drv;
2365 2360
2366 dir_data = debugfs_create_dir("data", dir_drv); 2361 dir_data = debugfs_create_dir("data", dbgfs_dir);
2367 if (!dir_data) 2362 if (!dir_data)
2368 goto err; 2363 goto err;
2369 dir_rf = debugfs_create_dir("rf", dir_drv); 2364 dir_rf = debugfs_create_dir("rf", dbgfs_dir);
2370 if (!dir_rf) 2365 if (!dir_rf)
2371 goto err; 2366 goto err;
2372 dir_debug = debugfs_create_dir("debug", dir_drv); 2367 dir_debug = debugfs_create_dir("debug", dbgfs_dir);
2373 if (!dir_debug) 2368 if (!dir_debug)
2374 goto err; 2369 goto err;
2375 2370
@@ -2415,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2415 /* Calibrations disabled/enabled status*/ 2410 /* Calibrations disabled/enabled status*/
2416 DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); 2411 DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
2417 2412
2418 if (iwl_trans_dbgfs_register(priv->trans, dir_debug)) 2413 /*
2419 goto err; 2414 * Create a symlink with mac80211. This is not very robust, as it does
2415 * not remove the symlink created. The implicit assumption is that
2416 * when the opmode exits, mac80211 will also exit, and will remove
2417 * this symlink as part of its cleanup.
2418 */
2419 if (priv->mac80211_registered) {
2420 char buf[100];
2421 struct dentry *mac80211_dir, *dev_dir, *root_dir;
2422
2423 dev_dir = dbgfs_dir->d_parent;
2424 root_dir = dev_dir->d_parent;
2425 mac80211_dir = priv->hw->wiphy->debugfsdir;
2426
2427 snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
2428 dev_dir->d_name.name);
2429
2430 if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
2431 goto err;
2432 }
2433
2420 return 0; 2434 return 0;
2421 2435
2422err: 2436err:
2423 IWL_ERR(priv, "Can't create the debugfs directory\n"); 2437 IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
2424 iwl_dbgfs_unregister(priv);
2425 return -ENOMEM; 2438 return -ENOMEM;
2426} 2439}
2427
2428/**
2429 * Remove the debugfs files and directories
2430 *
2431 */
2432void iwl_dbgfs_unregister(struct iwl_priv *priv)
2433{
2434 if (!priv->debugfs_dir)
2435 return;
2436
2437 debugfs_remove_recursive(priv->debugfs_dir);
2438 priv->debugfs_dir = NULL;
2439}
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 054f728f6266..8141f91c3725 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -771,6 +771,7 @@ struct iwl_priv {
771 u8 agg_tids_count; 771 u8 agg_tids_count;
772 772
773 struct iwl_rx_phy_res last_phy_res; 773 struct iwl_rx_phy_res last_phy_res;
774 u32 ampdu_ref;
774 bool last_phy_res_valid; 775 bool last_phy_res_valid;
775 776
776 /* 777 /*
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index a5f7bce96325..ff8162d4c454 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
195 ARRAY_SIZE(iwlagn_iface_combinations_dualmode); 195 ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
196 } 196 }
197 197
198 hw->wiphy->max_remain_on_channel_duration = 1000; 198 hw->wiphy->max_remain_on_channel_duration = 500;
199 199
200 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 200 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
201 WIPHY_FLAG_DISABLE_BEACON_HINTS | 201 WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
511} 511}
512#endif 512#endif
513 513
514static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 514static void iwlagn_mac_tx(struct ieee80211_hw *hw,
515 struct ieee80211_tx_control *control,
516 struct sk_buff *skb)
515{ 517{
516 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 518 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
517 519
518 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 520 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
519 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 521 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
520 522
521 if (iwlagn_tx_skb(priv, skb)) 523 if (iwlagn_tx_skb(priv, control->sta, skb))
522 dev_kfree_skb_any(skb); 524 dev_kfree_skb_any(skb);
523} 525}
524 526
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 84d3db5aa506..7ff3f1430678 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv)
862 * No race since we hold the mutex here and a new one 862 * No race since we hold the mutex here and a new one
863 * can't come in at this time. 863 * can't come in at this time.
864 */ 864 */
865 ieee80211_remain_on_channel_expired(priv->hw); 865 if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
866 ieee80211_remain_on_channel_expired(priv->hw);
866 867
867 exit_pending = 868 exit_pending =
868 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 869 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data)
994 iwlagn_prepare_restart(priv); 995 iwlagn_prepare_restart(priv);
995 mutex_unlock(&priv->mutex); 996 mutex_unlock(&priv->mutex);
996 iwl_cancel_deferred_work(priv); 997 iwl_cancel_deferred_work(priv);
997 ieee80211_restart_hw(priv->hw); 998 if (priv->mac80211_registered)
999 ieee80211_restart_hw(priv->hw);
1000 else
1001 IWL_ERR(priv,
1002 "Cannot request restart before registrating with mac80211");
998 } else { 1003 } else {
999 WARN_ON(1); 1004 WARN_ON(1);
1000 } 1005 }
@@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1222 1227
1223static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, 1228static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1224 const struct iwl_cfg *cfg, 1229 const struct iwl_cfg *cfg,
1225 const struct iwl_fw *fw) 1230 const struct iwl_fw *fw,
1231 struct dentry *dbgfs_dir)
1226{ 1232{
1227 struct iwl_priv *priv; 1233 struct iwl_priv *priv;
1228 struct ieee80211_hw *hw; 1234 struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1466 if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) 1472 if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
1467 goto out_destroy_workqueue; 1473 goto out_destroy_workqueue;
1468 1474
1469 if (iwl_dbgfs_register(priv, DRV_NAME)) 1475 if (iwl_dbgfs_register(priv, dbgfs_dir))
1470 IWL_ERR(priv, 1476 goto out_mac80211_unregister;
1471 "failed to create debugfs files. Ignoring error\n");
1472 1477
1473 return op_mode; 1478 return op_mode;
1474 1479
1480out_mac80211_unregister:
1481 iwlagn_mac_unregister(priv);
1475out_destroy_workqueue: 1482out_destroy_workqueue:
1483 iwl_tt_exit(priv);
1484 iwl_testmode_free(priv);
1485 iwl_cancel_deferred_work(priv);
1476 destroy_workqueue(priv->workqueue); 1486 destroy_workqueue(priv->workqueue);
1477 priv->workqueue = NULL; 1487 priv->workqueue = NULL;
1478 iwl_uninit_drv(priv); 1488 iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1493 1503
1494 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 1504 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
1495 1505
1496 iwl_dbgfs_unregister(priv);
1497
1498 iwl_testmode_free(priv); 1506 iwl_testmode_free(priv);
1499 iwlagn_mac_unregister(priv); 1507 iwlagn_mac_unregister(priv);
1500 1508
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index fee5cffa1669..5a9c325804f6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -667,6 +667,7 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
667 struct iwl_rx_packet *pkt = rxb_addr(rxb); 667 struct iwl_rx_packet *pkt = rxb_addr(rxb);
668 668
669 priv->last_phy_res_valid = true; 669 priv->last_phy_res_valid = true;
670 priv->ampdu_ref++;
670 memcpy(&priv->last_phy_res, pkt->data, 671 memcpy(&priv->last_phy_res, pkt->data,
671 sizeof(struct iwl_rx_phy_res)); 672 sizeof(struct iwl_rx_phy_res));
672 return 0; 673 return 0;
@@ -981,6 +982,16 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
981 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) 982 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
982 rx_status.flag |= RX_FLAG_SHORTPRE; 983 rx_status.flag |= RX_FLAG_SHORTPRE;
983 984
985 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
986 /*
987 * We know which subframes of an A-MPDU belong
988 * together since we get a single PHY response
989 * from the firmware for all of them
990 */
991 rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
992 rx_status.ampdu_reference = priv->ampdu_ref;
993 }
994
984 /* Set up the HT phy flags */ 995 /* Set up the HT phy flags */
985 if (rate_n_flags & RATE_MCS_HT_MSK) 996 if (rate_n_flags & RATE_MCS_HT_MSK)
986 rx_status.flag |= RX_FLAG_HT; 997 rx_status.flag |= RX_FLAG_HT;
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index e3467fa86899..bb9f6252d28f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -612,9 +612,9 @@ static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
612 return 0; 612 return 0;
613 613
614 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 614 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
615 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN); 615 eth_broadcast_addr(frame->da);
616 memcpy(frame->sa, ta, ETH_ALEN); 616 memcpy(frame->sa, ta, ETH_ALEN);
617 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN); 617 eth_broadcast_addr(frame->bssid);
618 frame->seq_ctrl = 0; 618 frame->seq_ctrl = 0;
619 619
620 len += 24; 620 len += 24;
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b29b798f7550..cd9b6de4273e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -128,10 +128,11 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
128 struct iwl_device_cmd *cmd) 128 struct iwl_device_cmd *cmd)
129{ 129{
130 struct iwl_rx_packet *pkt = rxb_addr(rxb); 130 struct iwl_rx_packet *pkt = rxb_addr(rxb);
131 struct iwl_addsta_cmd *addsta =
132 (struct iwl_addsta_cmd *) cmd->payload;
133 131
134 return iwl_process_add_sta_resp(priv, addsta, pkt); 132 if (!cmd)
133 return 0;
134
135 return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
135} 136}
136 137
137int iwl_send_add_sta(struct iwl_priv *priv, 138int iwl_send_add_sta(struct iwl_priv *priv,
@@ -150,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
150 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
151 152
152 if (!(flags & CMD_ASYNC)) { 153 if (!(flags & CMD_ASYNC)) {
153 cmd.flags |= CMD_WANT_SKB; 154 cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
154 might_sleep(); 155 might_sleep();
155 } 156 }
156 157
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5971a23aa47d..f5ca73a89870 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
127static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, 127static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
128 struct iwl_tx_cmd *tx_cmd, 128 struct iwl_tx_cmd *tx_cmd,
129 struct ieee80211_tx_info *info, 129 struct ieee80211_tx_info *info,
130 struct ieee80211_sta *sta,
130 __le16 fc) 131 __le16 fc)
131{ 132{
132 u32 rate_flags; 133 u32 rate_flags;
@@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
187 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || 188 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
188 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) 189 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
189 rate_idx = rate_lowest_index( 190 rate_idx = rate_lowest_index(
190 &priv->eeprom_data->bands[info->band], 191 &priv->eeprom_data->bands[info->band], sta);
191 info->control.sta);
192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
193 if (info->band == IEEE80211_BAND_5GHZ) 193 if (info->band == IEEE80211_BAND_5GHZ)
194 rate_idx += IWL_FIRST_OFDM_RATE; 194 rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
291/* 291/*
292 * start REPLY_TX command process 292 * start REPLY_TX command process
293 */ 293 */
294int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) 294int iwlagn_tx_skb(struct iwl_priv *priv,
295 struct ieee80211_sta *sta,
296 struct sk_buff *skb)
295{ 297{
296 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 298 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
297 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 299 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
345 sta_id = ctx->bcast_sta_id; 347 sta_id = ctx->bcast_sta_id;
346 else { 348 else {
347 /* Find index into station table for destination station */ 349 /* Find index into station table for destination station */
348 sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta); 350 sta_id = iwl_sta_id_or_broadcast(ctx, sta);
349 if (sta_id == IWL_INVALID_STATION) { 351 if (sta_id == IWL_INVALID_STATION) {
350 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 352 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
351 hdr->addr1); 353 hdr->addr1);
@@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
355 357
356 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); 358 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
357 359
358 if (info->control.sta) 360 if (sta)
359 sta_priv = (void *)info->control.sta->drv_priv; 361 sta_priv = (void *)sta->drv_priv;
360 362
361 if (sta_priv && sta_priv->asleep && 363 if (sta_priv && sta_priv->asleep &&
362 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) { 364 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
397 /* TODO need this for burst mode later on */ 399 /* TODO need this for burst mode later on */
398 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); 400 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
399 401
400 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); 402 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
401 403
402 memset(&info->status, 0, sizeof(info->status)); 404 memset(&info->status, 0, sizeof(info->status));
403 405
@@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
431 * only. Check this here. 433 * only. Check this here.
432 */ 434 */
433 if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && 435 if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
434 tid_data->agg.state != IWL_AGG_OFF, 436 tid_data->agg.state != IWL_AGG_OFF,
435 "Tx while agg.state = %d", tid_data->agg.state)) 437 "Tx while agg.state = %d", tid_data->agg.state))
436 goto drop_unlock_sta; 438 goto drop_unlock_sta;
437 439
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 6d8d6dd7943f..2cb1efbc5ed1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -295,7 +295,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
295static int iwl_verify_sec_sparse(struct iwl_priv *priv, 295static int iwl_verify_sec_sparse(struct iwl_priv *priv,
296 const struct fw_desc *fw_desc) 296 const struct fw_desc *fw_desc)
297{ 297{
298 __le32 *image = (__le32 *)fw_desc->v_addr; 298 __le32 *image = (__le32 *)fw_desc->data;
299 u32 len = fw_desc->len; 299 u32 len = fw_desc->len;
300 u32 val; 300 u32 val;
301 u32 i; 301 u32 i;
@@ -319,7 +319,7 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
319static void iwl_print_mismatch_sec(struct iwl_priv *priv, 319static void iwl_print_mismatch_sec(struct iwl_priv *priv,
320 const struct fw_desc *fw_desc) 320 const struct fw_desc *fw_desc)
321{ 321{
322 __le32 *image = (__le32 *)fw_desc->v_addr; 322 __le32 *image = (__le32 *)fw_desc->data;
323 u32 len = fw_desc->len; 323 u32 len = fw_desc->len;
324 u32 val; 324 u32 val;
325 u32 offs; 325 u32 offs;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 06ca505bb2cc..59a5f78402fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,6 +29,7 @@
29 29
30#include <linux/tracepoint.h> 30#include <linux/tracepoint.h>
31#include <linux/device.h> 31#include <linux/device.h>
32#include "iwl-trans.h"
32 33
33 34
34#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) 35#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -237,27 +238,34 @@ TRACE_EVENT(iwlwifi_dbg,
237#define TRACE_SYSTEM iwlwifi 238#define TRACE_SYSTEM iwlwifi
238 239
239TRACE_EVENT(iwlwifi_dev_hcmd, 240TRACE_EVENT(iwlwifi_dev_hcmd,
240 TP_PROTO(const struct device *dev, u32 flags, 241 TP_PROTO(const struct device *dev,
241 const void *hcmd0, size_t len0, 242 struct iwl_host_cmd *cmd, u16 total_size,
242 const void *hcmd1, size_t len1, 243 const void *hdr, size_t hdr_len),
243 const void *hcmd2, size_t len2), 244 TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
244 TP_ARGS(dev, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
245 TP_STRUCT__entry( 245 TP_STRUCT__entry(
246 DEV_ENTRY 246 DEV_ENTRY
247 __dynamic_array(u8, hcmd0, len0) 247 __dynamic_array(u8, hcmd, total_size)
248 __dynamic_array(u8, hcmd1, len1)
249 __dynamic_array(u8, hcmd2, len2)
250 __field(u32, flags) 248 __field(u32, flags)
251 ), 249 ),
252 TP_fast_assign( 250 TP_fast_assign(
251 int i, offset = hdr_len;
252
253 DEV_ASSIGN; 253 DEV_ASSIGN;
254 memcpy(__get_dynamic_array(hcmd0), hcmd0, len0); 254 __entry->flags = cmd->flags;
255 memcpy(__get_dynamic_array(hcmd1), hcmd1, len1); 255 memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
256 memcpy(__get_dynamic_array(hcmd2), hcmd2, len2); 256
257 __entry->flags = flags; 257 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
258 if (!cmd->len[i])
259 continue;
260 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
261 continue;
262 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
263 cmd->data[i], cmd->len[i]);
264 offset += cmd->len[i];
265 }
258 ), 266 ),
259 TP_printk("[%s] hcmd %#.2x (%ssync)", 267 TP_printk("[%s] hcmd %#.2x (%ssync)",
260 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd0))[0], 268 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
261 __entry->flags & CMD_ASYNC ? "a" : "") 269 __entry->flags & CMD_ASYNC ? "a" : "")
262); 270);
263 271
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index cc41cfaedfbd..198634b75ed0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -64,6 +64,7 @@
64#include <linux/dma-mapping.h> 64#include <linux/dma-mapping.h>
65#include <linux/firmware.h> 65#include <linux/firmware.h>
66#include <linux/module.h> 66#include <linux/module.h>
67#include <linux/vmalloc.h>
67 68
68#include "iwl-drv.h" 69#include "iwl-drv.h"
69#include "iwl-debug.h" 70#include "iwl-debug.h"
@@ -101,6 +102,10 @@ MODULE_VERSION(DRV_VERSION);
101MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 102MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
102MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
103 104
105#ifdef CONFIG_IWLWIFI_DEBUGFS
106static struct dentry *iwl_dbgfs_root;
107#endif
108
104/** 109/**
105 * struct iwl_drv - drv common data 110 * struct iwl_drv - drv common data
106 * @list: list of drv structures using this opmode 111 * @list: list of drv structures using this opmode
@@ -126,6 +131,12 @@ struct iwl_drv {
126 char firmware_name[25]; /* name of firmware file to load */ 131 char firmware_name[25]; /* name of firmware file to load */
127 132
128 struct completion request_firmware_complete; 133 struct completion request_firmware_complete;
134
135#ifdef CONFIG_IWLWIFI_DEBUGFS
136 struct dentry *dbgfs_drv;
137 struct dentry *dbgfs_trans;
138 struct dentry *dbgfs_op_mode;
139#endif
129}; 140};
130 141
131#define DVM_OP_MODE 0 142#define DVM_OP_MODE 0
@@ -154,10 +165,8 @@ struct fw_sec {
154 165
155static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) 166static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
156{ 167{
157 if (desc->v_addr) 168 vfree(desc->data);
158 dma_free_coherent(drv->trans->dev, desc->len, 169 desc->data = NULL;
159 desc->v_addr, desc->p_addr);
160 desc->v_addr = NULL;
161 desc->len = 0; 170 desc->len = 0;
162} 171}
163 172
@@ -176,25 +185,29 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
176} 185}
177 186
178static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, 187static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
179 struct fw_sec *sec) 188 struct fw_sec *sec)
180{ 189{
181 if (!sec || !sec->size) { 190 void *data;
182 desc->v_addr = NULL; 191
192 desc->data = NULL;
193
194 if (!sec || !sec->size)
183 return -EINVAL; 195 return -EINVAL;
184 }
185 196
186 desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size, 197 data = vmalloc(sec->size);
187 &desc->p_addr, GFP_KERNEL); 198 if (!data)
188 if (!desc->v_addr)
189 return -ENOMEM; 199 return -ENOMEM;
190 200
191 desc->len = sec->size; 201 desc->len = sec->size;
192 desc->offset = sec->offset; 202 desc->offset = sec->offset;
193 memcpy(desc->v_addr, sec->data, sec->size); 203 memcpy(data, sec->data, desc->len);
204 desc->data = data;
205
194 return 0; 206 return 0;
195} 207}
196 208
197static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); 209static void iwl_req_fw_callback(const struct firmware *ucode_raw,
210 void *context);
198 211
199#define UCODE_EXPERIMENTAL_INDEX 100 212#define UCODE_EXPERIMENTAL_INDEX 100
200#define UCODE_EXPERIMENTAL_TAG "exp" 213#define UCODE_EXPERIMENTAL_TAG "exp"
@@ -231,7 +244,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
231 244
232 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, 245 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
233 drv->trans->dev, 246 drv->trans->dev,
234 GFP_KERNEL, drv, iwl_ucode_callback); 247 GFP_KERNEL, drv, iwl_req_fw_callback);
235} 248}
236 249
237struct fw_img_parsing { 250struct fw_img_parsing {
@@ -759,13 +772,57 @@ static int validate_sec_sizes(struct iwl_drv *drv,
759 return 0; 772 return 0;
760} 773}
761 774
775static struct iwl_op_mode *
776_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
777{
778 const struct iwl_op_mode_ops *ops = op->ops;
779 struct dentry *dbgfs_dir = NULL;
780 struct iwl_op_mode *op_mode = NULL;
781
782#ifdef CONFIG_IWLWIFI_DEBUGFS
783 drv->dbgfs_op_mode = debugfs_create_dir(op->name,
784 drv->dbgfs_drv);
785 if (!drv->dbgfs_op_mode) {
786 IWL_ERR(drv,
787 "failed to create opmode debugfs directory\n");
788 return op_mode;
789 }
790 dbgfs_dir = drv->dbgfs_op_mode;
791#endif
792
793 op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
794
795#ifdef CONFIG_IWLWIFI_DEBUGFS
796 if (!op_mode) {
797 debugfs_remove_recursive(drv->dbgfs_op_mode);
798 drv->dbgfs_op_mode = NULL;
799 }
800#endif
801
802 return op_mode;
803}
804
805static void _iwl_op_mode_stop(struct iwl_drv *drv)
806{
807 /* op_mode can be NULL if its start failed */
808 if (drv->op_mode) {
809 iwl_op_mode_stop(drv->op_mode);
810 drv->op_mode = NULL;
811
812#ifdef CONFIG_IWLWIFI_DEBUGFS
813 debugfs_remove_recursive(drv->dbgfs_op_mode);
814 drv->dbgfs_op_mode = NULL;
815#endif
816 }
817}
818
762/** 819/**
763 * iwl_ucode_callback - callback when firmware was loaded 820 * iwl_req_fw_callback - callback when firmware was loaded
764 * 821 *
765 * If loaded successfully, copies the firmware into buffers 822 * If loaded successfully, copies the firmware into buffers
766 * for the card to fetch (via DMA). 823 * for the card to fetch (via DMA).
767 */ 824 */
768static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) 825static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
769{ 826{
770 struct iwl_drv *drv = context; 827 struct iwl_drv *drv = context;
771 struct iwl_fw *fw = &drv->fw; 828 struct iwl_fw *fw = &drv->fw;
@@ -908,8 +965,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
908 list_add_tail(&drv->list, &op->drv); 965 list_add_tail(&drv->list, &op->drv);
909 966
910 if (op->ops) { 967 if (op->ops) {
911 const struct iwl_op_mode_ops *ops = op->ops; 968 drv->op_mode = _iwl_op_mode_start(drv, op);
912 drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
913 969
914 if (!drv->op_mode) { 970 if (!drv->op_mode) {
915 mutex_unlock(&iwlwifi_opmode_table_mtx); 971 mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,24 +1025,51 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
969 init_completion(&drv->request_firmware_complete); 1025 init_completion(&drv->request_firmware_complete);
970 INIT_LIST_HEAD(&drv->list); 1026 INIT_LIST_HEAD(&drv->list);
971 1027
1028#ifdef CONFIG_IWLWIFI_DEBUGFS
1029 /* Create the device debugfs entries. */
1030 drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
1031 iwl_dbgfs_root);
1032
1033 if (!drv->dbgfs_drv) {
1034 IWL_ERR(drv, "failed to create debugfs directory\n");
1035 goto err_free_drv;
1036 }
1037
1038 /* Create transport layer debugfs dir */
1039 drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
1040
1041 if (!drv->trans->dbgfs_dir) {
1042 IWL_ERR(drv, "failed to create transport debugfs directory\n");
1043 goto err_free_dbgfs;
1044 }
1045#endif
1046
972 ret = iwl_request_firmware(drv, true); 1047 ret = iwl_request_firmware(drv, true);
973 1048
974 if (ret) { 1049 if (ret) {
975 IWL_ERR(trans, "Couldn't request the fw\n"); 1050 IWL_ERR(trans, "Couldn't request the fw\n");
976 kfree(drv); 1051 goto err_fw;
977 drv = NULL;
978 } 1052 }
979 1053
980 return drv; 1054 return drv;
1055
1056err_fw:
1057#ifdef CONFIG_IWLWIFI_DEBUGFS
1058err_free_dbgfs:
1059 debugfs_remove_recursive(drv->dbgfs_drv);
1060err_free_drv:
1061#endif
1062 kfree(drv);
1063 drv = NULL;
1064
1065 return drv;
981} 1066}
982 1067
983void iwl_drv_stop(struct iwl_drv *drv) 1068void iwl_drv_stop(struct iwl_drv *drv)
984{ 1069{
985 wait_for_completion(&drv->request_firmware_complete); 1070 wait_for_completion(&drv->request_firmware_complete);
986 1071
987 /* op_mode can be NULL if its start failed */ 1072 _iwl_op_mode_stop(drv);
988 if (drv->op_mode)
989 iwl_op_mode_stop(drv->op_mode);
990 1073
991 iwl_dealloc_ucode(drv); 1074 iwl_dealloc_ucode(drv);
992 1075
@@ -1000,6 +1083,10 @@ void iwl_drv_stop(struct iwl_drv *drv)
1000 list_del(&drv->list); 1083 list_del(&drv->list);
1001 mutex_unlock(&iwlwifi_opmode_table_mtx); 1084 mutex_unlock(&iwlwifi_opmode_table_mtx);
1002 1085
1086#ifdef CONFIG_IWLWIFI_DEBUGFS
1087 debugfs_remove_recursive(drv->dbgfs_drv);
1088#endif
1089
1003 kfree(drv); 1090 kfree(drv);
1004} 1091}
1005 1092
@@ -1022,15 +1109,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1022{ 1109{
1023 int i; 1110 int i;
1024 struct iwl_drv *drv; 1111 struct iwl_drv *drv;
1112 struct iwlwifi_opmode_table *op;
1025 1113
1026 mutex_lock(&iwlwifi_opmode_table_mtx); 1114 mutex_lock(&iwlwifi_opmode_table_mtx);
1027 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { 1115 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1028 if (strcmp(iwlwifi_opmode_table[i].name, name)) 1116 op = &iwlwifi_opmode_table[i];
1117 if (strcmp(op->name, name))
1029 continue; 1118 continue;
1030 iwlwifi_opmode_table[i].ops = ops; 1119 op->ops = ops;
1031 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) 1120 /* TODO: need to handle exceptional case */
1032 drv->op_mode = ops->start(drv->trans, drv->cfg, 1121 list_for_each_entry(drv, &op->drv, list)
1033 &drv->fw); 1122 drv->op_mode = _iwl_op_mode_start(drv, op);
1123
1034 mutex_unlock(&iwlwifi_opmode_table_mtx); 1124 mutex_unlock(&iwlwifi_opmode_table_mtx);
1035 return 0; 1125 return 0;
1036 } 1126 }
@@ -1051,12 +1141,9 @@ void iwl_opmode_deregister(const char *name)
1051 iwlwifi_opmode_table[i].ops = NULL; 1141 iwlwifi_opmode_table[i].ops = NULL;
1052 1142
1053 /* call the stop routine for all devices */ 1143 /* call the stop routine for all devices */
1054 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) { 1144 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1055 if (drv->op_mode) { 1145 _iwl_op_mode_stop(drv);
1056 iwl_op_mode_stop(drv->op_mode); 1146
1057 drv->op_mode = NULL;
1058 }
1059 }
1060 mutex_unlock(&iwlwifi_opmode_table_mtx); 1147 mutex_unlock(&iwlwifi_opmode_table_mtx);
1061 return; 1148 return;
1062 } 1149 }
@@ -1076,6 +1163,14 @@ static int __init iwl_drv_init(void)
1076 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 1163 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
1077 pr_info(DRV_COPYRIGHT "\n"); 1164 pr_info(DRV_COPYRIGHT "\n");
1078 1165
1166#ifdef CONFIG_IWLWIFI_DEBUGFS
1167 /* Create the root of iwlwifi debugfs subsystem. */
1168 iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
1169
1170 if (!iwl_dbgfs_root)
1171 return -EFAULT;
1172#endif
1173
1079 return iwl_pci_register_driver(); 1174 return iwl_pci_register_driver();
1080} 1175}
1081module_init(iwl_drv_init); 1176module_init(iwl_drv_init);
@@ -1083,6 +1178,10 @@ module_init(iwl_drv_init);
1083static void __exit iwl_drv_exit(void) 1178static void __exit iwl_drv_exit(void)
1084{ 1179{
1085 iwl_pci_unregister_driver(); 1180 iwl_pci_unregister_driver();
1181
1182#ifdef CONFIG_IWLWIFI_DEBUGFS
1183 debugfs_remove_recursive(iwl_dbgfs_root);
1184#endif
1086} 1185}
1087module_exit(iwl_drv_exit); 1186module_exit(iwl_drv_exit);
1088 1187
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 2cbf137b25bf..285de5f68c05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -90,9 +90,9 @@
90 * 4) The bus specific component configures the bus 90 * 4) The bus specific component configures the bus
91 * 5) The bus specific component calls to the drv bus agnostic part 91 * 5) The bus specific component calls to the drv bus agnostic part
92 * (iwl_drv_start) 92 * (iwl_drv_start)
93 * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback 93 * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
94 * 7) iwl_ucode_callback parses the fw file 94 * 7) iwl_req_fw_callback parses the fw file
95 * 8) iwl_ucode_callback starts the wifi implementation to matches the fw 95 * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw
96 */ 96 */
97 97
98struct iwl_drv; 98struct iwl_drv;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 9c07c670a1ce..a5e425718f56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -85,8 +85,6 @@ struct iwl_eeprom_data {
85 int n_hw_addrs; 85 int n_hw_addrs;
86 u8 hw_addr[ETH_ALEN]; 86 u8 hw_addr[ETH_ALEN];
87 87
88 u16 radio_config;
89
90 u8 calib_version; 88 u8 calib_version;
91 __le16 calib_voltage; 89 __le16 calib_voltage;
92 90
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 2153e4cc5572..d1a86b66bc51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -124,8 +124,7 @@ struct iwl_ucode_capabilities {
124 124
125/* one for each uCode image (inst/data, init/runtime/wowlan) */ 125/* one for each uCode image (inst/data, init/runtime/wowlan) */
126struct fw_desc { 126struct fw_desc {
127 dma_addr_t p_addr; /* hardware address */ 127 const void *data; /* vmalloc'ed data */
128 void *v_addr; /* software address */
129 u32 len; /* size in bytes */ 128 u32 len; /* size in bytes */
130 u32 offset; /* offset in the device */ 129 u32 offset; /* offset in the device */
131}; 130};
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 64886f95664f..c8d9b9517468 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -134,7 +134,8 @@ struct iwl_cfg;
134struct iwl_op_mode_ops { 134struct iwl_op_mode_ops {
135 struct iwl_op_mode *(*start)(struct iwl_trans *trans, 135 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
136 const struct iwl_cfg *cfg, 136 const struct iwl_cfg *cfg,
137 const struct iwl_fw *fw); 137 const struct iwl_fw *fw,
138 struct dentry *dbgfs_dir);
138 void (*stop)(struct iwl_op_mode *op_mode); 139 void (*stop)(struct iwl_op_mode *op_mode);
139 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, 140 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
140 struct iwl_device_cmd *cmd); 141 struct iwl_device_cmd *cmd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 92576a3e84ef..ff1154232885 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -184,14 +184,20 @@ struct iwl_rx_packet {
184 * @CMD_SYNC: The caller will be stalled until the fw responds to the command 184 * @CMD_SYNC: The caller will be stalled until the fw responds to the command
185 * @CMD_ASYNC: Return right away and don't want for the response 185 * @CMD_ASYNC: Return right away and don't want for the response
186 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 186 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
187 * response. 187 * response. The caller needs to call iwl_free_resp when done.
188 * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
189 * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
190 * copied. The pointer passed to the response handler is in the transport
191 * ownership and don't need to be freed by the op_mode. This also means
192 * that the pointer is invalidated after the op_mode's handler returns.
188 * @CMD_ON_DEMAND: This command is sent by the test mode pipe. 193 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
189 */ 194 */
190enum CMD_MODE { 195enum CMD_MODE {
191 CMD_SYNC = 0, 196 CMD_SYNC = 0,
192 CMD_ASYNC = BIT(0), 197 CMD_ASYNC = BIT(0),
193 CMD_WANT_SKB = BIT(1), 198 CMD_WANT_SKB = BIT(1),
194 CMD_ON_DEMAND = BIT(2), 199 CMD_WANT_HCMD = BIT(2),
200 CMD_ON_DEMAND = BIT(3),
195}; 201};
196 202
197#define DEF_CMD_PAYLOAD_SIZE 320 203#define DEF_CMD_PAYLOAD_SIZE 320
@@ -460,6 +466,8 @@ struct iwl_trans {
460 size_t dev_cmd_headroom; 466 size_t dev_cmd_headroom;
461 char dev_cmd_pool_name[50]; 467 char dev_cmd_pool_name[50];
462 468
469 struct dentry *dbgfs_dir;
470
463 /* pointer to trans specific struct */ 471 /* pointer to trans specific struct */
464 /*Ensure that this pointer will always be aligned to sizeof pointer */ 472 /*Ensure that this pointer will always be aligned to sizeof pointer */
465 char trans_specific[0] __aligned(sizeof(void *)); 473 char trans_specific[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f4c3500b68c6..2a4675396707 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -263,8 +263,6 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
263/* PCI registers */ 263/* PCI registers */
264#define PCI_CFG_RETRY_TIMEOUT 0x041 264#define PCI_CFG_RETRY_TIMEOUT 0x041
265 265
266#ifndef CONFIG_IWLWIFI_IDI
267
268static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 266static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
269{ 267{
270 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 268 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -282,8 +280,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
282 if (!trans_pcie->drv) 280 if (!trans_pcie->drv)
283 goto out_free_trans; 281 goto out_free_trans;
284 282
283 /* register transport layer debugfs here */
284 if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
285 goto out_free_drv;
286
285 return 0; 287 return 0;
286 288
289out_free_drv:
290 iwl_drv_stop(trans_pcie->drv);
287out_free_trans: 291out_free_trans:
288 iwl_trans_pcie_free(iwl_trans); 292 iwl_trans_pcie_free(iwl_trans);
289 pci_set_drvdata(pdev, NULL); 293 pci_set_drvdata(pdev, NULL);
@@ -301,8 +305,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
301 pci_set_drvdata(pdev, NULL); 305 pci_set_drvdata(pdev, NULL);
302} 306}
303 307
304#endif /* CONFIG_IWLWIFI_IDI */
305
306#ifdef CONFIG_PM_SLEEP 308#ifdef CONFIG_PM_SLEEP
307 309
308static int iwl_pci_suspend(struct device *device) 310static int iwl_pci_suspend(struct device *device)
@@ -347,15 +349,6 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
347 349
348#endif 350#endif
349 351
350#ifdef CONFIG_IWLWIFI_IDI
351/*
352 * Defined externally in iwl-idi.c
353 */
354int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
355void __devexit iwl_pci_remove(struct pci_dev *pdev);
356
357#endif /* CONFIG_IWLWIFI_IDI */
358
359static struct pci_driver iwl_pci_driver = { 352static struct pci_driver iwl_pci_driver = {
360 .name = DRV_NAME, 353 .name = DRV_NAME,
361 .id_table = iwl_hw_card_ids, 354 .id_table = iwl_hw_card_ids,
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 4ffc18dc3a57..401178f44a3b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -184,6 +184,7 @@ struct iwl_queue {
184 184
185struct iwl_pcie_tx_queue_entry { 185struct iwl_pcie_tx_queue_entry {
186 struct iwl_device_cmd *cmd; 186 struct iwl_device_cmd *cmd;
187 struct iwl_device_cmd *copy_cmd;
187 struct sk_buff *skb; 188 struct sk_buff *skb;
188 struct iwl_cmd_meta meta; 189 struct iwl_cmd_meta meta;
189}; 190};
@@ -310,7 +311,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
310******************************************************/ 311******************************************************/
311void iwl_bg_rx_replenish(struct work_struct *data); 312void iwl_bg_rx_replenish(struct work_struct *data);
312void iwl_irq_tasklet(struct iwl_trans *trans); 313void iwl_irq_tasklet(struct iwl_trans *trans);
313void iwlagn_rx_replenish(struct iwl_trans *trans); 314void iwl_rx_replenish(struct iwl_trans *trans);
314void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 315void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
315 struct iwl_rx_queue *q); 316 struct iwl_rx_queue *q);
316 317
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index d1a61ba6247a..17c8e5d82681 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -35,10 +35,6 @@
35#include "internal.h" 35#include "internal.h"
36#include "iwl-op-mode.h" 36#include "iwl-op-mode.h"
37 37
38#ifdef CONFIG_IWLWIFI_IDI
39#include "iwl-amfh.h"
40#endif
41
42/****************************************************************************** 38/******************************************************************************
43 * 39 *
44 * RX path functions 40 * RX path functions
@@ -181,15 +177,15 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
181} 177}
182 178
183/** 179/**
184 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 180 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
185 */ 181 */
186static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr) 182static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
187{ 183{
188 return cpu_to_le32((u32)(dma_addr >> 8)); 184 return cpu_to_le32((u32)(dma_addr >> 8));
189} 185}
190 186
191/** 187/**
192 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool 188 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
193 * 189 *
194 * If there are slots in the RX queue that need to be restocked, 190 * If there are slots in the RX queue that need to be restocked,
195 * and we have free pre-allocated buffers, fill the ranks as much 191 * and we have free pre-allocated buffers, fill the ranks as much
@@ -199,7 +195,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
199 * also updates the memory address in the firmware to reference the new 195 * also updates the memory address in the firmware to reference the new
200 * target buffer. 196 * target buffer.
201 */ 197 */
202static void iwlagn_rx_queue_restock(struct iwl_trans *trans) 198static void iwl_rx_queue_restock(struct iwl_trans *trans)
203{ 199{
204 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 200 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
205 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 201 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -207,6 +203,17 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
207 struct iwl_rx_mem_buffer *rxb; 203 struct iwl_rx_mem_buffer *rxb;
208 unsigned long flags; 204 unsigned long flags;
209 205
206 /*
207 * If the device isn't enabled - not need to try to add buffers...
208 * This can happen when we stop the device and still have an interrupt
209 * pending. We stop the APM before we sync the interrupts / tasklets
210 * because we have to (see comment there). On the other hand, since
211 * the APM is stopped, we cannot access the HW (in particular not prph).
212 * So don't try to restock if the APM has been already stopped.
213 */
214 if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
215 return;
216
210 spin_lock_irqsave(&rxq->lock, flags); 217 spin_lock_irqsave(&rxq->lock, flags);
211 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 218 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
212 /* The overwritten rxb must be a used one */ 219 /* The overwritten rxb must be a used one */
@@ -219,7 +226,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
219 list_del(element); 226 list_del(element);
220 227
221 /* Point to Rx buffer via next RBD in circular buffer */ 228 /* Point to Rx buffer via next RBD in circular buffer */
222 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma); 229 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
223 rxq->queue[rxq->write] = rxb; 230 rxq->queue[rxq->write] = rxb;
224 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 231 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
225 rxq->free_count--; 232 rxq->free_count--;
@@ -230,7 +237,6 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
230 if (rxq->free_count <= RX_LOW_WATERMARK) 237 if (rxq->free_count <= RX_LOW_WATERMARK)
231 schedule_work(&trans_pcie->rx_replenish); 238 schedule_work(&trans_pcie->rx_replenish);
232 239
233
234 /* If we've added more space for the firmware to place data, tell it. 240 /* If we've added more space for the firmware to place data, tell it.
235 * Increment device's write pointer in multiples of 8. */ 241 * Increment device's write pointer in multiples of 8. */
236 if (rxq->write_actual != (rxq->write & ~0x7)) { 242 if (rxq->write_actual != (rxq->write & ~0x7)) {
@@ -241,15 +247,16 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
241 } 247 }
242} 248}
243 249
244/** 250/*
245 * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free 251 * iwl_rx_allocate - allocate a page for each used RBD
246 *
247 * When moving to rx_free an SKB is allocated for the slot.
248 * 252 *
249 * Also restock the Rx queue via iwl_rx_queue_restock. 253 * A used RBD is an Rx buffer that has been given to the stack. To use it again
250 * This is called as a scheduled work item (except for during initialization) 254 * a page must be allocated and the RBD must point to the page. This function
255 * doesn't change the HW pointer but handles the list of pages that is used by
256 * iwl_rx_queue_restock. The latter function will update the HW to use the newly
257 * allocated buffers.
251 */ 258 */
252static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) 259static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
253{ 260{
254 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 261 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
255 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 262 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -328,23 +335,31 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
328 } 335 }
329} 336}
330 337
331void iwlagn_rx_replenish(struct iwl_trans *trans) 338/*
339 * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
340 *
341 * When moving to rx_free an page is allocated for the slot.
342 *
343 * Also restock the Rx queue via iwl_rx_queue_restock.
344 * This is called as a scheduled work item (except for during initialization)
345 */
346void iwl_rx_replenish(struct iwl_trans *trans)
332{ 347{
333 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 348 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
334 unsigned long flags; 349 unsigned long flags;
335 350
336 iwlagn_rx_allocate(trans, GFP_KERNEL); 351 iwl_rx_allocate(trans, GFP_KERNEL);
337 352
338 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 353 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
339 iwlagn_rx_queue_restock(trans); 354 iwl_rx_queue_restock(trans);
340 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 355 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
341} 356}
342 357
343static void iwlagn_rx_replenish_now(struct iwl_trans *trans) 358static void iwl_rx_replenish_now(struct iwl_trans *trans)
344{ 359{
345 iwlagn_rx_allocate(trans, GFP_ATOMIC); 360 iwl_rx_allocate(trans, GFP_ATOMIC);
346 361
347 iwlagn_rx_queue_restock(trans); 362 iwl_rx_queue_restock(trans);
348} 363}
349 364
350void iwl_bg_rx_replenish(struct work_struct *data) 365void iwl_bg_rx_replenish(struct work_struct *data)
@@ -352,7 +367,7 @@ void iwl_bg_rx_replenish(struct work_struct *data)
352 struct iwl_trans_pcie *trans_pcie = 367 struct iwl_trans_pcie *trans_pcie =
353 container_of(data, struct iwl_trans_pcie, rx_replenish); 368 container_of(data, struct iwl_trans_pcie, rx_replenish);
354 369
355 iwlagn_rx_replenish(trans_pcie->trans); 370 iwl_rx_replenish(trans_pcie->trans);
356} 371}
357 372
358static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, 373static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
@@ -421,13 +436,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
421 index = SEQ_TO_INDEX(sequence); 436 index = SEQ_TO_INDEX(sequence);
422 cmd_index = get_cmd_index(&txq->q, index); 437 cmd_index = get_cmd_index(&txq->q, index);
423 438
424 if (reclaim) 439 if (reclaim) {
425 cmd = txq->entries[cmd_index].cmd; 440 struct iwl_pcie_tx_queue_entry *ent;
426 else 441 ent = &txq->entries[cmd_index];
442 cmd = ent->copy_cmd;
443 WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
444 } else {
427 cmd = NULL; 445 cmd = NULL;
446 }
428 447
429 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); 448 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
430 449
450 if (reclaim) {
451 /* The original command isn't needed any more */
452 kfree(txq->entries[cmd_index].copy_cmd);
453 txq->entries[cmd_index].copy_cmd = NULL;
454 }
455
431 /* 456 /*
432 * After here, we should always check rxcb._page_stolen, 457 * After here, we should always check rxcb._page_stolen,
433 * if it is true then one of the handlers took the page. 458 * if it is true then one of the handlers took the page.
@@ -520,7 +545,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
520 count++; 545 count++;
521 if (count >= 8) { 546 if (count >= 8) {
522 rxq->read = i; 547 rxq->read = i;
523 iwlagn_rx_replenish_now(trans); 548 iwl_rx_replenish_now(trans);
524 count = 0; 549 count = 0;
525 } 550 }
526 } 551 }
@@ -529,9 +554,9 @@ static void iwl_rx_handle(struct iwl_trans *trans)
529 /* Backtrack one entry */ 554 /* Backtrack one entry */
530 rxq->read = i; 555 rxq->read = i;
531 if (fill_rx) 556 if (fill_rx)
532 iwlagn_rx_replenish_now(trans); 557 iwl_rx_replenish_now(trans);
533 else 558 else
534 iwlagn_rx_queue_restock(trans); 559 iwl_rx_queue_restock(trans);
535} 560}
536 561
537/** 562/**
@@ -713,11 +738,9 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
713 /* Disable periodic interrupt; we use it as just a one-shot. */ 738 /* Disable periodic interrupt; we use it as just a one-shot. */
714 iwl_write8(trans, CSR_INT_PERIODIC_REG, 739 iwl_write8(trans, CSR_INT_PERIODIC_REG,
715 CSR_INT_PERIODIC_DIS); 740 CSR_INT_PERIODIC_DIS);
716#ifdef CONFIG_IWLWIFI_IDI 741
717 iwl_amfh_rx_handler();
718#else
719 iwl_rx_handle(trans); 742 iwl_rx_handle(trans);
720#endif 743
721 /* 744 /*
722 * Enable periodic interrupt in 8 msec only if we received 745 * Enable periodic interrupt in 8 msec only if we received
723 * real RX interrupt (instead of just periodic int), to catch 746 * real RX interrupt (instead of just periodic int), to catch
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 063ecaff5b56..fe0fffd04304 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -216,7 +216,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
216 rxq->free_count = 0; 216 rxq->free_count = 0;
217 spin_unlock_irqrestore(&rxq->lock, flags); 217 spin_unlock_irqrestore(&rxq->lock, flags);
218 218
219 iwlagn_rx_replenish(trans); 219 iwl_rx_replenish(trans);
220 220
221 iwl_trans_rx_hw_init(trans, rxq); 221 iwl_trans_rx_hw_init(trans, rxq);
222 222
@@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
492 iwl_tx_queue_unmap(trans, txq_id); 492 iwl_tx_queue_unmap(trans, txq_id);
493 493
494 /* De-alloc array of command/tx buffers */ 494 /* De-alloc array of command/tx buffers */
495
496 if (txq_id == trans_pcie->cmd_queue) 495 if (txq_id == trans_pcie->cmd_queue)
497 for (i = 0; i < txq->q.n_window; i++) 496 for (i = 0; i < txq->q.n_window; i++) {
498 kfree(txq->entries[i].cmd); 497 kfree(txq->entries[i].cmd);
498 kfree(txq->entries[i].copy_cmd);
499 }
499 500
500 /* De-alloc circular buffer of TFDs */ 501 /* De-alloc circular buffer of TFDs */
501 if (txq->q.n_bd) { 502 if (txq->q.n_bd) {
@@ -851,10 +852,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
851 852
852 iwl_op_mode_nic_config(trans->op_mode); 853 iwl_op_mode_nic_config(trans->op_mode);
853 854
854#ifndef CONFIG_IWLWIFI_IDI
855 /* Allocate the RX queue, or reset if it is already allocated */ 855 /* Allocate the RX queue, or reset if it is already allocated */
856 iwl_rx_init(trans); 856 iwl_rx_init(trans);
857#endif
858 857
859 /* Allocate or reset and init all Tx and Command queues */ 858 /* Allocate or reset and init all Tx and Command queues */
860 if (iwl_tx_init(trans)) 859 if (iwl_tx_init(trans))
@@ -893,6 +892,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
893static int iwl_prepare_card_hw(struct iwl_trans *trans) 892static int iwl_prepare_card_hw(struct iwl_trans *trans)
894{ 893{
895 int ret; 894 int ret;
895 int t = 0;
896 896
897 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 897 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
898 898
@@ -905,30 +905,25 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
905 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 905 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
906 CSR_HW_IF_CONFIG_REG_PREPARE); 906 CSR_HW_IF_CONFIG_REG_PREPARE);
907 907
908 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 908 do {
909 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 909 ret = iwl_set_hw_ready(trans);
910 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); 910 if (ret >= 0)
911 return 0;
911 912
912 if (ret < 0) 913 usleep_range(200, 1000);
913 return ret; 914 t += 200;
915 } while (t < 150000);
914 916
915 /* HW should be ready by now, check again. */
916 ret = iwl_set_hw_ready(trans);
917 if (ret >= 0)
918 return 0;
919 return ret; 917 return ret;
920} 918}
921 919
922/* 920/*
923 * ucode 921 * ucode
924 */ 922 */
925static int iwl_load_section(struct iwl_trans *trans, u8 section_num, 923static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
926 const struct fw_desc *section) 924 dma_addr_t phy_addr, u32 byte_cnt)
927{ 925{
928 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 926 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
929 dma_addr_t phy_addr = section->p_addr;
930 u32 byte_cnt = section->len;
931 u32 dst_addr = section->offset;
932 int ret; 927 int ret;
933 928
934 trans_pcie->ucode_write_complete = false; 929 trans_pcie->ucode_write_complete = false;
@@ -942,8 +937,8 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
942 dst_addr); 937 dst_addr);
943 938
944 iwl_write_direct32(trans, 939 iwl_write_direct32(trans,
945 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 940 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
946 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 941 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
947 942
948 iwl_write_direct32(trans, 943 iwl_write_direct32(trans,
949 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 944 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
@@ -962,33 +957,64 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
962 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 957 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
963 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 958 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
964 959
965 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
966 section_num);
967 ret = wait_event_timeout(trans_pcie->ucode_write_waitq, 960 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
968 trans_pcie->ucode_write_complete, 5 * HZ); 961 trans_pcie->ucode_write_complete, 5 * HZ);
969 if (!ret) { 962 if (!ret) {
970 IWL_ERR(trans, "Could not load the [%d] uCode section\n", 963 IWL_ERR(trans, "Failed to load firmware chunk!\n");
971 section_num);
972 return -ETIMEDOUT; 964 return -ETIMEDOUT;
973 } 965 }
974 966
975 return 0; 967 return 0;
976} 968}
977 969
978static int iwl_load_given_ucode(struct iwl_trans *trans, 970static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
979 const struct fw_img *image) 971 const struct fw_desc *section)
980{ 972{
973 u8 *v_addr;
974 dma_addr_t p_addr;
975 u32 offset;
981 int ret = 0; 976 int ret = 0;
982 int i;
983 977
984 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) { 978 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
985 if (!image->sec[i].p_addr) 979 section_num);
986 break; 980
981 v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
982 if (!v_addr)
983 return -ENOMEM;
984
985 for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
986 u32 copy_size;
987
988 copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
987 989
988 ret = iwl_load_section(trans, i, &image->sec[i]); 990 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
989 if (ret) 991 ret = iwl_load_firmware_chunk(trans, section->offset + offset,
990 return ret; 992 p_addr, copy_size);
993 if (ret) {
994 IWL_ERR(trans,
995 "Could not load the [%d] uCode section\n",
996 section_num);
997 break;
991 } 998 }
999 }
1000
1001 dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
1002 return ret;
1003}
1004
1005static int iwl_load_given_ucode(struct iwl_trans *trans,
1006 const struct fw_img *image)
1007{
1008 int i, ret = 0;
1009
1010 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
1011 if (!image->sec[i].data)
1012 break;
1013
1014 ret = iwl_load_section(trans, i, &image->sec[i]);
1015 if (ret)
1016 return ret;
1017 }
992 1018
993 /* Remove all resets to allow NIC to operate */ 1019 /* Remove all resets to allow NIC to operate */
994 iwl_write32(trans, CSR_RESET, 0); 1020 iwl_write32(trans, CSR_RESET, 0);
@@ -1181,9 +1207,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1181 */ 1207 */
1182 if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { 1208 if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
1183 iwl_trans_tx_stop(trans); 1209 iwl_trans_tx_stop(trans);
1184#ifndef CONFIG_IWLWIFI_IDI
1185 iwl_trans_rx_stop(trans); 1210 iwl_trans_rx_stop(trans);
1186#endif 1211
1187 /* Power-down device's busmaster DMA clocks */ 1212 /* Power-down device's busmaster DMA clocks */
1188 iwl_write_prph(trans, APMG_CLK_DIS_REG, 1213 iwl_write_prph(trans, APMG_CLK_DIS_REG,
1189 APMG_CLK_VAL_DMA_CLK_RQT); 1214 APMG_CLK_VAL_DMA_CLK_RQT);
@@ -1454,14 +1479,16 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1454 bool hw_rfkill; 1479 bool hw_rfkill;
1455 unsigned long flags; 1480 unsigned long flags;
1456 1481
1482 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1483 iwl_disable_interrupts(trans);
1484 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1485
1457 iwl_apm_stop(trans); 1486 iwl_apm_stop(trans);
1458 1487
1459 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1488 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1460 iwl_disable_interrupts(trans); 1489 iwl_disable_interrupts(trans);
1461 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1490 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1462 1491
1463 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1464
1465 if (!op_mode_leaving) { 1492 if (!op_mode_leaving) {
1466 /* 1493 /*
1467 * Even if we stop the HW, we still want the RF kill 1494 * Even if we stop the HW, we still want the RF kill
@@ -1549,9 +1576,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1549 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1576 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1550 1577
1551 iwl_trans_pcie_tx_free(trans); 1578 iwl_trans_pcie_tx_free(trans);
1552#ifndef CONFIG_IWLWIFI_IDI
1553 iwl_trans_pcie_rx_free(trans); 1579 iwl_trans_pcie_rx_free(trans);
1554#endif 1580
1555 if (trans_pcie->irq_requested == true) { 1581 if (trans_pcie->irq_requested == true) {
1556 free_irq(trans_pcie->irq, trans); 1582 free_irq(trans_pcie->irq, trans);
1557 iwl_free_isr_ict(trans); 1583 iwl_free_isr_ict(trans);
@@ -1769,7 +1795,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
1769#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 1795#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
1770 if (!debugfs_create_file(#name, mode, parent, trans, \ 1796 if (!debugfs_create_file(#name, mode, parent, trans, \
1771 &iwl_dbgfs_##name##_ops)) \ 1797 &iwl_dbgfs_##name##_ops)) \
1772 return -ENOMEM; \ 1798 goto err; \
1773} while (0) 1799} while (0)
1774 1800
1775/* file operation */ 1801/* file operation */
@@ -2033,6 +2059,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2033 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 2059 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2034 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); 2060 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
2035 return 0; 2061 return 0;
2062
2063err:
2064 IWL_ERR(trans, "failed to create the trans debugfs entry\n");
2065 return -ENOMEM;
2036} 2066}
2037#else 2067#else
2038static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 2068static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6baf8deef519..105e3af3c621 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -521,12 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
521 u16 copy_size, cmd_size; 521 u16 copy_size, cmd_size;
522 bool had_nocopy = false; 522 bool had_nocopy = false;
523 int i; 523 int i;
524 u8 *cmd_dest; 524 u32 cmd_pos;
525#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
526 const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
527 int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
528 int trace_idx;
529#endif
530 525
531 copy_size = sizeof(out_cmd->hdr); 526 copy_size = sizeof(out_cmd->hdr);
532 cmd_size = sizeof(out_cmd->hdr); 527 cmd_size = sizeof(out_cmd->hdr);
@@ -584,15 +579,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
584 INDEX_TO_SEQ(q->write_ptr)); 579 INDEX_TO_SEQ(q->write_ptr));
585 580
586 /* and copy the data that needs to be copied */ 581 /* and copy the data that needs to be copied */
587 582 cmd_pos = offsetof(struct iwl_device_cmd, payload);
588 cmd_dest = out_cmd->payload;
589 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 583 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
590 if (!cmd->len[i]) 584 if (!cmd->len[i])
591 continue; 585 continue;
592 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) 586 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
593 break; 587 break;
594 memcpy(cmd_dest, cmd->data[i], cmd->len[i]); 588 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
595 cmd_dest += cmd->len[i]; 589 cmd_pos += cmd->len[i];
590 }
591
592 WARN_ON_ONCE(txq->entries[idx].copy_cmd);
593
594 /*
595 * since out_cmd will be the source address of the FH, it will write
596 * the retry count there. So when the user needs to receivce the HCMD
597 * that corresponds to the response in the response handler, it needs
598 * to set CMD_WANT_HCMD.
599 */
600 if (cmd->flags & CMD_WANT_HCMD) {
601 txq->entries[idx].copy_cmd =
602 kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
603 if (unlikely(!txq->entries[idx].copy_cmd)) {
604 idx = -ENOMEM;
605 goto out;
606 }
596 } 607 }
597 608
598 IWL_DEBUG_HC(trans, 609 IWL_DEBUG_HC(trans,
@@ -612,11 +623,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
612 dma_unmap_len_set(out_meta, len, copy_size); 623 dma_unmap_len_set(out_meta, len, copy_size);
613 624
614 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); 625 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
615#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
616 trace_bufs[0] = &out_cmd->hdr;
617 trace_lens[0] = copy_size;
618 trace_idx = 1;
619#endif
620 626
621 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 627 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
622 if (!cmd->len[i]) 628 if (!cmd->len[i])
@@ -635,25 +641,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
635 641
636 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, 642 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
637 cmd->len[i], 0); 643 cmd->len[i], 0);
638#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
639 trace_bufs[trace_idx] = cmd->data[i];
640 trace_lens[trace_idx] = cmd->len[i];
641 trace_idx++;
642#endif
643 } 644 }
644 645
645 out_meta->flags = cmd->flags; 646 out_meta->flags = cmd->flags;
646 647
647 txq->need_update = 1; 648 txq->need_update = 1;
648 649
649 /* check that tracing gets all possible blocks */ 650 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
650 BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3); 651 &out_cmd->hdr, copy_size);
651#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
652 trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
653 trace_bufs[0], trace_lens[0],
654 trace_bufs[1], trace_lens[1],
655 trace_bufs[2], trace_lens[2]);
656#endif
657 652
658 /* start timer if queue currently empty */ 653 /* start timer if queue currently empty */
659 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 654 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 26e68326710b..aaa297315c47 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1159,6 +1159,22 @@ void lbs_set_mac_control(struct lbs_private *priv)
1159 lbs_deb_leave(LBS_DEB_CMD); 1159 lbs_deb_leave(LBS_DEB_CMD);
1160} 1160}
1161 1161
1162int lbs_set_mac_control_sync(struct lbs_private *priv)
1163{
1164 struct cmd_ds_mac_control cmd;
1165 int ret = 0;
1166
1167 lbs_deb_enter(LBS_DEB_CMD);
1168
1169 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1170 cmd.action = cpu_to_le16(priv->mac_control);
1171 cmd.reserved = 0;
1172 ret = lbs_cmd_with_response(priv, CMD_MAC_CONTROL, &cmd);
1173
1174 lbs_deb_leave(LBS_DEB_CMD);
1175 return ret;
1176}
1177
1162/** 1178/**
1163 * lbs_allocate_cmd_buffer - allocates the command buffer and links 1179 * lbs_allocate_cmd_buffer - allocates the command buffer and links
1164 * it to command free queue 1180 * it to command free queue
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index ab07608e13d0..4279e8ab95f2 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -96,6 +96,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv);
96int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); 96int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
97 97
98void lbs_set_mac_control(struct lbs_private *priv); 98void lbs_set_mac_control(struct lbs_private *priv);
99int lbs_set_mac_control_sync(struct lbs_private *priv);
99 100
100int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, 101int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
101 s16 *maxlevel); 102 s16 *maxlevel);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index fe1ea43c5149..0c02f0483d1f 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -682,8 +682,10 @@ static int lbs_setup_firmware(struct lbs_private *priv)
682 682
683 /* Send cmd to FW to enable 11D function */ 683 /* Send cmd to FW to enable 11D function */
684 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1); 684 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
685 if (ret)
686 goto done;
685 687
686 lbs_set_mac_control(priv); 688 ret = lbs_set_mac_control_sync(priv);
687done: 689done:
688 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 690 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
689 return ret; 691 return ret;
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index a03457292c88..7001856241e6 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
227 lbtf_deb_leave(LBTF_DEB_MAIN); 227 lbtf_deb_leave(LBTF_DEB_MAIN);
228} 228}
229 229
230static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 230static void lbtf_op_tx(struct ieee80211_hw *hw,
231 struct ieee80211_tx_control *control,
232 struct sk_buff *skb)
231{ 233{
232 struct lbtf_private *priv = hw->priv; 234 struct lbtf_private *priv = hw->priv;
233 235
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 00838395778c..429ca3215fdb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -38,7 +38,7 @@ MODULE_AUTHOR("Jouni Malinen");
38MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); 38MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41static u32 wmediumd_pid; 41static u32 wmediumd_portid;
42 42
43static int radios = 2; 43static int radios = 2;
44module_param(radios, int, 0444); 44module_param(radios, int, 0444);
@@ -545,7 +545,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
545 545
546static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, 546static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
547 struct sk_buff *my_skb, 547 struct sk_buff *my_skb,
548 int dst_pid) 548 int dst_portid)
549{ 549{
550 struct sk_buff *skb; 550 struct sk_buff *skb;
551 struct mac80211_hwsim_data *data = hw->priv; 551 struct mac80211_hwsim_data *data = hw->priv;
@@ -619,7 +619,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
619 goto nla_put_failure; 619 goto nla_put_failure;
620 620
621 genlmsg_end(skb, msg_head); 621 genlmsg_end(skb, msg_head);
622 genlmsg_unicast(&init_net, skb, dst_pid); 622 genlmsg_unicast(&init_net, skb, dst_portid);
623 623
624 /* Enqueue the packet */ 624 /* Enqueue the packet */
625 skb_queue_tail(&data->pending, my_skb); 625 skb_queue_tail(&data->pending, my_skb);
@@ -709,11 +709,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
709 return ack; 709 return ack;
710} 710}
711 711
712static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 712static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
713 struct ieee80211_tx_control *control,
714 struct sk_buff *skb)
713{ 715{
714 bool ack; 716 bool ack;
715 struct ieee80211_tx_info *txi; 717 struct ieee80211_tx_info *txi;
716 u32 _pid; 718 u32 _portid;
717 719
718 mac80211_hwsim_monitor_rx(hw, skb); 720 mac80211_hwsim_monitor_rx(hw, skb);
719 721
@@ -724,10 +726,10 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
724 } 726 }
725 727
726 /* wmediumd mode check */ 728 /* wmediumd mode check */
727 _pid = ACCESS_ONCE(wmediumd_pid); 729 _portid = ACCESS_ONCE(wmediumd_portid);
728 730
729 if (_pid) 731 if (_portid)
730 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); 732 return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
731 733
732 /* NO wmediumd detected, perfect medium simulation */ 734 /* NO wmediumd detected, perfect medium simulation */
733 ack = mac80211_hwsim_tx_frame_no_nl(hw, skb); 735 ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
@@ -812,7 +814,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
812 struct ieee80211_hw *hw = arg; 814 struct ieee80211_hw *hw = arg;
813 struct sk_buff *skb; 815 struct sk_buff *skb;
814 struct ieee80211_tx_info *info; 816 struct ieee80211_tx_info *info;
815 u32 _pid; 817 u32 _portid;
816 818
817 hwsim_check_magic(vif); 819 hwsim_check_magic(vif);
818 820
@@ -829,10 +831,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
829 mac80211_hwsim_monitor_rx(hw, skb); 831 mac80211_hwsim_monitor_rx(hw, skb);
830 832
831 /* wmediumd mode check */ 833 /* wmediumd mode check */
832 _pid = ACCESS_ONCE(wmediumd_pid); 834 _portid = ACCESS_ONCE(wmediumd_portid);
833 835
834 if (_pid) 836 if (_portid)
835 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); 837 return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
836 838
837 mac80211_hwsim_tx_frame_no_nl(hw, skb); 839 mac80211_hwsim_tx_frame_no_nl(hw, skb);
838 dev_kfree_skb(skb); 840 dev_kfree_skb(skb);
@@ -1313,7 +1315,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1313 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1315 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1314 struct sk_buff *skb; 1316 struct sk_buff *skb;
1315 struct ieee80211_pspoll *pspoll; 1317 struct ieee80211_pspoll *pspoll;
1316 u32 _pid; 1318 u32 _portid;
1317 1319
1318 if (!vp->assoc) 1320 if (!vp->assoc)
1319 return; 1321 return;
@@ -1334,10 +1336,10 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1334 memcpy(pspoll->ta, mac, ETH_ALEN); 1336 memcpy(pspoll->ta, mac, ETH_ALEN);
1335 1337
1336 /* wmediumd mode check */ 1338 /* wmediumd mode check */
1337 _pid = ACCESS_ONCE(wmediumd_pid); 1339 _portid = ACCESS_ONCE(wmediumd_portid);
1338 1340
1339 if (_pid) 1341 if (_portid)
1340 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); 1342 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
1341 1343
1342 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb)) 1344 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
1343 printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__); 1345 printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
@@ -1351,7 +1353,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1351 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1353 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1352 struct sk_buff *skb; 1354 struct sk_buff *skb;
1353 struct ieee80211_hdr *hdr; 1355 struct ieee80211_hdr *hdr;
1354 u32 _pid; 1356 u32 _portid;
1355 1357
1356 if (!vp->assoc) 1358 if (!vp->assoc)
1357 return; 1359 return;
@@ -1373,10 +1375,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1373 memcpy(hdr->addr3, vp->bssid, ETH_ALEN); 1375 memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
1374 1376
1375 /* wmediumd mode check */ 1377 /* wmediumd mode check */
1376 _pid = ACCESS_ONCE(wmediumd_pid); 1378 _portid = ACCESS_ONCE(wmediumd_portid);
1377 1379
1378 if (_pid) 1380 if (_portid)
1379 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); 1381 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
1380 1382
1381 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb)) 1383 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
1382 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__); 1384 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
@@ -1630,10 +1632,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
1630 if (info == NULL) 1632 if (info == NULL)
1631 goto out; 1633 goto out;
1632 1634
1633 wmediumd_pid = info->snd_pid; 1635 wmediumd_portid = info->snd_portid;
1634 1636
1635 printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, " 1637 printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
1636 "switching to wmediumd mode with pid %d\n", info->snd_pid); 1638 "switching to wmediumd mode with pid %d\n", info->snd_portid);
1637 1639
1638 return 0; 1640 return 0;
1639out: 1641out:
@@ -1670,10 +1672,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
1670 if (state != NETLINK_URELEASE) 1672 if (state != NETLINK_URELEASE)
1671 return NOTIFY_DONE; 1673 return NOTIFY_DONE;
1672 1674
1673 if (notify->pid == wmediumd_pid) { 1675 if (notify->portid == wmediumd_portid) {
1674 printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" 1676 printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
1675 " socket, switching to perfect channel medium\n"); 1677 " socket, switching to perfect channel medium\n");
1676 wmediumd_pid = 0; 1678 wmediumd_portid = 0;
1677 } 1679 }
1678 return NOTIFY_DONE; 1680 return NOTIFY_DONE;
1679 1681
@@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
1727#endif 1729#endif
1728 BIT(NL80211_IFTYPE_AP) | 1730 BIT(NL80211_IFTYPE_AP) |
1729 BIT(NL80211_IFTYPE_P2P_GO) }, 1731 BIT(NL80211_IFTYPE_P2P_GO) },
1732 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
1730}; 1733};
1731 1734
1732static const struct ieee80211_iface_combination hwsim_if_comb = { 1735static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void)
1813 BIT(NL80211_IFTYPE_P2P_CLIENT) | 1816 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1814 BIT(NL80211_IFTYPE_P2P_GO) | 1817 BIT(NL80211_IFTYPE_P2P_GO) |
1815 BIT(NL80211_IFTYPE_ADHOC) | 1818 BIT(NL80211_IFTYPE_ADHOC) |
1816 BIT(NL80211_IFTYPE_MESH_POINT); 1819 BIT(NL80211_IFTYPE_MESH_POINT) |
1820 BIT(NL80211_IFTYPE_P2P_DEVICE);
1817 1821
1818 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1822 hw->flags = IEEE80211_HW_MFP_CAPABLE |
1819 IEEE80211_HW_SIGNAL_DBM | 1823 IEEE80211_HW_SIGNAL_DBM |
@@ -2052,7 +2056,7 @@ failed:
2052 mac80211_hwsim_free(); 2056 mac80211_hwsim_free();
2053 return err; 2057 return err;
2054} 2058}
2055 2059module_init(init_mac80211_hwsim);
2056 2060
2057static void __exit exit_mac80211_hwsim(void) 2061static void __exit exit_mac80211_hwsim(void)
2058{ 2062{
@@ -2063,7 +2067,4 @@ static void __exit exit_mac80211_hwsim(void)
2063 mac80211_hwsim_free(); 2067 mac80211_hwsim_free();
2064 unregister_netdev(hwsim_mon); 2068 unregister_netdev(hwsim_mon);
2065} 2069}
2066
2067
2068module_init(init_mac80211_hwsim);
2069module_exit(exit_mac80211_hwsim); 2070module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index e535c937628b..245a371f1a43 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -176,23 +176,6 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
176} 176}
177 177
178/* 178/*
179 * This function handles the command response of 11n configuration request.
180 *
181 * Handling includes changing the header fields into CPU format.
182 */
183int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
184 struct mwifiex_ds_11n_tx_cfg *tx_cfg)
185{
186 struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
187
188 if (tx_cfg) {
189 tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
190 tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
191 }
192 return 0;
193}
194
195/*
196 * This function prepares command of reconfigure Tx buffer. 179 * This function prepares command of reconfigure Tx buffer.
197 * 180 *
198 * Preparation includes - 181 * Preparation includes -
@@ -258,27 +241,6 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
258} 241}
259 242
260/* 243/*
261 * This function handles the command response of AMSDU aggregation
262 * control request.
263 *
264 * Handling includes changing the header fields into CPU format.
265 */
266int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
267 struct mwifiex_ds_11n_amsdu_aggr_ctrl
268 *amsdu_aggr_ctrl)
269{
270 struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
271 &resp->params.amsdu_aggr_ctrl;
272
273 if (amsdu_aggr_ctrl) {
274 amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
275 amsdu_aggr_ctrl->curr_buf_size =
276 le16_to_cpu(amsdu_ctrl->curr_buf_size);
277 }
278 return 0;
279}
280
281/*
282 * This function prepares 11n configuration command. 244 * This function prepares 11n configuration command.
283 * 245 *
284 * Preparation includes - 246 * Preparation includes -
@@ -726,3 +688,29 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
726 688
727 return count; 689 return count;
728} 690}
691
692/*
693 * This function retrieves the entry for specific tx BA stream table by RA and
694 * deletes it.
695 */
696void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
697{
698 struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
699 unsigned long flags;
700
701 if (!ra)
702 return;
703
704 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
705 list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
706 if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
707 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
708 flags);
709 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
710 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
711 }
712 }
713 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
714
715 return;
716}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 28366e9211fb..46006a54a656 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -28,8 +28,6 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
28 struct host_cmd_ds_command *resp); 28 struct host_cmd_ds_command *resp);
29int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv, 29int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
30 struct host_cmd_ds_command *resp); 30 struct host_cmd_ds_command *resp);
31int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
32 struct mwifiex_ds_11n_tx_cfg *tx_cfg);
33int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action, 31int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
34 struct mwifiex_ds_11n_tx_cfg *txcfg); 32 struct mwifiex_ds_11n_tx_cfg *txcfg);
35 33
@@ -60,15 +58,13 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
60 struct mwifiex_ds_rx_reorder_tbl *buf); 58 struct mwifiex_ds_rx_reorder_tbl *buf);
61int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv, 59int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
62 struct mwifiex_ds_tx_ba_stream_tbl *buf); 60 struct mwifiex_ds_tx_ba_stream_tbl *buf);
63int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
64 struct mwifiex_ds_11n_amsdu_aggr_ctrl
65 *amsdu_aggr_ctrl);
66int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv, 61int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
67 struct host_cmd_ds_command *cmd, 62 struct host_cmd_ds_command *cmd,
68 int cmd_action, u16 *buf_size); 63 int cmd_action, u16 *buf_size);
69int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd, 64int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
70 int cmd_action, 65 int cmd_action,
71 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl); 66 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
67void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
72 68
73/* 69/*
74 * This function checks whether AMPDU is allowed or not for a particular TID. 70 * This function checks whether AMPDU is allowed or not for a particular TID.
@@ -157,4 +153,18 @@ mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
157 153
158 return false; 154 return false;
159} 155}
156
157/*
158 * This function checks whether associated station is 11n enabled
159 */
160static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
161 struct mwifiex_sta_node *node)
162{
163
164 if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
165 !priv->ap_11n_enabled)
166 return 0;
167
168 return node->is_11n_enabled;
169}
160#endif /* !_MWIFIEX_11N_H_ */ 170#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index ab84eb943749..395f1bfd4102 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -62,9 +62,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
62 }; 62 };
63 struct tx_packet_hdr *tx_header; 63 struct tx_packet_hdr *tx_header;
64 64
65 skb_put(skb_aggr, sizeof(*tx_header)); 65 tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
66
67 tx_header = (struct tx_packet_hdr *) skb_aggr->data;
68 66
69 /* Copy DA and SA */ 67 /* Copy DA and SA */
70 dt_offset = 2 * ETH_ALEN; 68 dt_offset = 2 * ETH_ALEN;
@@ -82,12 +80,10 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
82 tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN); 80 tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
83 81
84 /* Add payload */ 82 /* Add payload */
85 skb_put(skb_aggr, skb_src->len); 83 memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
86 memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data, 84
87 skb_src->len); 85 /* Add padding for new MSDU to start from 4 byte boundary */
88 *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len + 86 *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
89 LLC_SNAP_LEN)) & 3)) : 0;
90 skb_put(skb_aggr, *pad);
91 87
92 return skb_aggr->len + *pad; 88 return skb_aggr->len + *pad;
93} 89}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 591ccd33f83c..9402b93b9a36 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -54,8 +54,13 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
54 tbl->rx_reorder_ptr[i] = NULL; 54 tbl->rx_reorder_ptr[i] = NULL;
55 } 55 }
56 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); 56 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
57 if (rx_tmp_ptr) 57 if (rx_tmp_ptr) {
58 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); 58 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
59 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
60 else
61 mwifiex_process_rx_packet(priv->adapter,
62 rx_tmp_ptr);
63 }
59 } 64 }
60 65
61 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 66 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -97,7 +102,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
97 rx_tmp_ptr = tbl->rx_reorder_ptr[i]; 102 rx_tmp_ptr = tbl->rx_reorder_ptr[i];
98 tbl->rx_reorder_ptr[i] = NULL; 103 tbl->rx_reorder_ptr[i] = NULL;
99 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); 104 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
100 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); 105
106 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
107 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
108 else
109 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
101 } 110 }
102 111
103 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 112 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -148,7 +157,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
148 * This function returns the pointer to an entry in Rx reordering 157 * This function returns the pointer to an entry in Rx reordering
149 * table which matches the given TA/TID pair. 158 * table which matches the given TA/TID pair.
150 */ 159 */
151static struct mwifiex_rx_reorder_tbl * 160struct mwifiex_rx_reorder_tbl *
152mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) 161mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
153{ 162{
154 struct mwifiex_rx_reorder_tbl *tbl; 163 struct mwifiex_rx_reorder_tbl *tbl;
@@ -167,6 +176,31 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
167 return NULL; 176 return NULL;
168} 177}
169 178
179/* This function retrieves the pointer to an entry in Rx reordering
180 * table which matches the given TA and deletes it.
181 */
182void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
183{
184 struct mwifiex_rx_reorder_tbl *tbl, *tmp;
185 unsigned long flags;
186
187 if (!ta)
188 return;
189
190 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
191 list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
192 if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
193 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
194 flags);
195 mwifiex_del_rx_reorder_entry(priv, tbl);
196 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
197 }
198 }
199 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
200
201 return;
202}
203
170/* 204/*
171 * This function finds the last sequence number used in the packets 205 * This function finds the last sequence number used in the packets
172 * buffered in Rx reordering table. 206 * buffered in Rx reordering table.
@@ -226,6 +260,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
226 struct mwifiex_rx_reorder_tbl *tbl, *new_node; 260 struct mwifiex_rx_reorder_tbl *tbl, *new_node;
227 u16 last_seq = 0; 261 u16 last_seq = 0;
228 unsigned long flags; 262 unsigned long flags;
263 struct mwifiex_sta_node *node;
229 264
230 /* 265 /*
231 * If we get a TID, ta pair which is already present dispatch all the 266 * If we get a TID, ta pair which is already present dispatch all the
@@ -248,19 +283,26 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
248 new_node->tid = tid; 283 new_node->tid = tid;
249 memcpy(new_node->ta, ta, ETH_ALEN); 284 memcpy(new_node->ta, ta, ETH_ALEN);
250 new_node->start_win = seq_num; 285 new_node->start_win = seq_num;
251 if (mwifiex_queuing_ra_based(priv)) 286
252 /* TODO for adhoc */ 287 if (mwifiex_queuing_ra_based(priv)) {
253 dev_dbg(priv->adapter->dev, 288 dev_dbg(priv->adapter->dev,
254 "info: ADHOC:last_seq=%d start_win=%d\n", 289 "info: AP/ADHOC:last_seq=%d start_win=%d\n",
255 last_seq, new_node->start_win); 290 last_seq, new_node->start_win);
256 else 291 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
292 node = mwifiex_get_sta_entry(priv, ta);
293 if (node)
294 last_seq = node->rx_seq[tid];
295 }
296 } else {
257 last_seq = priv->rx_seq[tid]; 297 last_seq = priv->rx_seq[tid];
298 }
258 299
259 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && 300 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
260 last_seq >= new_node->start_win) 301 last_seq >= new_node->start_win)
261 new_node->start_win = last_seq + 1; 302 new_node->start_win = last_seq + 1;
262 303
263 new_node->win_size = win_size; 304 new_node->win_size = win_size;
305 new_node->flags = 0;
264 306
265 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size, 307 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
266 GFP_KERNEL); 308 GFP_KERNEL);
@@ -396,8 +438,13 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
396 438
397 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 439 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
398 if (!tbl) { 440 if (!tbl) {
399 if (pkt_type != PKT_TYPE_BAR) 441 if (pkt_type != PKT_TYPE_BAR) {
400 mwifiex_process_rx_packet(priv->adapter, payload); 442 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
443 mwifiex_handle_uap_rx_forward(priv, payload);
444 else
445 mwifiex_process_rx_packet(priv->adapter,
446 payload);
447 }
401 return 0; 448 return 0;
402 } 449 }
403 start_win = tbl->start_win; 450 start_win = tbl->start_win;
@@ -411,13 +458,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
411 * If seq_num is less then starting win then ignore and drop the 458 * If seq_num is less then starting win then ignore and drop the
412 * packet 459 * packet
413 */ 460 */
414 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */ 461 if (tbl->flags & RXREOR_FORCE_NO_DROP) {
415 if (seq_num >= ((start_win + TWOPOW11) & 462 dev_dbg(priv->adapter->dev,
416 (MAX_TID_VALUE - 1)) && (seq_num < start_win)) 463 "RXREOR_FORCE_NO_DROP when HS is activated\n");
464 tbl->flags &= ~RXREOR_FORCE_NO_DROP;
465 } else {
466 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
467 if (seq_num >= ((start_win + TWOPOW11) &
468 (MAX_TID_VALUE - 1)) &&
469 seq_num < start_win)
470 return -1;
471 } else if ((seq_num < start_win) ||
472 (seq_num > (start_win + TWOPOW11))) {
417 return -1; 473 return -1;
418 } else if ((seq_num < start_win) || 474 }
419 (seq_num > (start_win + TWOPOW11))) {
420 return -1;
421 } 475 }
422 476
423 /* 477 /*
@@ -428,8 +482,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
428 seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1); 482 seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
429 483
430 if (((end_win < start_win) && 484 if (((end_win < start_win) &&
431 (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) && 485 (seq_num < start_win) && (seq_num > end_win)) ||
432 (seq_num > end_win)) ||
433 ((end_win > start_win) && ((seq_num > end_win) || 486 ((end_win > start_win) && ((seq_num > end_win) ||
434 (seq_num < start_win)))) { 487 (seq_num < start_win)))) {
435 end_win = seq_num; 488 end_win = seq_num;
@@ -591,3 +644,29 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
591 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 644 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
592 mwifiex_reset_11n_rx_seq_num(priv); 645 mwifiex_reset_11n_rx_seq_num(priv);
593} 646}
647
648/*
649 * This function updates all rx_reorder_tbl's flags.
650 */
651void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
652{
653 struct mwifiex_private *priv;
654 struct mwifiex_rx_reorder_tbl *tbl;
655 unsigned long lock_flags;
656 int i;
657
658 for (i = 0; i < adapter->priv_num; i++) {
659 priv = adapter->priv[i];
660 if (!priv)
661 continue;
662 if (list_empty(&priv->rx_reorder_tbl_ptr))
663 continue;
664
665 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
666 list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
667 tbl->flags = flags;
668 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
669 }
670
671 return;
672}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 6c9815a0f5d8..4064041ac852 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -38,6 +38,12 @@
38#define ADDBA_RSP_STATUS_ACCEPT 0 38#define ADDBA_RSP_STATUS_ACCEPT 0
39 39
40#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff 40#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
41#define BA_SETUP_MAX_PACKET_THRESHOLD 16
42#define BA_SETUP_PACKET_OFFSET 16
43
44enum mwifiex_rxreor_flags {
45 RXREOR_FORCE_NO_DROP = 1<<0,
46};
41 47
42static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv) 48static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
43{ 49{
@@ -68,5 +74,9 @@ struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct
68 mwifiex_private 74 mwifiex_private
69 *priv, int tid, 75 *priv, int tid,
70 u8 *ta); 76 u8 *ta);
77struct mwifiex_rx_reorder_tbl *
78mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
79void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
80void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
71 81
72#endif /* _MWIFIEX_11N_RXREORDER_H_ */ 82#endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 3f66ebb0a630..dd0410d2d465 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -33,8 +33,10 @@ mwifiex-y += uap_cmd.o
33mwifiex-y += ie.o 33mwifiex-y += ie.o
34mwifiex-y += sta_cmdresp.o 34mwifiex-y += sta_cmdresp.o
35mwifiex-y += sta_event.o 35mwifiex-y += sta_event.o
36mwifiex-y += uap_event.o
36mwifiex-y += sta_tx.o 37mwifiex-y += sta_tx.o
37mwifiex-y += sta_rx.o 38mwifiex-y += sta_rx.o
39mwifiex-y += uap_txrx.o
38mwifiex-y += cfg80211.o 40mwifiex-y += cfg80211.o
39mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o 41mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
40obj-$(CONFIG_MWIFIEX) += mwifiex.o 42obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index fe42137384da..2691620393ea 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -22,7 +22,7 @@
22 22
23static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { 23static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
24 { 24 {
25 .max = 1, .types = BIT(NL80211_IFTYPE_STATION), 25 .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
26 }, 26 },
27 { 27 {
28 .max = 1, .types = BIT(NL80211_IFTYPE_AP), 28 .max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -37,6 +37,36 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
37 .beacon_int_infra_match = true, 37 .beacon_int_infra_match = true,
38}; 38};
39 39
40static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
41 .n_reg_rules = 7,
42 .alpha2 = "99",
43 .reg_rules = {
44 /* Channel 1 - 11 */
45 REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
46 /* Channel 12 - 13 */
47 REG_RULE(2467-10, 2472+10, 20, 3, 20,
48 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
49 /* Channel 14 */
50 REG_RULE(2484-10, 2484+10, 20, 3, 20,
51 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
52 NL80211_RRF_NO_OFDM),
53 /* Channel 36 - 48 */
54 REG_RULE(5180-10, 5240+10, 40, 3, 20,
55 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
56 /* Channel 149 - 165 */
57 REG_RULE(5745-10, 5825+10, 40, 3, 20,
58 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
59 /* Channel 52 - 64 */
60 REG_RULE(5260-10, 5320+10, 40, 3, 30,
61 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
62 NL80211_RRF_DFS),
63 /* Channel 100 - 140 */
64 REG_RULE(5500-10, 5700+10, 40, 3, 30,
65 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
66 NL80211_RRF_DFS),
67 }
68};
69
40/* 70/*
41 * This function maps the nl802.11 channel type into driver channel type. 71 * This function maps the nl802.11 channel type into driver channel type.
42 * 72 *
@@ -47,8 +77,7 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
47 * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW 77 * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
48 * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE 78 * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
49 */ 79 */
50static u8 80u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
51mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
52{ 81{
53 switch (chan_type) { 82 switch (chan_type) {
54 case NL80211_CHAN_NO_HT: 83 case NL80211_CHAN_NO_HT:
@@ -99,7 +128,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
99 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 128 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
100 const u8 *peer_mac = pairwise ? mac_addr : bc_mac; 129 const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
101 130
102 if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) { 131 if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
103 wiphy_err(wiphy, "deleting the crypto keys\n"); 132 wiphy_err(wiphy, "deleting the crypto keys\n");
104 return -EFAULT; 133 return -EFAULT;
105 } 134 }
@@ -109,6 +138,188 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
109} 138}
110 139
111/* 140/*
141 * This function forms an skb for management frame.
142 */
143static int
144mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
145{
146 u8 addr[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
147 u16 pkt_len;
148 u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
149 struct timeval tv;
150
151 pkt_len = len + ETH_ALEN;
152
153 skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
154 MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
155 memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
156
157 memcpy(skb_push(skb, sizeof(tx_control)),
158 &tx_control, sizeof(tx_control));
159
160 memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
161
162 /* Add packet data and address4 */
163 memcpy(skb_put(skb, sizeof(struct ieee80211_hdr_3addr)), buf,
164 sizeof(struct ieee80211_hdr_3addr));
165 memcpy(skb_put(skb, ETH_ALEN), addr, ETH_ALEN);
166 memcpy(skb_put(skb, len - sizeof(struct ieee80211_hdr_3addr)),
167 buf + sizeof(struct ieee80211_hdr_3addr),
168 len - sizeof(struct ieee80211_hdr_3addr));
169
170 skb->priority = LOW_PRIO_TID;
171 do_gettimeofday(&tv);
172 skb->tstamp = timeval_to_ktime(tv);
173
174 return 0;
175}
176
177/*
178 * CFG802.11 operation handler to transmit a management frame.
179 */
180static int
181mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
182 struct ieee80211_channel *chan, bool offchan,
183 enum nl80211_channel_type channel_type,
184 bool channel_type_valid, unsigned int wait,
185 const u8 *buf, size_t len, bool no_cck,
186 bool dont_wait_for_ack, u64 *cookie)
187{
188 struct sk_buff *skb;
189 u16 pkt_len;
190 const struct ieee80211_mgmt *mgmt;
191 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
192
193 if (!buf || !len) {
194 wiphy_err(wiphy, "invalid buffer and length\n");
195 return -EFAULT;
196 }
197
198 mgmt = (const struct ieee80211_mgmt *)buf;
199 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA &&
200 ieee80211_is_probe_resp(mgmt->frame_control)) {
201 /* Since we support offload probe resp, we need to skip probe
202 * resp in AP or GO mode */
203 wiphy_dbg(wiphy,
204 "info: skip to send probe resp in AP or GO mode\n");
205 return 0;
206 }
207
208 pkt_len = len + ETH_ALEN;
209 skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
210 MWIFIEX_MGMT_FRAME_HEADER_SIZE +
211 pkt_len + sizeof(pkt_len));
212
213 if (!skb) {
214 wiphy_err(wiphy, "allocate skb failed for management frame\n");
215 return -ENOMEM;
216 }
217
218 mwifiex_form_mgmt_frame(skb, buf, len);
219 mwifiex_queue_tx_pkt(priv, skb);
220
221 *cookie = random32() | 1;
222 cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
223
224 wiphy_dbg(wiphy, "info: management frame transmitted\n");
225 return 0;
226}
227
228/*
229 * CFG802.11 operation handler to register a mgmt frame.
230 */
231static void
232mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
233 struct wireless_dev *wdev,
234 u16 frame_type, bool reg)
235{
236 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
237
238 if (reg)
239 priv->mgmt_frame_mask |= BIT(frame_type >> 4);
240 else
241 priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
242
243 mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
244 HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
245
246 wiphy_dbg(wiphy, "info: mgmt frame registered\n");
247}
248
249/*
250 * CFG802.11 operation handler to remain on channel.
251 */
252static int
253mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
254 struct wireless_dev *wdev,
255 struct ieee80211_channel *chan,
256 enum nl80211_channel_type channel_type,
257 unsigned int duration, u64 *cookie)
258{
259 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
260 int ret;
261
262 if (!chan || !cookie) {
263 wiphy_err(wiphy, "Invalid parameter for ROC\n");
264 return -EINVAL;
265 }
266
267 if (priv->roc_cfg.cookie) {
268 wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llu\n",
269 priv->roc_cfg.cookie);
270 return -EBUSY;
271 }
272
273 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
274 &channel_type, duration);
275
276 if (!ret) {
277 *cookie = random32() | 1;
278 priv->roc_cfg.cookie = *cookie;
279 priv->roc_cfg.chan = *chan;
280 priv->roc_cfg.chan_type = channel_type;
281
282 cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type,
283 duration, GFP_ATOMIC);
284
285 wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
286 }
287
288 return ret;
289}
290
291/*
292 * CFG802.11 operation handler to cancel remain on channel.
293 */
294static int
295mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
296 struct wireless_dev *wdev, u64 cookie)
297{
298 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
299 int ret;
300
301 if (cookie != priv->roc_cfg.cookie)
302 return -ENOENT;
303
304 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
305 &priv->roc_cfg.chan,
306 &priv->roc_cfg.chan_type, 0);
307
308 if (!ret) {
309 cfg80211_remain_on_channel_expired(wdev, cookie,
310 &priv->roc_cfg.chan,
311 priv->roc_cfg.chan_type,
312 GFP_ATOMIC);
313
314 memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
315
316 wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
317 }
318
319 return ret;
320}
321
322/*
112 * CFG802.11 operation handler to set Tx power. 323 * CFG802.11 operation handler to set Tx power.
113 */ 324 */
114static int 325static int
@@ -171,7 +382,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
171 382
172 if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) { 383 if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
173 priv->wep_key_curr_index = key_index; 384 priv->wep_key_curr_index = key_index;
174 } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) { 385 } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
386 NULL, 0)) {
175 wiphy_err(wiphy, "set default Tx key index\n"); 387 wiphy_err(wiphy, "set default Tx key index\n");
176 return -EFAULT; 388 return -EFAULT;
177 } 389 }
@@ -207,7 +419,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
207 return 0; 419 return 0;
208 } 420 }
209 421
210 if (mwifiex_set_encode(priv, params->key, params->key_len, 422 if (mwifiex_set_encode(priv, params, params->key, params->key_len,
211 key_index, peer_mac, 0)) { 423 key_index, peer_mac, 0)) {
212 wiphy_err(wiphy, "crypto keys added\n"); 424 wiphy_err(wiphy, "crypto keys added\n");
213 return -EFAULT; 425 return -EFAULT;
@@ -462,6 +674,76 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
462 return 0; 674 return 0;
463} 675}
464 676
677static int
678mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
679{
680 u16 mode = P2P_MODE_DISABLE;
681
682 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
683 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
684
685 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
686 HostCmd_ACT_GEN_SET, 0, &mode))
687 return -1;
688
689 return 0;
690}
691
692/*
693 * This function initializes the functionalities for P2P client.
694 * The P2P client initialization sequence is:
695 * disable -> device -> client
696 */
697static int
698mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
699{
700 u16 mode;
701
702 if (mwifiex_cfg80211_deinit_p2p(priv))
703 return -1;
704
705 mode = P2P_MODE_DEVICE;
706 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
707 HostCmd_ACT_GEN_SET, 0, &mode))
708 return -1;
709
710 mode = P2P_MODE_CLIENT;
711 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
712 HostCmd_ACT_GEN_SET, 0, &mode))
713 return -1;
714
715 return 0;
716}
717
718/*
719 * This function initializes the functionalities for P2P GO.
720 * The P2P GO initialization sequence is:
721 * disable -> device -> GO
722 */
723static int
724mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
725{
726 u16 mode;
727
728 if (mwifiex_cfg80211_deinit_p2p(priv))
729 return -1;
730
731 mode = P2P_MODE_DEVICE;
732 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
733 HostCmd_ACT_GEN_SET, 0, &mode))
734 return -1;
735
736 mode = P2P_MODE_GO;
737 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
738 HostCmd_ACT_GEN_SET, 0, &mode))
739 return -1;
740
741 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
742 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_UAP);
743
744 return 0;
745}
746
465/* 747/*
466 * CFG802.11 operation handler to change interface type. 748 * CFG802.11 operation handler to change interface type.
467 */ 749 */
@@ -494,6 +776,16 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
494 switch (type) { 776 switch (type) {
495 case NL80211_IFTYPE_ADHOC: 777 case NL80211_IFTYPE_ADHOC:
496 break; 778 break;
779 case NL80211_IFTYPE_P2P_CLIENT:
780 if (mwifiex_cfg80211_init_p2p_client(priv))
781 return -EFAULT;
782 dev->ieee80211_ptr->iftype = type;
783 return 0;
784 case NL80211_IFTYPE_P2P_GO:
785 if (mwifiex_cfg80211_init_p2p_go(priv))
786 return -EFAULT;
787 dev->ieee80211_ptr->iftype = type;
788 return 0;
497 case NL80211_IFTYPE_UNSPECIFIED: 789 case NL80211_IFTYPE_UNSPECIFIED:
498 wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name); 790 wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
499 case NL80211_IFTYPE_STATION: /* This shouldn't happen */ 791 case NL80211_IFTYPE_STATION: /* This shouldn't happen */
@@ -519,6 +811,18 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
519 return -EOPNOTSUPP; 811 return -EOPNOTSUPP;
520 } 812 }
521 break; 813 break;
814 case NL80211_IFTYPE_P2P_CLIENT:
815 case NL80211_IFTYPE_P2P_GO:
816 switch (type) {
817 case NL80211_IFTYPE_STATION:
818 if (mwifiex_cfg80211_deinit_p2p(priv))
819 return -EFAULT;
820 dev->ieee80211_ptr->iftype = type;
821 return 0;
822 default:
823 return -EOPNOTSUPP;
824 }
825 break;
522 default: 826 default:
523 wiphy_err(wiphy, "%s: unknown iftype: %d\n", 827 wiphy_err(wiphy, "%s: unknown iftype: %d\n",
524 dev->name, dev->ieee80211_ptr->iftype); 828 dev->name, dev->ieee80211_ptr->iftype);
@@ -657,7 +961,6 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
657} 961}
658 962
659/* Supported rates to be advertised to the cfg80211 */ 963/* Supported rates to be advertised to the cfg80211 */
660
661static struct ieee80211_rate mwifiex_rates[] = { 964static struct ieee80211_rate mwifiex_rates[] = {
662 {.bitrate = 10, .hw_value = 2, }, 965 {.bitrate = 10, .hw_value = 2, },
663 {.bitrate = 20, .hw_value = 4, }, 966 {.bitrate = 20, .hw_value = 4, },
@@ -674,7 +977,6 @@ static struct ieee80211_rate mwifiex_rates[] = {
674}; 977};
675 978
676/* Channel definitions to be advertised to cfg80211 */ 979/* Channel definitions to be advertised to cfg80211 */
677
678static struct ieee80211_channel mwifiex_channels_2ghz[] = { 980static struct ieee80211_channel mwifiex_channels_2ghz[] = {
679 {.center_freq = 2412, .hw_value = 1, }, 981 {.center_freq = 2412, .hw_value = 1, },
680 {.center_freq = 2417, .hw_value = 2, }, 982 {.center_freq = 2417, .hw_value = 2, },
@@ -742,12 +1044,41 @@ static struct ieee80211_supported_band mwifiex_band_5ghz = {
742 1044
743 1045
744/* Supported crypto cipher suits to be advertised to cfg80211 */ 1046/* Supported crypto cipher suits to be advertised to cfg80211 */
745
746static const u32 mwifiex_cipher_suites[] = { 1047static const u32 mwifiex_cipher_suites[] = {
747 WLAN_CIPHER_SUITE_WEP40, 1048 WLAN_CIPHER_SUITE_WEP40,
748 WLAN_CIPHER_SUITE_WEP104, 1049 WLAN_CIPHER_SUITE_WEP104,
749 WLAN_CIPHER_SUITE_TKIP, 1050 WLAN_CIPHER_SUITE_TKIP,
750 WLAN_CIPHER_SUITE_CCMP, 1051 WLAN_CIPHER_SUITE_CCMP,
1052 WLAN_CIPHER_SUITE_AES_CMAC,
1053};
1054
1055/* Supported mgmt frame types to be advertised to cfg80211 */
1056static const struct ieee80211_txrx_stypes
1057mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES] = {
1058 [NL80211_IFTYPE_STATION] = {
1059 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1060 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1061 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1062 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1063 },
1064 [NL80211_IFTYPE_AP] = {
1065 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1066 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1067 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1068 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1069 },
1070 [NL80211_IFTYPE_P2P_CLIENT] = {
1071 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1072 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1073 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1074 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1075 },
1076 [NL80211_IFTYPE_P2P_GO] = {
1077 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1078 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1079 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1080 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1081 },
751}; 1082};
752 1083
753/* 1084/*
@@ -842,7 +1173,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
842{ 1173{
843 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1174 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
844 1175
845 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 1176 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
846 wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__); 1177 wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
847 return -EINVAL; 1178 return -EINVAL;
848 } 1179 }
@@ -906,6 +1237,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
906 if (mwifiex_del_mgmt_ies(priv)) 1237 if (mwifiex_del_mgmt_ies(priv))
907 wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); 1238 wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
908 1239
1240 priv->ap_11n_enabled = 0;
1241
909 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1242 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
910 HostCmd_ACT_GEN_SET, 0, NULL)) { 1243 HostCmd_ACT_GEN_SET, 0, NULL)) {
911 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1244 wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -928,7 +1261,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
928 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1261 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
929 u8 config_bands = 0; 1262 u8 config_bands = 0;
930 1263
931 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) 1264 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
932 return -1; 1265 return -1;
933 if (mwifiex_set_mgmt_ies(priv, &params->beacon)) 1266 if (mwifiex_set_mgmt_ies(priv, &params->beacon))
934 return -1; 1267 return -1;
@@ -965,15 +1298,18 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
965 1298
966 bss_cfg->channel = 1299 bss_cfg->channel =
967 (u8)ieee80211_frequency_to_channel(params->channel->center_freq); 1300 (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
968 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
969 1301
970 /* Set appropriate bands */ 1302 /* Set appropriate bands */
971 if (params->channel->band == IEEE80211_BAND_2GHZ) { 1303 if (params->channel->band == IEEE80211_BAND_2GHZ) {
1304 bss_cfg->band_cfg = BAND_CONFIG_BG;
1305
972 if (params->channel_type == NL80211_CHAN_NO_HT) 1306 if (params->channel_type == NL80211_CHAN_NO_HT)
973 config_bands = BAND_B | BAND_G; 1307 config_bands = BAND_B | BAND_G;
974 else 1308 else
975 config_bands = BAND_B | BAND_G | BAND_GN; 1309 config_bands = BAND_B | BAND_G | BAND_GN;
976 } else { 1310 } else {
1311 bss_cfg->band_cfg = BAND_CONFIG_A;
1312
977 if (params->channel_type == NL80211_CHAN_NO_HT) 1313 if (params->channel_type == NL80211_CHAN_NO_HT)
978 config_bands = BAND_A; 1314 config_bands = BAND_A;
979 else 1315 else
@@ -984,6 +1320,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
984 ~priv->adapter->fw_bands)) 1320 ~priv->adapter->fw_bands))
985 priv->adapter->config_bands = config_bands; 1321 priv->adapter->config_bands = config_bands;
986 1322
1323 mwifiex_set_uap_rates(bss_cfg, params);
987 mwifiex_send_domain_info_cmd_fw(wiphy); 1324 mwifiex_send_domain_info_cmd_fw(wiphy);
988 1325
989 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 1326 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
@@ -994,6 +1331,12 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
994 1331
995 mwifiex_set_ht_params(priv, bss_cfg, params); 1332 mwifiex_set_ht_params(priv, bss_cfg, params);
996 1333
1334 if (params->inactivity_timeout > 0) {
1335 /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
1336 bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
1337 bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
1338 }
1339
997 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1340 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
998 HostCmd_ACT_GEN_SET, 0, NULL)) { 1341 HostCmd_ACT_GEN_SET, 0, NULL)) {
999 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1342 wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -1149,7 +1492,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1149 ~priv->adapter->fw_bands)) 1492 ~priv->adapter->fw_bands))
1150 priv->adapter->config_bands = config_bands; 1493 priv->adapter->config_bands = config_bands;
1151 } 1494 }
1152 mwifiex_send_domain_info_cmd_fw(priv->wdev->wiphy);
1153 } 1495 }
1154 1496
1155 /* As this is new association, clear locally stored 1497 /* As this is new association, clear locally stored
@@ -1159,7 +1501,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1159 priv->wep_key_curr_index = 0; 1501 priv->wep_key_curr_index = 0;
1160 priv->sec_info.encryption_mode = 0; 1502 priv->sec_info.encryption_mode = 0;
1161 priv->sec_info.is_authtype_auto = 0; 1503 priv->sec_info.is_authtype_auto = 0;
1162 ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1); 1504 ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
1163 1505
1164 if (mode == NL80211_IFTYPE_ADHOC) { 1506 if (mode == NL80211_IFTYPE_ADHOC) {
1165 /* "privacy" is set only for ad-hoc mode */ 1507 /* "privacy" is set only for ad-hoc mode */
@@ -1206,8 +1548,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1206 "info: setting wep encryption" 1548 "info: setting wep encryption"
1207 " with key len %d\n", sme->key_len); 1549 " with key len %d\n", sme->key_len);
1208 priv->wep_key_curr_index = sme->key_idx; 1550 priv->wep_key_curr_index = sme->key_idx;
1209 ret = mwifiex_set_encode(priv, sme->key, sme->key_len, 1551 ret = mwifiex_set_encode(priv, NULL, sme->key,
1210 sme->key_idx, NULL, 0); 1552 sme->key_len, sme->key_idx,
1553 NULL, 0);
1211 } 1554 }
1212 } 1555 }
1213done: 1556done:
@@ -1459,11 +1802,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1459{ 1802{
1460 struct net_device *dev = request->wdev->netdev; 1803 struct net_device *dev = request->wdev->netdev;
1461 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1804 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1462 int i; 1805 int i, offset;
1463 struct ieee80211_channel *chan; 1806 struct ieee80211_channel *chan;
1807 struct ieee_types_header *ie;
1464 1808
1465 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); 1809 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
1466 1810
1811 if (atomic_read(&priv->wmm.tx_pkts_queued) >=
1812 MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
1813 dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
1814 return -EBUSY;
1815 }
1816
1467 priv->scan_request = request; 1817 priv->scan_request = request;
1468 1818
1469 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), 1819 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
@@ -1477,13 +1827,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1477 priv->user_scan_cfg->ssid_list = request->ssids; 1827 priv->user_scan_cfg->ssid_list = request->ssids;
1478 1828
1479 if (request->ie && request->ie_len) { 1829 if (request->ie && request->ie_len) {
1830 offset = 0;
1480 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { 1831 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
1481 if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR) 1832 if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
1482 continue; 1833 continue;
1483 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN; 1834 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
1484 memcpy(&priv->vs_ie[i].ie, request->ie, 1835 ie = (struct ieee_types_header *)(request->ie + offset);
1485 request->ie_len); 1836 memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
1486 break; 1837 offset += sizeof(*ie) + ie->len;
1838
1839 if (offset >= request->ie_len)
1840 break;
1487 } 1841 }
1488 } 1842 }
1489 1843
@@ -1592,7 +1946,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
1592 * create a new virtual interface with the given name 1946 * create a new virtual interface with the given name
1593 */ 1947 */
1594struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, 1948struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1595 char *name, 1949 const char *name,
1596 enum nl80211_iftype type, 1950 enum nl80211_iftype type,
1597 u32 *flags, 1951 u32 *flags,
1598 struct vif_params *params) 1952 struct vif_params *params)
@@ -1632,7 +1986,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1632 1986
1633 priv->bss_type = MWIFIEX_BSS_TYPE_STA; 1987 priv->bss_type = MWIFIEX_BSS_TYPE_STA;
1634 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; 1988 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
1635 priv->bss_priority = MWIFIEX_BSS_ROLE_STA; 1989 priv->bss_priority = 0;
1636 priv->bss_role = MWIFIEX_BSS_ROLE_STA; 1990 priv->bss_role = MWIFIEX_BSS_ROLE_STA;
1637 priv->bss_num = 0; 1991 priv->bss_num = 0;
1638 1992
@@ -1655,13 +2009,48 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1655 2009
1656 priv->bss_type = MWIFIEX_BSS_TYPE_UAP; 2010 priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
1657 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; 2011 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
1658 priv->bss_priority = MWIFIEX_BSS_ROLE_UAP; 2012 priv->bss_priority = 0;
1659 priv->bss_role = MWIFIEX_BSS_ROLE_UAP; 2013 priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
1660 priv->bss_started = 0; 2014 priv->bss_started = 0;
1661 priv->bss_num = 0; 2015 priv->bss_num = 0;
1662 priv->bss_mode = type; 2016 priv->bss_mode = type;
1663 2017
1664 break; 2018 break;
2019 case NL80211_IFTYPE_P2P_CLIENT:
2020 priv = adapter->priv[MWIFIEX_BSS_TYPE_P2P];
2021
2022 if (priv->bss_mode) {
2023 wiphy_err(wiphy, "Can't create multiple P2P ifaces");
2024 return ERR_PTR(-EINVAL);
2025 }
2026
2027 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
2028 if (!wdev)
2029 return ERR_PTR(-ENOMEM);
2030
2031 priv->wdev = wdev;
2032 wdev->wiphy = wiphy;
2033
2034 /* At start-up, wpa_supplicant tries to change the interface
2035 * to NL80211_IFTYPE_STATION if it is not managed mode.
2036 * So, we initialize it to STA mode.
2037 */
2038 wdev->iftype = NL80211_IFTYPE_STATION;
2039 priv->bss_mode = NL80211_IFTYPE_STATION;
2040
2041 /* Setting bss_type to P2P tells firmware that this interface
2042 * is receiving P2P peers found during find phase and doing
2043 * action frame handshake.
2044 */
2045 priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
2046
2047 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
2048 priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
2049 priv->bss_role = MWIFIEX_BSS_ROLE_STA;
2050 priv->bss_started = 0;
2051 priv->bss_num = 0;
2052
2053 break;
1665 default: 2054 default:
1666 wiphy_err(wiphy, "type not supported\n"); 2055 wiphy_err(wiphy, "type not supported\n");
1667 return ERR_PTR(-EINVAL); 2056 return ERR_PTR(-EINVAL);
@@ -1769,6 +2158,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
1769 .leave_ibss = mwifiex_cfg80211_leave_ibss, 2158 .leave_ibss = mwifiex_cfg80211_leave_ibss,
1770 .add_key = mwifiex_cfg80211_add_key, 2159 .add_key = mwifiex_cfg80211_add_key,
1771 .del_key = mwifiex_cfg80211_del_key, 2160 .del_key = mwifiex_cfg80211_del_key,
2161 .mgmt_tx = mwifiex_cfg80211_mgmt_tx,
2162 .mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
2163 .remain_on_channel = mwifiex_cfg80211_remain_on_channel,
2164 .cancel_remain_on_channel = mwifiex_cfg80211_cancel_remain_on_channel,
1772 .set_default_key = mwifiex_cfg80211_set_default_key, 2165 .set_default_key = mwifiex_cfg80211_set_default_key,
1773 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt, 2166 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
1774 .set_tx_power = mwifiex_cfg80211_set_tx_power, 2167 .set_tx_power = mwifiex_cfg80211_set_tx_power,
@@ -1805,8 +2198,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1805 } 2198 }
1806 wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; 2199 wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
1807 wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; 2200 wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
2201 wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
2202 wiphy->max_remain_on_channel_duration = 5000;
1808 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 2203 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1809 BIT(NL80211_IFTYPE_ADHOC) | 2204 BIT(NL80211_IFTYPE_ADHOC) |
2205 BIT(NL80211_IFTYPE_P2P_CLIENT) |
2206 BIT(NL80211_IFTYPE_P2P_GO) |
1810 BIT(NL80211_IFTYPE_AP); 2207 BIT(NL80211_IFTYPE_AP);
1811 2208
1812 wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz; 2209 wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
@@ -1825,15 +2222,21 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1825 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 2222 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
1826 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 2223 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1827 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | 2224 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
1828 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 2225 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
2226 WIPHY_FLAG_CUSTOM_REGULATORY |
2227 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
2228
2229 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
1829 2230
1830 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 2231 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
1831 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; 2232 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
2233 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
1832 2234
1833 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; 2235 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
1834 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1; 2236 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
1835 2237
1836 wiphy->features = NL80211_FEATURE_HT_IBSS; 2238 wiphy->features = NL80211_FEATURE_HT_IBSS |
2239 NL80211_FEATURE_INACTIVITY_TIMER;
1837 2240
1838 /* Reserve space for mwifiex specific private data for BSS */ 2241 /* Reserve space for mwifiex specific private data for BSS */
1839 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); 2242 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1854,8 +2257,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1854 return ret; 2257 return ret;
1855 } 2258 }
1856 country_code = mwifiex_11d_code_2_region(priv->adapter->region_code); 2259 country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
1857 if (country_code && regulatory_hint(wiphy, country_code)) 2260 if (country_code)
1858 dev_err(adapter->dev, "regulatory_hint() failed\n"); 2261 dev_info(adapter->dev,
2262 "ignoring F/W country code %2.2s\n", country_code);
1859 2263
1860 adapter->wiphy = wiphy; 2264 adapter->wiphy = wiphy;
1861 return ret; 2265 return ret;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 565527aee0ea..8d465107f52b 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -460,7 +460,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
460 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 460 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
461 } 461 }
462 462
463 ret = mwifiex_process_sta_event(priv); 463 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
464 ret = mwifiex_process_uap_event(priv);
465 else
466 ret = mwifiex_process_sta_event(priv);
464 467
465 adapter->event_cause = 0; 468 adapter->event_cause = 0;
466 adapter->event_skb = NULL; 469 adapter->event_skb = NULL;
@@ -1085,6 +1088,8 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
1085 if (activated) { 1088 if (activated) {
1086 if (priv->adapter->is_hs_configured) { 1089 if (priv->adapter->is_hs_configured) {
1087 priv->adapter->hs_activated = true; 1090 priv->adapter->hs_activated = true;
1091 mwifiex_update_rxreor_flags(priv->adapter,
1092 RXREOR_FORCE_NO_DROP);
1088 dev_dbg(priv->adapter->dev, "event: hs_activated\n"); 1093 dev_dbg(priv->adapter->dev, "event: hs_activated\n");
1089 priv->adapter->hs_activate_wait_q_woken = true; 1094 priv->adapter->hs_activate_wait_q_woken = true;
1090 wake_up_interruptible( 1095 wake_up_interruptible(
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 070ef25f5186..e9357d87d327 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -28,11 +28,14 @@
28#include <linux/ieee80211.h> 28#include <linux/ieee80211.h>
29 29
30 30
31#define MWIFIEX_MAX_BSS_NUM (2) 31#define MWIFIEX_MAX_BSS_NUM (3)
32 32
33#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd) 33#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
34 * + 4 byte alignment 34 * + 4 byte alignment
35 */ 35 */
36#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8 /* sizeof(pkt_type)
37 * + sizeof(tx_control)
38 */
36 39
37#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2 40#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
38#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16 41#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
@@ -60,10 +63,14 @@
60#define MWIFIEX_SDIO_BLOCK_SIZE 256 63#define MWIFIEX_SDIO_BLOCK_SIZE 256
61 64
62#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0) 65#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
66#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
67
68#define MWIFIEX_BRIDGED_PKTS_THRESHOLD 1024
63 69
64enum mwifiex_bss_type { 70enum mwifiex_bss_type {
65 MWIFIEX_BSS_TYPE_STA = 0, 71 MWIFIEX_BSS_TYPE_STA = 0,
66 MWIFIEX_BSS_TYPE_UAP = 1, 72 MWIFIEX_BSS_TYPE_UAP = 1,
73 MWIFIEX_BSS_TYPE_P2P = 2,
67 MWIFIEX_BSS_TYPE_ANY = 0xff, 74 MWIFIEX_BSS_TYPE_ANY = 0xff,
68}; 75};
69 76
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index e831b440a24a..dda588b35570 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -65,10 +65,12 @@ enum KEY_TYPE_ID {
65 KEY_TYPE_ID_TKIP, 65 KEY_TYPE_ID_TKIP,
66 KEY_TYPE_ID_AES, 66 KEY_TYPE_ID_AES,
67 KEY_TYPE_ID_WAPI, 67 KEY_TYPE_ID_WAPI,
68 KEY_TYPE_ID_AES_CMAC,
68}; 69};
69#define KEY_MCAST BIT(0) 70#define KEY_MCAST BIT(0)
70#define KEY_UNICAST BIT(1) 71#define KEY_UNICAST BIT(1)
71#define KEY_ENABLED BIT(2) 72#define KEY_ENABLED BIT(2)
73#define KEY_IGTK BIT(10)
72 74
73#define WAPI_KEY_LEN 50 75#define WAPI_KEY_LEN 50
74 76
@@ -92,6 +94,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
92}; 94};
93 95
94#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF))) 96#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
97#define CAL_RSSI(SNR, NF) ((s16)((s16)(SNR)+(s16)(NF)))
95 98
96#define UAP_BSS_PARAMS_I 0 99#define UAP_BSS_PARAMS_I 0
97#define UAP_CUSTOM_IE_I 1 100#define UAP_CUSTOM_IE_I 1
@@ -106,6 +109,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
106#define MGMT_MASK_BEACON 0x100 109#define MGMT_MASK_BEACON 0x100
107 110
108#define TLV_TYPE_UAP_SSID 0x0000 111#define TLV_TYPE_UAP_SSID 0x0000
112#define TLV_TYPE_UAP_RATES 0x0001
109 113
110#define PROPRIETARY_TLV_BASE_ID 0x0100 114#define PROPRIETARY_TLV_BASE_ID 0x0100
111#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0) 115#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
@@ -124,6 +128,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 128#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) 129#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 130#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
131#define TLV_TYPE_UAP_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 57)
127#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59) 132#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59)
128#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 133#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
129#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) 134#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -138,6 +143,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
138#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105) 143#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
139#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113) 144#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
140#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114) 145#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
146#define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123)
141#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145) 147#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
142#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146) 148#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
143 149
@@ -257,9 +263,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
257#define HostCmd_CMD_TX_RATE_CFG 0x00d6 263#define HostCmd_CMD_TX_RATE_CFG 0x00d6
258#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4 264#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4
259#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5 265#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5
266#define HostCmd_CMD_P2P_MODE_CFG 0x00eb
260#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed 267#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
261#define HostCmd_CMD_SET_BSS_MODE 0x00f7 268#define HostCmd_CMD_SET_BSS_MODE 0x00f7
262#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa 269#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
270#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
271#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
263 272
264#define PROTOCOL_NO_SECURITY 0x01 273#define PROTOCOL_NO_SECURITY 0x01
265#define PROTOCOL_STATIC_WEP 0x02 274#define PROTOCOL_STATIC_WEP 0x02
@@ -285,9 +294,17 @@ enum ENH_PS_MODES {
285 DIS_AUTO_PS = 0xfe, 294 DIS_AUTO_PS = 0xfe,
286}; 295};
287 296
297enum P2P_MODES {
298 P2P_MODE_DISABLE = 0,
299 P2P_MODE_DEVICE = 1,
300 P2P_MODE_GO = 2,
301 P2P_MODE_CLIENT = 3,
302};
303
288#define HostCmd_RET_BIT 0x8000 304#define HostCmd_RET_BIT 0x8000
289#define HostCmd_ACT_GEN_GET 0x0000 305#define HostCmd_ACT_GEN_GET 0x0000
290#define HostCmd_ACT_GEN_SET 0x0001 306#define HostCmd_ACT_GEN_SET 0x0001
307#define HostCmd_ACT_GEN_REMOVE 0x0004
291#define HostCmd_ACT_BITWISE_SET 0x0002 308#define HostCmd_ACT_BITWISE_SET 0x0002
292#define HostCmd_ACT_BITWISE_CLR 0x0003 309#define HostCmd_ACT_BITWISE_CLR 0x0003
293#define HostCmd_RESULT_OK 0x0000 310#define HostCmd_RESULT_OK 0x0000
@@ -307,7 +324,7 @@ enum ENH_PS_MODES {
307#define HostCmd_SCAN_RADIO_TYPE_A 1 324#define HostCmd_SCAN_RADIO_TYPE_A 1
308 325
309#define HOST_SLEEP_CFG_CANCEL 0xffffffff 326#define HOST_SLEEP_CFG_CANCEL 0xffffffff
310#define HOST_SLEEP_CFG_COND_DEF 0x0000000f 327#define HOST_SLEEP_CFG_COND_DEF 0x00000000
311#define HOST_SLEEP_CFG_GPIO_DEF 0xff 328#define HOST_SLEEP_CFG_GPIO_DEF 0xff
312#define HOST_SLEEP_CFG_GAP_DEF 0 329#define HOST_SLEEP_CFG_GAP_DEF 0
313 330
@@ -385,6 +402,7 @@ enum ENH_PS_MODES {
385#define EVENT_BW_CHANGE 0x00000048 402#define EVENT_BW_CHANGE 0x00000048
386#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c 403#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
387#define EVENT_HOSTWAKE_STAIE 0x0000004d 404#define EVENT_HOSTWAKE_STAIE 0x0000004d
405#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
388 406
389#define EVENT_ID_MASK 0xffff 407#define EVENT_ID_MASK 0xffff
390#define BSS_NUM_MASK 0xf 408#define BSS_NUM_MASK 0xf
@@ -424,10 +442,10 @@ struct txpd {
424struct rxpd { 442struct rxpd {
425 u8 bss_type; 443 u8 bss_type;
426 u8 bss_num; 444 u8 bss_num;
427 u16 rx_pkt_length; 445 __le16 rx_pkt_length;
428 u16 rx_pkt_offset; 446 __le16 rx_pkt_offset;
429 u16 rx_pkt_type; 447 __le16 rx_pkt_type;
430 u16 seq_num; 448 __le16 seq_num;
431 u8 priority; 449 u8 priority;
432 u8 rx_rate; 450 u8 rx_rate;
433 s8 snr; 451 s8 snr;
@@ -439,6 +457,31 @@ struct rxpd {
439 u8 reserved; 457 u8 reserved;
440} __packed; 458} __packed;
441 459
460struct uap_txpd {
461 u8 bss_type;
462 u8 bss_num;
463 __le16 tx_pkt_length;
464 __le16 tx_pkt_offset;
465 __le16 tx_pkt_type;
466 __le32 tx_control;
467 u8 priority;
468 u8 flags;
469 u8 pkt_delay_2ms;
470 u8 reserved1;
471 __le32 reserved2;
472};
473
474struct uap_rxpd {
475 u8 bss_type;
476 u8 bss_num;
477 __le16 rx_pkt_length;
478 __le16 rx_pkt_offset;
479 __le16 rx_pkt_type;
480 __le16 seq_num;
481 u8 priority;
482 u8 reserved1;
483};
484
442enum mwifiex_chan_scan_mode_bitmasks { 485enum mwifiex_chan_scan_mode_bitmasks {
443 MWIFIEX_PASSIVE_SCAN = BIT(0), 486 MWIFIEX_PASSIVE_SCAN = BIT(0),
444 MWIFIEX_DISABLE_CHAN_FILT = BIT(1), 487 MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -558,6 +601,13 @@ struct mwifiex_ie_type_key_param_set {
558 u8 key[50]; 601 u8 key[50];
559} __packed; 602} __packed;
560 603
604#define IGTK_PN_LEN 8
605
606struct mwifiex_cmac_param {
607 u8 ipn[IGTK_PN_LEN];
608 u8 key[WLAN_KEY_LEN_AES_CMAC];
609} __packed;
610
561struct host_cmd_ds_802_11_key_material { 611struct host_cmd_ds_802_11_key_material {
562 __le16 action; 612 __le16 action;
563 struct mwifiex_ie_type_key_param_set key_param_set; 613 struct mwifiex_ie_type_key_param_set key_param_set;
@@ -1250,6 +1300,11 @@ struct host_cmd_tlv_ssid {
1250 u8 ssid[0]; 1300 u8 ssid[0];
1251} __packed; 1301} __packed;
1252 1302
1303struct host_cmd_tlv_rates {
1304 struct host_cmd_tlv tlv;
1305 u8 rates[0];
1306} __packed;
1307
1253struct host_cmd_tlv_bcast_ssid { 1308struct host_cmd_tlv_bcast_ssid {
1254 struct host_cmd_tlv tlv; 1309 struct host_cmd_tlv tlv;
1255 u8 bcast_ctl; 1310 u8 bcast_ctl;
@@ -1291,11 +1346,35 @@ struct host_cmd_tlv_channel_band {
1291 u8 channel; 1346 u8 channel;
1292} __packed; 1347} __packed;
1293 1348
1349struct host_cmd_tlv_ageout_timer {
1350 struct host_cmd_tlv tlv;
1351 __le32 sta_ao_timer;
1352} __packed;
1353
1294struct host_cmd_ds_version_ext { 1354struct host_cmd_ds_version_ext {
1295 u8 version_str_sel; 1355 u8 version_str_sel;
1296 char version_str[128]; 1356 char version_str[128];
1297} __packed; 1357} __packed;
1298 1358
1359struct host_cmd_ds_mgmt_frame_reg {
1360 __le16 action;
1361 __le32 mask;
1362} __packed;
1363
1364struct host_cmd_ds_p2p_mode_cfg {
1365 __le16 action;
1366 __le16 mode;
1367} __packed;
1368
1369struct host_cmd_ds_remain_on_chan {
1370 __le16 action;
1371 u8 status;
1372 u8 reserved;
1373 u8 band_cfg;
1374 u8 channel;
1375 __le32 duration;
1376} __packed;
1377
1299struct host_cmd_ds_802_11_ibss_status { 1378struct host_cmd_ds_802_11_ibss_status {
1300 __le16 action; 1379 __le16 action;
1301 __le16 enable; 1380 __le16 enable;
@@ -1307,6 +1386,7 @@ struct host_cmd_ds_802_11_ibss_status {
1307 1386
1308#define CONNECTION_TYPE_INFRA 0 1387#define CONNECTION_TYPE_INFRA 0
1309#define CONNECTION_TYPE_ADHOC 1 1388#define CONNECTION_TYPE_ADHOC 1
1389#define CONNECTION_TYPE_AP 2
1310 1390
1311struct host_cmd_ds_set_bss_mode { 1391struct host_cmd_ds_set_bss_mode {
1312 u8 con_type; 1392 u8 con_type;
@@ -1404,6 +1484,9 @@ struct host_cmd_ds_command {
1404 struct host_cmd_ds_wmm_get_status get_wmm_status; 1484 struct host_cmd_ds_wmm_get_status get_wmm_status;
1405 struct host_cmd_ds_802_11_key_material key_material; 1485 struct host_cmd_ds_802_11_key_material key_material;
1406 struct host_cmd_ds_version_ext verext; 1486 struct host_cmd_ds_version_ext verext;
1487 struct host_cmd_ds_mgmt_frame_reg reg_mask;
1488 struct host_cmd_ds_remain_on_chan roc_cfg;
1489 struct host_cmd_ds_p2p_mode_cfg mode_cfg;
1407 struct host_cmd_ds_802_11_ibss_status ibss_coalescing; 1490 struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
1408 struct host_cmd_ds_mac_reg_access mac_reg; 1491 struct host_cmd_ds_mac_reg_access mac_reg;
1409 struct host_cmd_ds_bbp_reg_access bbp_reg; 1492 struct host_cmd_ds_bbp_reg_access bbp_reg;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 1d8dd003e396..e38342f86c51 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -114,9 +114,6 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
114 cpu_to_le16(mask); 114 cpu_to_le16(mask);
115 115
116 ie->ie_index = cpu_to_le16(index); 116 ie->ie_index = cpu_to_le16(index);
117 ie->ie_length = priv->mgmt_ie[index].ie_length;
118 memcpy(&ie->ie_buffer, &priv->mgmt_ie[index].ie_buffer,
119 le16_to_cpu(priv->mgmt_ie[index].ie_length));
120 } else { 117 } else {
121 if (mask != MWIFIEX_DELETE_MASK) 118 if (mask != MWIFIEX_DELETE_MASK)
122 return -1; 119 return -1;
@@ -160,7 +157,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
160 u16 len; 157 u16 len;
161 int ret; 158 int ret;
162 159
163 ap_custom_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 160 ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
164 if (!ap_custom_ie) 161 if (!ap_custom_ie)
165 return -ENOMEM; 162 return -ENOMEM;
166 163
@@ -214,30 +211,35 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
214 return ret; 211 return ret;
215} 212}
216 213
217/* This function checks if WPS IE is present in passed buffer and copies it to 214/* This function checks if the vendor specified IE is present in passed buffer
218 * mwifiex_ie structure. 215 * and copies it to mwifiex_ie structure.
219 * Function takes pointer to struct mwifiex_ie pointer as argument. 216 * Function takes pointer to struct mwifiex_ie pointer as argument.
220 * If WPS IE is present memory is allocated for mwifiex_ie pointer and filled 217 * If the vendor specified IE is present then memory is allocated for
221 * in with WPS IE. Caller should take care of freeing this memory. 218 * mwifiex_ie pointer and filled in with IE. Caller should take care of freeing
219 * this memory.
222 */ 220 */
223static int mwifiex_update_wps_ie(const u8 *ies, int ies_len, 221static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
224 struct mwifiex_ie **ie_ptr, u16 mask) 222 struct mwifiex_ie **ie_ptr, u16 mask,
223 unsigned int oui, u8 oui_type)
225{ 224{
226 struct ieee_types_header *wps_ie; 225 struct ieee_types_header *vs_ie;
227 struct mwifiex_ie *ie = NULL; 226 struct mwifiex_ie *ie = *ie_ptr;
228 const u8 *vendor_ie; 227 const u8 *vendor_ie;
229 228
230 vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 229 vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
231 WLAN_OUI_TYPE_MICROSOFT_WPS,
232 ies, ies_len);
233 if (vendor_ie) { 230 if (vendor_ie) {
234 ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 231 if (!*ie_ptr) {
235 if (!ie) 232 *ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
236 return -ENOMEM; 233 GFP_KERNEL);
234 if (!*ie_ptr)
235 return -ENOMEM;
236 ie = *ie_ptr;
237 }
237 238
238 wps_ie = (struct ieee_types_header *)vendor_ie; 239 vs_ie = (struct ieee_types_header *)vendor_ie;
239 memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2); 240 memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
240 ie->ie_length = cpu_to_le16(wps_ie->len + 2); 241 vs_ie, vs_ie->len + 2);
242 le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
241 ie->mgmt_subtype_mask = cpu_to_le16(mask); 243 ie->mgmt_subtype_mask = cpu_to_le16(mask);
242 ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK); 244 ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
243 } 245 }
@@ -257,20 +259,40 @@ static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
257 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK; 259 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
258 int ret = 0; 260 int ret = 0;
259 261
260 if (data->beacon_ies && data->beacon_ies_len) 262 if (data->beacon_ies && data->beacon_ies_len) {
261 mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len, 263 mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
262 &beacon_ie, MGMT_MASK_BEACON); 264 &beacon_ie, MGMT_MASK_BEACON,
265 WLAN_OUI_MICROSOFT,
266 WLAN_OUI_TYPE_MICROSOFT_WPS);
267 mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
268 &beacon_ie, MGMT_MASK_BEACON,
269 WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
270 }
263 271
264 if (data->proberesp_ies && data->proberesp_ies_len) 272 if (data->proberesp_ies && data->proberesp_ies_len) {
265 mwifiex_update_wps_ie(data->proberesp_ies, 273 mwifiex_update_vs_ie(data->proberesp_ies,
266 data->proberesp_ies_len, &pr_ie, 274 data->proberesp_ies_len, &pr_ie,
267 MGMT_MASK_PROBE_RESP); 275 MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
276 WLAN_OUI_TYPE_MICROSOFT_WPS);
277 mwifiex_update_vs_ie(data->proberesp_ies,
278 data->proberesp_ies_len, &pr_ie,
279 MGMT_MASK_PROBE_RESP,
280 WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
281 }
268 282
269 if (data->assocresp_ies && data->assocresp_ies_len) 283 if (data->assocresp_ies && data->assocresp_ies_len) {
270 mwifiex_update_wps_ie(data->assocresp_ies, 284 mwifiex_update_vs_ie(data->assocresp_ies,
271 data->assocresp_ies_len, &ar_ie, 285 data->assocresp_ies_len, &ar_ie,
272 MGMT_MASK_ASSOC_RESP | 286 MGMT_MASK_ASSOC_RESP |
273 MGMT_MASK_REASSOC_RESP); 287 MGMT_MASK_REASSOC_RESP,
288 WLAN_OUI_MICROSOFT,
289 WLAN_OUI_TYPE_MICROSOFT_WPS);
290 mwifiex_update_vs_ie(data->assocresp_ies,
291 data->assocresp_ies_len, &ar_ie,
292 MGMT_MASK_ASSOC_RESP |
293 MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
294 WLAN_OUI_TYPE_WFA_P2P);
295 }
274 296
275 if (beacon_ie || pr_ie || ar_ie) { 297 if (beacon_ie || pr_ie || ar_ie) {
276 ret = mwifiex_update_uap_custom_ie(priv, beacon_ie, 298 ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 21fdc6c02775..b5d37a8caa09 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -64,60 +64,77 @@ static void scan_delay_timer_fn(unsigned long data)
64 struct cmd_ctrl_node *cmd_node, *tmp_node; 64 struct cmd_ctrl_node *cmd_node, *tmp_node;
65 unsigned long flags; 65 unsigned long flags;
66 66
67 if (!mwifiex_wmm_lists_empty(adapter)) { 67 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
68 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { 68 /*
69 * Abort scan operation by cancelling all pending scan
70 * commands
71 */
72 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
73 list_for_each_entry_safe(cmd_node, tmp_node,
74 &adapter->scan_pending_q, list) {
75 list_del(&cmd_node->list);
76 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
77 }
78 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
79
80 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
81 adapter->scan_processing = false;
82 adapter->scan_delay_cnt = 0;
83 adapter->empty_tx_q_cnt = 0;
84 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
85
86 if (priv->user_scan_cfg) {
87 dev_dbg(priv->adapter->dev,
88 "info: %s: scan aborted\n", __func__);
89 cfg80211_scan_done(priv->scan_request, 1);
90 priv->scan_request = NULL;
91 kfree(priv->user_scan_cfg);
92 priv->user_scan_cfg = NULL;
93 }
94
95 if (priv->scan_pending_on_block) {
96 priv->scan_pending_on_block = false;
97 up(&priv->async_sem);
98 }
99 goto done;
100 }
101
102 if (!atomic_read(&priv->adapter->is_tx_received)) {
103 adapter->empty_tx_q_cnt++;
104 if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
69 /* 105 /*
70 * Abort scan operation by cancelling all pending scan 106 * No Tx traffic for 200msec. Get scan command from
71 * command 107 * scan pending queue and put to cmd pending queue to
108 * resume scan operation
72 */ 109 */
110 adapter->scan_delay_cnt = 0;
111 adapter->empty_tx_q_cnt = 0;
73 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 112 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
74 list_for_each_entry_safe(cmd_node, tmp_node, 113 cmd_node = list_first_entry(&adapter->scan_pending_q,
75 &adapter->scan_pending_q, 114 struct cmd_ctrl_node, list);
76 list) { 115 list_del(&cmd_node->list);
77 list_del(&cmd_node->list);
78 cmd_node->wait_q_enabled = false;
79 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
80 }
81 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 116 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
82 flags); 117 flags);
83 118
84 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 119 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
85 adapter->scan_processing = false; 120 true);
86 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, 121 queue_work(adapter->workqueue, &adapter->main_work);
87 flags); 122 goto done;
88
89 if (priv->user_scan_cfg) {
90 dev_dbg(priv->adapter->dev,
91 "info: %s: scan aborted\n", __func__);
92 cfg80211_scan_done(priv->scan_request, 1);
93 priv->scan_request = NULL;
94 kfree(priv->user_scan_cfg);
95 priv->user_scan_cfg = NULL;
96 }
97 } else {
98 /*
99 * Tx data queue is still not empty, delay scan
100 * operation further by 20msec.
101 */
102 mod_timer(&priv->scan_delay_timer, jiffies +
103 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
104 adapter->scan_delay_cnt++;
105 } 123 }
106 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
107 } else { 124 } else {
108 /* 125 adapter->empty_tx_q_cnt = 0;
109 * Tx data queue is empty. Get scan command from scan_pending_q
110 * and put to cmd_pending_q to resume scan operation
111 */
112 adapter->scan_delay_cnt = 0;
113 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
114 cmd_node = list_first_entry(&adapter->scan_pending_q,
115 struct cmd_ctrl_node, list);
116 list_del(&cmd_node->list);
117 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
118
119 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
120 } 126 }
127
128 /* Delay scan operation further by 20msec */
129 mod_timer(&priv->scan_delay_timer, jiffies +
130 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
131 adapter->scan_delay_cnt++;
132
133done:
134 if (atomic_read(&priv->adapter->is_tx_received))
135 atomic_set(&priv->adapter->is_tx_received, false);
136
137 return;
121} 138}
122 139
123/* 140/*
@@ -127,7 +144,7 @@ static void scan_delay_timer_fn(unsigned long data)
127 * Additionally, it also initializes all the locks and sets up all the 144 * Additionally, it also initializes all the locks and sets up all the
128 * lists. 145 * lists.
129 */ 146 */
130static int mwifiex_init_priv(struct mwifiex_private *priv) 147int mwifiex_init_priv(struct mwifiex_private *priv)
131{ 148{
132 u32 i; 149 u32 i;
133 150
@@ -196,6 +213,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
196 priv->curr_bcn_size = 0; 213 priv->curr_bcn_size = 0;
197 priv->wps_ie = NULL; 214 priv->wps_ie = NULL;
198 priv->wps_ie_len = 0; 215 priv->wps_ie_len = 0;
216 priv->ap_11n_enabled = 0;
217 memset(&priv->roc_cfg, 0, sizeof(priv->roc_cfg));
199 218
200 priv->scan_block = false; 219 priv->scan_block = false;
201 220
@@ -345,6 +364,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
345 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter)); 364 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
346 adapter->arp_filter_size = 0; 365 adapter->arp_filter_size = 0;
347 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; 366 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
367 adapter->empty_tx_q_cnt = 0;
348} 368}
349 369
350/* 370/*
@@ -410,6 +430,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
410 list_del(&priv->wmm.tid_tbl_ptr[j].ra_list); 430 list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
411 list_del(&priv->tx_ba_stream_tbl_ptr); 431 list_del(&priv->tx_ba_stream_tbl_ptr);
412 list_del(&priv->rx_reorder_tbl_ptr); 432 list_del(&priv->rx_reorder_tbl_ptr);
433 list_del(&priv->sta_list);
413 } 434 }
414 } 435 }
415} 436}
@@ -472,6 +493,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
472 spin_lock_init(&priv->rx_pkt_lock); 493 spin_lock_init(&priv->rx_pkt_lock);
473 spin_lock_init(&priv->wmm.ra_list_spinlock); 494 spin_lock_init(&priv->wmm.ra_list_spinlock);
474 spin_lock_init(&priv->curr_bcn_buf_lock); 495 spin_lock_init(&priv->curr_bcn_buf_lock);
496 spin_lock_init(&priv->sta_list_spinlock);
475 } 497 }
476 } 498 }
477 499
@@ -504,6 +526,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
504 } 526 }
505 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr); 527 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
506 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 528 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
529 INIT_LIST_HEAD(&priv->sta_list);
507 530
508 spin_lock_init(&priv->tx_ba_stream_tbl_lock); 531 spin_lock_init(&priv->tx_ba_stream_tbl_lock);
509 spin_lock_init(&priv->rx_reorder_tbl_lock); 532 spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -626,6 +649,17 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
626} 649}
627 650
628/* 651/*
652 * This function frees the private structure, including cleans
653 * up the TX and RX queues and frees the BSS priority tables.
654 */
655void mwifiex_free_priv(struct mwifiex_private *priv)
656{
657 mwifiex_clean_txrx(priv);
658 mwifiex_delete_bss_prio_tbl(priv);
659 mwifiex_free_curr_bcn(priv);
660}
661
662/*
629 * This function is used to shutdown the driver. 663 * This function is used to shutdown the driver.
630 * 664 *
631 * The following operations are performed sequentially - 665 * The following operations are performed sequentially -
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 50191539bb32..4e31c6013ebe 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -81,7 +81,11 @@ struct wep_key {
81 81
82#define KEY_MGMT_ON_HOST 0x03 82#define KEY_MGMT_ON_HOST 0x03
83#define MWIFIEX_AUTH_MODE_AUTO 0xFF 83#define MWIFIEX_AUTH_MODE_AUTO 0xFF
84#define BAND_CONFIG_MANUAL 0x00 84#define BAND_CONFIG_BG 0x00
85#define BAND_CONFIG_A 0x01
86#define MWIFIEX_SUPPORTED_RATES 14
87#define MWIFIEX_SUPPORTED_RATES_EXT 32
88
85struct mwifiex_uap_bss_param { 89struct mwifiex_uap_bss_param {
86 u8 channel; 90 u8 channel;
87 u8 band_cfg; 91 u8 band_cfg;
@@ -100,6 +104,9 @@ struct mwifiex_uap_bss_param {
100 struct wpa_param wpa_cfg; 104 struct wpa_param wpa_cfg;
101 struct wep_key wep_cfg[NUM_WEP_KEYS]; 105 struct wep_key wep_cfg[NUM_WEP_KEYS];
102 struct ieee80211_ht_cap ht_cap; 106 struct ieee80211_ht_cap ht_cap;
107 u8 rates[MWIFIEX_SUPPORTED_RATES];
108 u32 sta_ao_timer;
109 u32 ps_sta_ao_timer;
103}; 110};
104 111
105enum { 112enum {
@@ -213,7 +220,7 @@ struct mwifiex_debug_info {
213}; 220};
214 221
215#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000 222#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000
216#define WAPI_RXPN_LEN 16 223#define PN_LEN 16
217 224
218struct mwifiex_ds_encrypt_key { 225struct mwifiex_ds_encrypt_key {
219 u32 key_disable; 226 u32 key_disable;
@@ -222,7 +229,8 @@ struct mwifiex_ds_encrypt_key {
222 u8 key_material[WLAN_MAX_KEY_LEN]; 229 u8 key_material[WLAN_MAX_KEY_LEN];
223 u8 mac_addr[ETH_ALEN]; 230 u8 mac_addr[ETH_ALEN];
224 u32 is_wapi_key; 231 u32 is_wapi_key;
225 u8 wapi_rxpn[WAPI_RXPN_LEN]; 232 u8 pn[PN_LEN]; /* packet number */
233 u8 is_igtk_key;
226}; 234};
227 235
228struct mwifiex_power_cfg { 236struct mwifiex_power_cfg {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 46803621d015..eb22dd248d54 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -72,7 +72,6 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
72 goto error; 72 goto error;
73 73
74 adapter->priv[i]->adapter = adapter; 74 adapter->priv[i]->adapter = adapter;
75 adapter->priv[i]->bss_priority = i;
76 adapter->priv_num++; 75 adapter->priv_num++;
77 } 76 }
78 mwifiex_init_lock_list(adapter); 77 mwifiex_init_lock_list(adapter);
@@ -370,6 +369,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
370 dev_err(adapter->dev, "cannot create default AP interface\n"); 369 dev_err(adapter->dev, "cannot create default AP interface\n");
371 goto err_add_intf; 370 goto err_add_intf;
372 } 371 }
372
373 /* Create P2P interface by default */
374 if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
375 NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
376 dev_err(adapter->dev, "cannot create default P2P interface\n");
377 goto err_add_intf;
378 }
373 rtnl_unlock(); 379 rtnl_unlock();
374 380
375 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); 381 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -470,6 +476,27 @@ mwifiex_close(struct net_device *dev)
470} 476}
471 477
472/* 478/*
479 * Add buffer into wmm tx queue and queue work to transmit it.
480 */
481int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
482{
483 mwifiex_wmm_add_buf_txqueue(priv, skb);
484 atomic_inc(&priv->adapter->tx_pending);
485
486 if (priv->adapter->scan_delay_cnt)
487 atomic_set(&priv->adapter->is_tx_received, true);
488
489 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
490 mwifiex_set_trans_start(priv->netdev);
491 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
492 }
493
494 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
495
496 return 0;
497}
498
499/*
473 * CFG802.11 network device handler for data transmission. 500 * CFG802.11 network device handler for data transmission.
474 */ 501 */
475static int 502static int
@@ -517,15 +544,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
517 tx_info->bss_type = priv->bss_type; 544 tx_info->bss_type = priv->bss_type;
518 mwifiex_fill_buffer(skb); 545 mwifiex_fill_buffer(skb);
519 546
520 mwifiex_wmm_add_buf_txqueue(priv, skb); 547 mwifiex_queue_tx_pkt(priv, skb);
521 atomic_inc(&priv->adapter->tx_pending);
522
523 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
524 mwifiex_set_trans_start(dev);
525 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
526 }
527
528 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
529 548
530 return 0; 549 return 0;
531} 550}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e7c2a82fd610..bfb3fa69805c 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -88,13 +88,18 @@ enum {
88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) 88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
89 89
90#define MWIFIEX_MAX_SCAN_DELAY_CNT 50 90#define MWIFIEX_MAX_SCAN_DELAY_CNT 50
91#define MWIFIEX_MAX_EMPTY_TX_Q_CNT 10
91#define MWIFIEX_SCAN_DELAY_MSEC 20 92#define MWIFIEX_SCAN_DELAY_MSEC 20
92 93
94#define MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN 2
95
93#define RSN_GTK_OUI_OFFSET 2 96#define RSN_GTK_OUI_OFFSET 2
94 97
95#define MWIFIEX_OUI_NOT_PRESENT 0 98#define MWIFIEX_OUI_NOT_PRESENT 0
96#define MWIFIEX_OUI_PRESENT 1 99#define MWIFIEX_OUI_PRESENT 1
97 100
101#define PKT_TYPE_MGMT 0xE5
102
98/* 103/*
99 * Do not check for data_received for USB, as data_received 104 * Do not check for data_received for USB, as data_received
100 * is handled in mwifiex_usb_recv for USB 105 * is handled in mwifiex_usb_recv for USB
@@ -115,6 +120,7 @@ enum {
115#define MAX_BITMAP_RATES_SIZE 10 120#define MAX_BITMAP_RATES_SIZE 10
116 121
117#define MAX_CHANNEL_BAND_BG 14 122#define MAX_CHANNEL_BAND_BG 14
123#define MAX_CHANNEL_BAND_A 165
118 124
119#define MAX_FREQUENCY_BAND_BG 2484 125#define MAX_FREQUENCY_BAND_BG 2484
120 126
@@ -199,6 +205,9 @@ struct mwifiex_ra_list_tbl {
199 u8 ra[ETH_ALEN]; 205 u8 ra[ETH_ALEN];
200 u32 total_pkts_size; 206 u32 total_pkts_size;
201 u32 is_11n_enabled; 207 u32 is_11n_enabled;
208 u16 max_amsdu;
209 u16 pkt_count;
210 u8 ba_packet_thr;
202}; 211};
203 212
204struct mwifiex_tid_tbl { 213struct mwifiex_tid_tbl {
@@ -245,10 +254,6 @@ struct ieee_types_header {
245 u8 len; 254 u8 len;
246} __packed; 255} __packed;
247 256
248#define MWIFIEX_SUPPORTED_RATES 14
249
250#define MWIFIEX_SUPPORTED_RATES_EXT 32
251
252struct ieee_types_vendor_specific { 257struct ieee_types_vendor_specific {
253 struct ieee_types_vendor_header vend_hdr; 258 struct ieee_types_vendor_header vend_hdr;
254 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)]; 259 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
@@ -365,6 +370,12 @@ struct wps {
365 u8 session_enable; 370 u8 session_enable;
366}; 371};
367 372
373struct mwifiex_roc_cfg {
374 u64 cookie;
375 struct ieee80211_channel chan;
376 enum nl80211_channel_type chan_type;
377};
378
368struct mwifiex_adapter; 379struct mwifiex_adapter;
369struct mwifiex_private; 380struct mwifiex_private;
370 381
@@ -431,6 +442,9 @@ struct mwifiex_private {
431 u8 wmm_enabled; 442 u8 wmm_enabled;
432 u8 wmm_qosinfo; 443 u8 wmm_qosinfo;
433 struct mwifiex_wmm_desc wmm; 444 struct mwifiex_wmm_desc wmm;
445 struct list_head sta_list;
446 /* spin lock for associated station list */
447 spinlock_t sta_list_spinlock;
434 struct list_head tx_ba_stream_tbl_ptr; 448 struct list_head tx_ba_stream_tbl_ptr;
435 /* spin lock for tx_ba_stream_tbl_ptr queue */ 449 /* spin lock for tx_ba_stream_tbl_ptr queue */
436 spinlock_t tx_ba_stream_tbl_lock; 450 spinlock_t tx_ba_stream_tbl_lock;
@@ -480,12 +494,16 @@ struct mwifiex_private {
480 s32 cqm_rssi_thold; 494 s32 cqm_rssi_thold;
481 u32 cqm_rssi_hyst; 495 u32 cqm_rssi_hyst;
482 u8 subsc_evt_rssi_state; 496 u8 subsc_evt_rssi_state;
497 struct mwifiex_ds_misc_subsc_evt async_subsc_evt_storage;
483 struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX]; 498 struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX];
484 u16 beacon_idx; 499 u16 beacon_idx;
485 u16 proberesp_idx; 500 u16 proberesp_idx;
486 u16 assocresp_idx; 501 u16 assocresp_idx;
487 u16 rsn_idx; 502 u16 rsn_idx;
488 struct timer_list scan_delay_timer; 503 struct timer_list scan_delay_timer;
504 u8 ap_11n_enabled;
505 u32 mgmt_frame_mask;
506 struct mwifiex_roc_cfg roc_cfg;
489}; 507};
490 508
491enum mwifiex_ba_status { 509enum mwifiex_ba_status {
@@ -517,6 +535,7 @@ struct mwifiex_rx_reorder_tbl {
517 int win_size; 535 int win_size;
518 void **rx_reorder_ptr; 536 void **rx_reorder_ptr;
519 struct reorder_tmr_cnxt timer_context; 537 struct reorder_tmr_cnxt timer_context;
538 u8 flags;
520}; 539};
521 540
522struct mwifiex_bss_prio_node { 541struct mwifiex_bss_prio_node {
@@ -550,6 +569,19 @@ struct mwifiex_bss_priv {
550 u64 fw_tsf; 569 u64 fw_tsf;
551}; 570};
552 571
572/* This is AP specific structure which stores information
573 * about associated STA
574 */
575struct mwifiex_sta_node {
576 struct list_head list;
577 u8 mac_addr[ETH_ALEN];
578 u8 is_wmm_enabled;
579 u8 is_11n_enabled;
580 u8 ampdu_sta[MAX_NUM_TID];
581 u16 rx_seq[MAX_NUM_TID];
582 u16 max_amsdu;
583};
584
553struct mwifiex_if_ops { 585struct mwifiex_if_ops {
554 int (*init_if) (struct mwifiex_adapter *); 586 int (*init_if) (struct mwifiex_adapter *);
555 void (*cleanup_if) (struct mwifiex_adapter *); 587 void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,6 +722,9 @@ struct mwifiex_adapter {
690 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 722 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
691 u16 max_mgmt_ie_index; 723 u16 max_mgmt_ie_index;
692 u8 scan_delay_cnt; 724 u8 scan_delay_cnt;
725 u8 empty_tx_q_cnt;
726 atomic_t is_tx_received;
727 atomic_t pending_bridged_pkts;
693}; 728};
694 729
695int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 730int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -702,6 +737,9 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
702void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, 737void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
703 struct mwifiex_adapter *adapter); 738 struct mwifiex_adapter *adapter);
704 739
740int mwifiex_init_priv(struct mwifiex_private *priv);
741void mwifiex_free_priv(struct mwifiex_private *priv);
742
705int mwifiex_init_fw(struct mwifiex_adapter *adapter); 743int mwifiex_init_fw(struct mwifiex_adapter *adapter);
706 744
707int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter); 745int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -714,6 +752,9 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
714 752
715int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb); 753int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
716 754
755int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
756 struct sk_buff *skb);
757
717int mwifiex_process_event(struct mwifiex_adapter *adapter); 758int mwifiex_process_event(struct mwifiex_adapter *adapter);
718 759
719int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, 760int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
@@ -780,8 +821,17 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
780 struct host_cmd_ds_command *resp); 821 struct host_cmd_ds_command *resp);
781int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *, 822int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
782 struct sk_buff *skb); 823 struct sk_buff *skb);
824int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
825 struct sk_buff *skb);
826int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
827 struct sk_buff *skb);
783int mwifiex_process_sta_event(struct mwifiex_private *); 828int mwifiex_process_sta_event(struct mwifiex_private *);
829int mwifiex_process_uap_event(struct mwifiex_private *);
830struct mwifiex_sta_node *
831mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
832void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
784void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); 833void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
834void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
785int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta); 835int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
786int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, 836int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
787 struct mwifiex_scan_cmd_config *scan_cfg); 837 struct mwifiex_scan_cmd_config *scan_cfg);
@@ -840,6 +890,8 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
840void mwifiex_set_ht_params(struct mwifiex_private *priv, 890void mwifiex_set_ht_params(struct mwifiex_private *priv,
841 struct mwifiex_uap_bss_param *bss_cfg, 891 struct mwifiex_uap_bss_param *bss_cfg,
842 struct cfg80211_ap_settings *params); 892 struct cfg80211_ap_settings *params);
893void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
894 struct cfg80211_ap_settings *params);
843 895
844/* 896/*
845 * This function checks if the queuing is RA based or not. 897 * This function checks if the queuing is RA based or not.
@@ -925,6 +977,14 @@ mwifiex_netdev_get_priv(struct net_device *dev)
925 return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev)); 977 return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
926} 978}
927 979
980/*
981 * This function checks if a skb holds a management frame.
982 */
983static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
984{
985 return (*(u32 *)skb->data == PKT_TYPE_MGMT);
986}
987
928int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, 988int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
929 u32 func_init_shutdown); 989 u32 func_init_shutdown);
930int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8); 990int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -949,14 +1009,21 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
949 const struct mwifiex_user_scan_cfg *user_scan_in); 1009 const struct mwifiex_user_scan_cfg *user_scan_in);
950int mwifiex_set_radio(struct mwifiex_private *priv, u8 option); 1010int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
951 1011
952int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key, 1012int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
953 int key_len, u8 key_index, const u8 *mac_addr, 1013 const u8 *key, int key_len, u8 key_index,
954 int disable); 1014 const u8 *mac_addr, int disable);
955 1015
956int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len); 1016int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
957 1017
958int mwifiex_get_ver_ext(struct mwifiex_private *priv); 1018int mwifiex_get_ver_ext(struct mwifiex_private *priv);
959 1019
1020int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1021 struct ieee80211_channel *chan,
1022 enum nl80211_channel_type *channel_type,
1023 unsigned int duration);
1024
1025int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
1026
960int mwifiex_get_stats_info(struct mwifiex_private *priv, 1027int mwifiex_get_stats_info(struct mwifiex_private *priv,
961 struct mwifiex_ds_get_stats *log); 1028 struct mwifiex_ds_get_stats *log);
962 1029
@@ -987,6 +1054,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
987 1054
988int mwifiex_main_process(struct mwifiex_adapter *); 1055int mwifiex_main_process(struct mwifiex_adapter *);
989 1056
1057int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb);
1058
990int mwifiex_get_bss_info(struct mwifiex_private *, 1059int mwifiex_get_bss_info(struct mwifiex_private *,
991 struct mwifiex_bss_info *); 1060 struct mwifiex_bss_info *);
992int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, 1061int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
@@ -997,8 +1066,10 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
997int mwifiex_check_network_compatibility(struct mwifiex_private *priv, 1066int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
998 struct mwifiex_bssdescriptor *bss_desc); 1067 struct mwifiex_bssdescriptor *bss_desc);
999 1068
1069u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
1070
1000struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, 1071struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1001 char *name, 1072 const char *name,
1002 enum nl80211_iftype type, 1073 enum nl80211_iftype type,
1003 u32 *flags, 1074 u32 *flags,
1004 struct vif_params *params); 1075 struct vif_params *params);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 04dc7ca4ac22..e36a75988f87 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -614,9 +614,8 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
614 614
615 /* Increment the TLV header length by the size 615 /* Increment the TLV header length by the size
616 appended */ 616 appended */
617 chan_tlv_out->header.len = 617 le16_add_cpu(&chan_tlv_out->header.len,
618 cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) + 618 sizeof(chan_tlv_out->chan_scan_param));
619 (sizeof(chan_tlv_out->chan_scan_param)));
620 619
621 /* 620 /*
622 * The tlv buffer length is set to the number of bytes 621 * The tlv buffer length is set to the number of bytes
@@ -726,7 +725,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
726 struct mwifiex_ie_types_num_probes *num_probes_tlv; 725 struct mwifiex_ie_types_num_probes *num_probes_tlv;
727 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; 726 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
728 struct mwifiex_ie_types_rates_param_set *rates_tlv; 727 struct mwifiex_ie_types_rates_param_set *rates_tlv;
729 const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
730 u8 *tlv_pos; 728 u8 *tlv_pos;
731 u32 num_probes; 729 u32 num_probes;
732 u32 ssid_len; 730 u32 ssid_len;
@@ -840,8 +838,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
840 * or BSSID filter applied to the scan results in the firmware. 838 * or BSSID filter applied to the scan results in the firmware.
841 */ 839 */
842 if ((i && ssid_filter) || 840 if ((i && ssid_filter) ||
843 memcmp(scan_cfg_out->specific_bssid, &zero_mac, 841 !is_zero_ether_addr(scan_cfg_out->specific_bssid))
844 sizeof(zero_mac)))
845 *filtered_scan = true; 842 *filtered_scan = true;
846 } else { 843 } else {
847 scan_cfg_out->bss_mode = (u8) adapter->scan_mode; 844 scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
@@ -989,6 +986,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
989 *max_chan_per_scan = 2; 986 *max_chan_per_scan = 2;
990 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD) 987 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
991 *max_chan_per_scan = 3; 988 *max_chan_per_scan = 3;
989 else
990 *max_chan_per_scan = 4;
992 } 991 }
993} 992}
994 993
@@ -1433,9 +1432,9 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
1433 if (ret) 1432 if (ret)
1434 dev_err(priv->adapter->dev, "cannot find ssid " 1433 dev_err(priv->adapter->dev, "cannot find ssid "
1435 "%s\n", bss_desc->ssid.ssid); 1434 "%s\n", bss_desc->ssid.ssid);
1436 break; 1435 break;
1437 default: 1436 default:
1438 ret = 0; 1437 ret = 0;
1439 } 1438 }
1440 } 1439 }
1441 1440
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index df3a33c530cf..5d87195390f8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -551,7 +551,6 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
551 struct host_cmd_tlv_mac_addr *tlv_mac; 551 struct host_cmd_tlv_mac_addr *tlv_mac;
552 u16 key_param_len = 0, cmd_size; 552 u16 key_param_len = 0, cmd_size;
553 int ret = 0; 553 int ret = 0;
554 const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
555 554
556 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL); 555 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
557 key_material->action = cpu_to_le16(cmd_action); 556 key_material->action = cpu_to_le16(cmd_action);
@@ -593,7 +592,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
593 /* set 0 when re-key */ 592 /* set 0 when re-key */
594 key_material->key_param_set.key[1] = 0; 593 key_material->key_param_set.key[1] = 0;
595 594
596 if (0 != memcmp(enc_key->mac_addr, bc_mac, sizeof(bc_mac))) { 595 if (!is_broadcast_ether_addr(enc_key->mac_addr)) {
597 /* WAPI pairwise key: unicast */ 596 /* WAPI pairwise key: unicast */
598 key_material->key_param_set.key_info |= 597 key_material->key_param_set.key_info |=
599 cpu_to_le16(KEY_UNICAST); 598 cpu_to_le16(KEY_UNICAST);
@@ -610,7 +609,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
610 memcpy(&key_material->key_param_set.key[2], 609 memcpy(&key_material->key_param_set.key[2],
611 enc_key->key_material, enc_key->key_len); 610 enc_key->key_material, enc_key->key_len);
612 memcpy(&key_material->key_param_set.key[2 + enc_key->key_len], 611 memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
613 enc_key->wapi_rxpn, WAPI_RXPN_LEN); 612 enc_key->pn, PN_LEN);
614 key_material->key_param_set.length = 613 key_material->key_param_set.length =
615 cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN); 614 cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
616 615
@@ -621,23 +620,38 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
621 return ret; 620 return ret;
622 } 621 }
623 if (enc_key->key_len == WLAN_KEY_LEN_CCMP) { 622 if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
624 dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n"); 623 if (enc_key->is_igtk_key) {
625 key_material->key_param_set.key_type_id = 624 dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
625 key_material->key_param_set.key_type_id =
626 cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
627 if (cmd_oid == KEY_INFO_ENABLED)
628 key_material->key_param_set.key_info =
629 cpu_to_le16(KEY_ENABLED);
630 else
631 key_material->key_param_set.key_info =
632 cpu_to_le16(!KEY_ENABLED);
633
634 key_material->key_param_set.key_info |=
635 cpu_to_le16(KEY_IGTK);
636 } else {
637 dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
638 key_material->key_param_set.key_type_id =
626 cpu_to_le16(KEY_TYPE_ID_AES); 639 cpu_to_le16(KEY_TYPE_ID_AES);
627 if (cmd_oid == KEY_INFO_ENABLED) 640 if (cmd_oid == KEY_INFO_ENABLED)
628 key_material->key_param_set.key_info = 641 key_material->key_param_set.key_info =
629 cpu_to_le16(KEY_ENABLED); 642 cpu_to_le16(KEY_ENABLED);
630 else 643 else
631 key_material->key_param_set.key_info = 644 key_material->key_param_set.key_info =
632 cpu_to_le16(!KEY_ENABLED); 645 cpu_to_le16(!KEY_ENABLED);
633 646
634 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST) 647 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
635 /* AES pairwise key: unicast */ 648 /* AES pairwise key: unicast */
636 key_material->key_param_set.key_info |= 649 key_material->key_param_set.key_info |=
637 cpu_to_le16(KEY_UNICAST); 650 cpu_to_le16(KEY_UNICAST);
638 else /* AES group key: multicast */ 651 else /* AES group key: multicast */
639 key_material->key_param_set.key_info |= 652 key_material->key_param_set.key_info |=
640 cpu_to_le16(KEY_MCAST); 653 cpu_to_le16(KEY_MCAST);
654 }
641 } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) { 655 } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
642 dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n"); 656 dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
643 key_material->key_param_set.key_type_id = 657 key_material->key_param_set.key_type_id =
@@ -668,6 +682,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
668 key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN) 682 key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
669 + sizeof(struct mwifiex_ie_types_header); 683 + sizeof(struct mwifiex_ie_types_header);
670 684
685 if (le16_to_cpu(key_material->key_param_set.key_type_id) ==
686 KEY_TYPE_ID_AES_CMAC) {
687 struct mwifiex_cmac_param *param =
688 (void *)key_material->key_param_set.key;
689
690 memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN);
691 memcpy(param->key, enc_key->key_material,
692 WLAN_KEY_LEN_AES_CMAC);
693
694 key_param_len = sizeof(struct mwifiex_cmac_param);
695 key_material->key_param_set.key_len =
696 cpu_to_le16(key_param_len);
697 key_param_len += KEYPARAMSET_FIXED_LEN;
698 key_material->key_param_set.length =
699 cpu_to_le16(key_param_len);
700 key_param_len += sizeof(struct mwifiex_ie_types_header);
701 }
702
671 cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN 703 cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
672 + key_param_len); 704 + key_param_len);
673 705
@@ -1135,6 +1167,31 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1135 S_DS_GEN); 1167 S_DS_GEN);
1136 ret = 0; 1168 ret = 0;
1137 break; 1169 break;
1170 case HostCmd_CMD_MGMT_FRAME_REG:
1171 cmd_ptr->command = cpu_to_le16(cmd_no);
1172 cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action);
1173 cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf);
1174 cmd_ptr->size =
1175 cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) +
1176 S_DS_GEN);
1177 ret = 0;
1178 break;
1179 case HostCmd_CMD_REMAIN_ON_CHAN:
1180 cmd_ptr->command = cpu_to_le16(cmd_no);
1181 memcpy(&cmd_ptr->params, data_buf,
1182 sizeof(struct host_cmd_ds_remain_on_chan));
1183 cmd_ptr->size =
1184 cpu_to_le16(sizeof(struct host_cmd_ds_remain_on_chan) +
1185 S_DS_GEN);
1186 break;
1187 case HostCmd_CMD_P2P_MODE_CFG:
1188 cmd_ptr->command = cpu_to_le16(cmd_no);
1189 cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
1190 cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf);
1191 cmd_ptr->size =
1192 cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) +
1193 S_DS_GEN);
1194 break;
1138 case HostCmd_CMD_FUNC_INIT: 1195 case HostCmd_CMD_FUNC_INIT:
1139 if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET) 1196 if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
1140 priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY; 1197 priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
@@ -1204,6 +1261,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1204 else if (priv->bss_mode == NL80211_IFTYPE_STATION) 1261 else if (priv->bss_mode == NL80211_IFTYPE_STATION)
1205 cmd_ptr->params.bss_mode.con_type = 1262 cmd_ptr->params.bss_mode.con_type =
1206 CONNECTION_TYPE_INFRA; 1263 CONNECTION_TYPE_INFRA;
1264 else if (priv->bss_mode == NL80211_IFTYPE_AP)
1265 cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
1207 cmd_ptr->size = cpu_to_le16(sizeof(struct 1266 cmd_ptr->size = cpu_to_le16(sizeof(struct
1208 host_cmd_ds_set_bss_mode) + S_DS_GEN); 1267 host_cmd_ds_set_bss_mode) + S_DS_GEN);
1209 ret = 0; 1268 ret = 0;
@@ -1253,35 +1312,35 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1253 1312
1254 if (first_sta) { 1313 if (first_sta) {
1255 if (priv->adapter->iface_type == MWIFIEX_PCIE) { 1314 if (priv->adapter->iface_type == MWIFIEX_PCIE) {
1256 ret = mwifiex_send_cmd_async(priv, 1315 ret = mwifiex_send_cmd_sync(priv,
1257 HostCmd_CMD_PCIE_DESC_DETAILS, 1316 HostCmd_CMD_PCIE_DESC_DETAILS,
1258 HostCmd_ACT_GEN_SET, 0, NULL); 1317 HostCmd_ACT_GEN_SET, 0, NULL);
1259 if (ret) 1318 if (ret)
1260 return -1; 1319 return -1;
1261 } 1320 }
1262 1321
1263 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_FUNC_INIT, 1322 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
1264 HostCmd_ACT_GEN_SET, 0, NULL); 1323 HostCmd_ACT_GEN_SET, 0, NULL);
1265 if (ret) 1324 if (ret)
1266 return -1; 1325 return -1;
1267 /* Read MAC address from HW */ 1326 /* Read MAC address from HW */
1268 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_GET_HW_SPEC, 1327 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
1269 HostCmd_ACT_GEN_GET, 0, NULL); 1328 HostCmd_ACT_GEN_GET, 0, NULL);
1270 if (ret) 1329 if (ret)
1271 return -1; 1330 return -1;
1272 1331
1273 /* Reconfigure tx buf size */ 1332 /* Reconfigure tx buf size */
1274 ret = mwifiex_send_cmd_async(priv, 1333 ret = mwifiex_send_cmd_sync(priv,
1275 HostCmd_CMD_RECONFIGURE_TX_BUFF, 1334 HostCmd_CMD_RECONFIGURE_TX_BUFF,
1276 HostCmd_ACT_GEN_SET, 0, 1335 HostCmd_ACT_GEN_SET, 0,
1277 &priv->adapter->tx_buf_size); 1336 &priv->adapter->tx_buf_size);
1278 if (ret) 1337 if (ret)
1279 return -1; 1338 return -1;
1280 1339
1281 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 1340 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
1282 /* Enable IEEE PS by default */ 1341 /* Enable IEEE PS by default */
1283 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP; 1342 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1284 ret = mwifiex_send_cmd_async( 1343 ret = mwifiex_send_cmd_sync(
1285 priv, HostCmd_CMD_802_11_PS_MODE_ENH, 1344 priv, HostCmd_CMD_802_11_PS_MODE_ENH,
1286 EN_AUTO_PS, BITMAP_STA_PS, NULL); 1345 EN_AUTO_PS, BITMAP_STA_PS, NULL);
1287 if (ret) 1346 if (ret)
@@ -1290,21 +1349,21 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1290 } 1349 }
1291 1350
1292 /* get tx rate */ 1351 /* get tx rate */
1293 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TX_RATE_CFG, 1352 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
1294 HostCmd_ACT_GEN_GET, 0, NULL); 1353 HostCmd_ACT_GEN_GET, 0, NULL);
1295 if (ret) 1354 if (ret)
1296 return -1; 1355 return -1;
1297 priv->data_rate = 0; 1356 priv->data_rate = 0;
1298 1357
1299 /* get tx power */ 1358 /* get tx power */
1300 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR, 1359 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
1301 HostCmd_ACT_GEN_GET, 0, NULL); 1360 HostCmd_ACT_GEN_GET, 0, NULL);
1302 if (ret) 1361 if (ret)
1303 return -1; 1362 return -1;
1304 1363
1305 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) { 1364 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
1306 /* set ibss coalescing_status */ 1365 /* set ibss coalescing_status */
1307 ret = mwifiex_send_cmd_async( 1366 ret = mwifiex_send_cmd_sync(
1308 priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, 1367 priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
1309 HostCmd_ACT_GEN_SET, 0, &enable); 1368 HostCmd_ACT_GEN_SET, 0, &enable);
1310 if (ret) 1369 if (ret)
@@ -1314,16 +1373,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1314 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl)); 1373 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
1315 amsdu_aggr_ctrl.enable = true; 1374 amsdu_aggr_ctrl.enable = true;
1316 /* Send request to firmware */ 1375 /* Send request to firmware */
1317 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_AMSDU_AGGR_CTRL, 1376 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
1318 HostCmd_ACT_GEN_SET, 0, 1377 HostCmd_ACT_GEN_SET, 0,
1319 &amsdu_aggr_ctrl); 1378 &amsdu_aggr_ctrl);
1320 if (ret) 1379 if (ret)
1321 return -1; 1380 return -1;
1322 /* MAC Control must be the last command in init_fw */ 1381 /* MAC Control must be the last command in init_fw */
1323 /* set MAC Control */ 1382 /* set MAC Control */
1324 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, 1383 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
1325 HostCmd_ACT_GEN_SET, 0, 1384 HostCmd_ACT_GEN_SET, 0,
1326 &priv->curr_pkt_filter); 1385 &priv->curr_pkt_filter);
1327 if (ret) 1386 if (ret)
1328 return -1; 1387 return -1;
1329 1388
@@ -1332,10 +1391,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1332 /* Enable auto deep sleep */ 1391 /* Enable auto deep sleep */
1333 auto_ds.auto_ds = DEEP_SLEEP_ON; 1392 auto_ds.auto_ds = DEEP_SLEEP_ON;
1334 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; 1393 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
1335 ret = mwifiex_send_cmd_async(priv, 1394 ret = mwifiex_send_cmd_sync(priv,
1336 HostCmd_CMD_802_11_PS_MODE_ENH, 1395 HostCmd_CMD_802_11_PS_MODE_ENH,
1337 EN_AUTO_PS, BITMAP_AUTO_DS, 1396 EN_AUTO_PS, BITMAP_AUTO_DS,
1338 &auto_ds); 1397 &auto_ds);
1339 if (ret) 1398 if (ret)
1340 return -1; 1399 return -1;
1341 } 1400 }
@@ -1343,23 +1402,24 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1343 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 1402 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
1344 /* Send cmd to FW to enable/disable 11D function */ 1403 /* Send cmd to FW to enable/disable 11D function */
1345 state_11d = ENABLE_11D; 1404 state_11d = ENABLE_11D;
1346 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB, 1405 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
1347 HostCmd_ACT_GEN_SET, DOT11D_I, 1406 HostCmd_ACT_GEN_SET, DOT11D_I,
1348 &state_11d); 1407 &state_11d);
1349 if (ret) 1408 if (ret)
1350 dev_err(priv->adapter->dev, 1409 dev_err(priv->adapter->dev,
1351 "11D: failed to enable 11D\n"); 1410 "11D: failed to enable 11D\n");
1352 } 1411 }
1353 1412
1413 /* set last_init_cmd before sending the command */
1414 priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
1415
1354 /* Send cmd to FW to configure 11n specific configuration 1416 /* Send cmd to FW to configure 11n specific configuration
1355 * (Short GI, Channel BW, Green field support etc.) for transmit 1417 * (Short GI, Channel BW, Green field support etc.) for transmit
1356 */ 1418 */
1357 tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG; 1419 tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
1358 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_CFG, 1420 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
1359 HostCmd_ACT_GEN_SET, 0, &tx_cfg); 1421 HostCmd_ACT_GEN_SET, 0, &tx_cfg);
1360 1422
1361 /* set last_init_cmd */
1362 priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
1363 ret = -EINPROGRESS; 1423 ret = -EINPROGRESS;
1364 1424
1365 return ret; 1425 return ret;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 0b09004ebb25..e380171c4c5d 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -123,7 +123,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
123{ 123{
124 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp = 124 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
125 &resp->params.rssi_info_rsp; 125 &resp->params.rssi_info_rsp;
126 struct mwifiex_ds_misc_subsc_evt subsc_evt; 126 struct mwifiex_ds_misc_subsc_evt *subsc_evt =
127 &priv->async_subsc_evt_storage;
127 128
128 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last); 129 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
129 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last); 130 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -140,26 +141,27 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
140 if (priv->subsc_evt_rssi_state == EVENT_HANDLED) 141 if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
141 return 0; 142 return 0;
142 143
144 memset(subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
145
143 /* Resubscribe low and high rssi events with new thresholds */ 146 /* Resubscribe low and high rssi events with new thresholds */
144 memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt)); 147 subsc_evt->events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
145 subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH; 148 subsc_evt->action = HostCmd_ACT_BITWISE_SET;
146 subsc_evt.action = HostCmd_ACT_BITWISE_SET;
147 if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) { 149 if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
148 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg - 150 subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
149 priv->cqm_rssi_hyst); 151 priv->cqm_rssi_hyst);
150 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold); 152 subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
151 } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) { 153 } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
152 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold); 154 subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
153 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg + 155 subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
154 priv->cqm_rssi_hyst); 156 priv->cqm_rssi_hyst);
155 } 157 }
156 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1; 158 subsc_evt->bcn_l_rssi_cfg.evt_freq = 1;
157 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1; 159 subsc_evt->bcn_h_rssi_cfg.evt_freq = 1;
158 160
159 priv->subsc_evt_rssi_state = EVENT_HANDLED; 161 priv->subsc_evt_rssi_state = EVENT_HANDLED;
160 162
161 mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT, 163 mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
162 0, 0, &subsc_evt); 164 0, 0, subsc_evt);
163 165
164 return 0; 166 return 0;
165} 167}
@@ -652,6 +654,38 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
652} 654}
653 655
654/* 656/*
657 * This function handles the command response of remain on channel.
658 */
659static int
660mwifiex_ret_remain_on_chan(struct mwifiex_private *priv,
661 struct host_cmd_ds_command *resp,
662 struct host_cmd_ds_remain_on_chan *roc_cfg)
663{
664 struct host_cmd_ds_remain_on_chan *resp_cfg = &resp->params.roc_cfg;
665
666 if (roc_cfg)
667 memcpy(roc_cfg, resp_cfg, sizeof(*roc_cfg));
668
669 return 0;
670}
671
672/*
673 * This function handles the command response of P2P mode cfg.
674 */
675static int
676mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
677 struct host_cmd_ds_command *resp,
678 void *data_buf)
679{
680 struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg;
681
682 if (data_buf)
683 *((u16 *)data_buf) = le16_to_cpu(mode_cfg->mode);
684
685 return 0;
686}
687
688/*
655 * This function handles the command response of register access. 689 * This function handles the command response of register access.
656 * 690 *
657 * The register value and offset are returned to the user. For EEPROM 691 * The register value and offset are returned to the user. For EEPROM
@@ -736,7 +770,6 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
736{ 770{
737 struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp = 771 struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
738 &(resp->params.ibss_coalescing); 772 &(resp->params.ibss_coalescing);
739 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
740 773
741 if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET) 774 if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
742 return 0; 775 return 0;
@@ -745,7 +778,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
745 "info: new BSSID %pM\n", ibss_coal_resp->bssid); 778 "info: new BSSID %pM\n", ibss_coal_resp->bssid);
746 779
747 /* If rsp has NULL BSSID, Just return..... No Action */ 780 /* If rsp has NULL BSSID, Just return..... No Action */
748 if (!memcmp(ibss_coal_resp->bssid, zero_mac, ETH_ALEN)) { 781 if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
749 dev_warn(priv->adapter->dev, "new BSSID is NULL\n"); 782 dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
750 return 0; 783 return 0;
751 } 784 }
@@ -775,8 +808,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
775 * This function handles the command response for subscribe event command. 808 * This function handles the command response for subscribe event command.
776 */ 809 */
777static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv, 810static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
778 struct host_cmd_ds_command *resp, 811 struct host_cmd_ds_command *resp)
779 struct mwifiex_ds_misc_subsc_evt *sub_event)
780{ 812{
781 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event = 813 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
782 &resp->params.subsc_evt; 814 &resp->params.subsc_evt;
@@ -786,10 +818,6 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
786 dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n", 818 dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
787 le16_to_cpu(cmd_sub_event->events)); 819 le16_to_cpu(cmd_sub_event->events));
788 820
789 /*Return the subscribed event info for a Get request*/
790 if (sub_event)
791 sub_event->events = le16_to_cpu(cmd_sub_event->events);
792
793 return 0; 821 return 0;
794} 822}
795 823
@@ -879,6 +907,13 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
879 case HostCmd_CMD_VERSION_EXT: 907 case HostCmd_CMD_VERSION_EXT:
880 ret = mwifiex_ret_ver_ext(priv, resp, data_buf); 908 ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
881 break; 909 break;
910 case HostCmd_CMD_REMAIN_ON_CHAN:
911 ret = mwifiex_ret_remain_on_chan(priv, resp, data_buf);
912 break;
913 case HostCmd_CMD_P2P_MODE_CFG:
914 ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf);
915 break;
916 case HostCmd_CMD_MGMT_FRAME_REG:
882 case HostCmd_CMD_FUNC_INIT: 917 case HostCmd_CMD_FUNC_INIT:
883 case HostCmd_CMD_FUNC_SHUTDOWN: 918 case HostCmd_CMD_FUNC_SHUTDOWN:
884 break; 919 break;
@@ -913,7 +948,6 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
913 le16_to_cpu(resp->params.tx_buf.mp_end_port)); 948 le16_to_cpu(resp->params.tx_buf.mp_end_port));
914 break; 949 break;
915 case HostCmd_CMD_AMSDU_AGGR_CTRL: 950 case HostCmd_CMD_AMSDU_AGGR_CTRL:
916 ret = mwifiex_ret_amsdu_aggr_ctrl(resp, data_buf);
917 break; 951 break;
918 case HostCmd_CMD_WMM_GET_STATUS: 952 case HostCmd_CMD_WMM_GET_STATUS:
919 ret = mwifiex_ret_wmm_get_status(priv, resp); 953 ret = mwifiex_ret_wmm_get_status(priv, resp);
@@ -932,12 +966,11 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
932 case HostCmd_CMD_SET_BSS_MODE: 966 case HostCmd_CMD_SET_BSS_MODE:
933 break; 967 break;
934 case HostCmd_CMD_11N_CFG: 968 case HostCmd_CMD_11N_CFG:
935 ret = mwifiex_ret_11n_cfg(resp, data_buf);
936 break; 969 break;
937 case HostCmd_CMD_PCIE_DESC_DETAILS: 970 case HostCmd_CMD_PCIE_DESC_DETAILS:
938 break; 971 break;
939 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT: 972 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
940 ret = mwifiex_ret_subsc_evt(priv, resp, data_buf); 973 ret = mwifiex_ret_subsc_evt(priv, resp);
941 break; 974 break;
942 case HostCmd_CMD_UAP_SYS_CONFIG: 975 case HostCmd_CMD_UAP_SYS_CONFIG:
943 break; 976 break;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index b8614a825460..aafde30e714a 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -184,10 +184,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
184int mwifiex_process_sta_event(struct mwifiex_private *priv) 184int mwifiex_process_sta_event(struct mwifiex_private *priv)
185{ 185{
186 struct mwifiex_adapter *adapter = priv->adapter; 186 struct mwifiex_adapter *adapter = priv->adapter;
187 int len, ret = 0; 187 int ret = 0;
188 u32 eventcause = adapter->event_cause; 188 u32 eventcause = adapter->event_cause;
189 struct station_info sinfo; 189 u16 ctrl;
190 struct mwifiex_assoc_event *event;
191 190
192 switch (eventcause) { 191 switch (eventcause) {
193 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: 192 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -279,10 +278,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
279 278
280 case EVENT_MIC_ERR_UNICAST: 279 case EVENT_MIC_ERR_UNICAST:
281 dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n"); 280 dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
281 cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
282 NL80211_KEYTYPE_PAIRWISE,
283 -1, NULL, GFP_KERNEL);
282 break; 284 break;
283 285
284 case EVENT_MIC_ERR_MULTICAST: 286 case EVENT_MIC_ERR_MULTICAST:
285 dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n"); 287 dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
288 cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
289 NL80211_KEYTYPE_GROUP,
290 -1, NULL, GFP_KERNEL);
286 break; 291 break;
287 case EVENT_MIB_CHANGED: 292 case EVENT_MIB_CHANGED:
288 case EVENT_INIT_DONE: 293 case EVENT_INIT_DONE:
@@ -384,11 +389,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
384 adapter->event_body); 389 adapter->event_body);
385 break; 390 break;
386 case EVENT_AMSDU_AGGR_CTRL: 391 case EVENT_AMSDU_AGGR_CTRL:
387 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", 392 ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
388 *(u16 *) adapter->event_body); 393 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
394
389 adapter->tx_buf_size = 395 adapter->tx_buf_size =
390 min(adapter->curr_tx_buf_size, 396 min_t(u16, adapter->curr_tx_buf_size, ctrl);
391 le16_to_cpu(*(__le16 *) adapter->event_body));
392 dev_dbg(adapter->dev, "event: tx_buf_size %d\n", 397 dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
393 adapter->tx_buf_size); 398 adapter->tx_buf_size);
394 break; 399 break;
@@ -405,51 +410,18 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
405 dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause); 410 dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
406 break; 411 break;
407 412
408 case EVENT_UAP_STA_ASSOC: 413 case EVENT_REMAIN_ON_CHAN_EXPIRED:
409 memset(&sinfo, 0, sizeof(sinfo)); 414 dev_dbg(adapter->dev, "event: Remain on channel expired\n");
410 event = (struct mwifiex_assoc_event *) 415 cfg80211_remain_on_channel_expired(priv->wdev,
411 (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER); 416 priv->roc_cfg.cookie,
412 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { 417 &priv->roc_cfg.chan,
413 len = -1; 418 priv->roc_cfg.chan_type,
414 419 GFP_ATOMIC);
415 if (ieee80211_is_assoc_req(event->frame_control)) 420
416 len = 0; 421 memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
417 else if (ieee80211_is_reassoc_req(event->frame_control)) 422
418 /* There will be ETH_ALEN bytes of
419 * current_ap_addr before the re-assoc ies.
420 */
421 len = ETH_ALEN;
422
423 if (len != -1) {
424 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
425 sinfo.assoc_req_ies = &event->data[len];
426 len = (u8 *)sinfo.assoc_req_ies -
427 (u8 *)&event->frame_control;
428 sinfo.assoc_req_ies_len =
429 le16_to_cpu(event->len) - (u16)len;
430 }
431 }
432 cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
433 GFP_KERNEL);
434 break;
435 case EVENT_UAP_STA_DEAUTH:
436 cfg80211_del_sta(priv->netdev, adapter->event_body +
437 MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
438 break;
439 case EVENT_UAP_BSS_IDLE:
440 priv->media_connected = false;
441 break;
442 case EVENT_UAP_BSS_ACTIVE:
443 priv->media_connected = true;
444 break;
445 case EVENT_UAP_BSS_START:
446 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
447 memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
448 break;
449 case EVENT_UAP_MIC_COUNTERMEASURES:
450 /* For future development */
451 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
452 break; 423 break;
424
453 default: 425 default:
454 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", 426 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
455 eventcause); 427 eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index fb2136089a22..0c9f70b2cbe6 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -26,6 +26,9 @@
26#include "11n.h" 26#include "11n.h"
27#include "cfg80211.h" 27#include "cfg80211.h"
28 28
29static int disconnect_on_suspend = 1;
30module_param(disconnect_on_suspend, int, 0644);
31
29/* 32/*
30 * Copies the multicast address list from device to driver. 33 * Copies the multicast address list from device to driver.
31 * 34 *
@@ -192,6 +195,44 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
192 return ret; 195 return ret;
193} 196}
194 197
198static int mwifiex_process_country_ie(struct mwifiex_private *priv,
199 struct cfg80211_bss *bss)
200{
201 u8 *country_ie, country_ie_len;
202 struct mwifiex_802_11d_domain_reg *domain_info =
203 &priv->adapter->domain_reg;
204
205 country_ie = (u8 *)ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
206
207 if (!country_ie)
208 return 0;
209
210 country_ie_len = country_ie[1];
211 if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
212 return 0;
213
214 domain_info->country_code[0] = country_ie[2];
215 domain_info->country_code[1] = country_ie[3];
216 domain_info->country_code[2] = ' ';
217
218 country_ie_len -= IEEE80211_COUNTRY_STRING_LEN;
219
220 domain_info->no_of_triplet =
221 country_ie_len / sizeof(struct ieee80211_country_ie_triplet);
222
223 memcpy((u8 *)domain_info->triplet,
224 &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len);
225
226 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
227 HostCmd_ACT_GEN_SET, 0, NULL)) {
228 wiphy_err(priv->adapter->wiphy,
229 "11D: setting domain info in FW\n");
230 return -1;
231 }
232
233 return 0;
234}
235
195/* 236/*
196 * In Ad-Hoc mode, the IBSS is created if not found in scan list. 237 * In Ad-Hoc mode, the IBSS is created if not found in scan list.
197 * In both Ad-Hoc and infra mode, an deauthentication is performed 238 * In both Ad-Hoc and infra mode, an deauthentication is performed
@@ -207,6 +248,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
207 priv->scan_block = false; 248 priv->scan_block = false;
208 249
209 if (bss) { 250 if (bss) {
251 mwifiex_process_country_ie(priv, bss);
252
210 /* Allocate and fill new bss descriptor */ 253 /* Allocate and fill new bss descriptor */
211 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), 254 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
212 GFP_KERNEL); 255 GFP_KERNEL);
@@ -408,6 +451,16 @@ EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
408int mwifiex_enable_hs(struct mwifiex_adapter *adapter) 451int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
409{ 452{
410 struct mwifiex_ds_hs_cfg hscfg; 453 struct mwifiex_ds_hs_cfg hscfg;
454 struct mwifiex_private *priv;
455 int i;
456
457 if (disconnect_on_suspend) {
458 for (i = 0; i < adapter->priv_num; i++) {
459 priv = adapter->priv[i];
460 if (priv)
461 mwifiex_deauthenticate(priv, NULL);
462 }
463 }
411 464
412 if (adapter->hs_activated) { 465 if (adapter->hs_activated) {
413 dev_dbg(adapter->dev, "cmd: HS Already actived\n"); 466 dev_dbg(adapter->dev, "cmd: HS Already actived\n");
@@ -942,20 +995,26 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
942 * This function allocates the IOCTL request buffer, fills it 995 * This function allocates the IOCTL request buffer, fills it
943 * with requisite parameters and calls the IOCTL handler. 996 * with requisite parameters and calls the IOCTL handler.
944 */ 997 */
945int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key, 998int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
946 int key_len, u8 key_index, 999 const u8 *key, int key_len, u8 key_index,
947 const u8 *mac_addr, int disable) 1000 const u8 *mac_addr, int disable)
948{ 1001{
949 struct mwifiex_ds_encrypt_key encrypt_key; 1002 struct mwifiex_ds_encrypt_key encrypt_key;
950 1003
951 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key)); 1004 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
952 encrypt_key.key_len = key_len; 1005 encrypt_key.key_len = key_len;
1006
1007 if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
1008 encrypt_key.is_igtk_key = true;
1009
953 if (!disable) { 1010 if (!disable) {
954 encrypt_key.key_index = key_index; 1011 encrypt_key.key_index = key_index;
955 if (key_len) 1012 if (key_len)
956 memcpy(encrypt_key.key_material, key, key_len); 1013 memcpy(encrypt_key.key_material, key, key_len);
957 if (mac_addr) 1014 if (mac_addr)
958 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); 1015 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
1016 if (kp && kp->seq && kp->seq_len)
1017 memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
959 } else { 1018 } else {
960 encrypt_key.key_disable = true; 1019 encrypt_key.key_disable = true;
961 if (mac_addr) 1020 if (mac_addr)
@@ -984,6 +1043,65 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
984 return 0; 1043 return 0;
985} 1044}
986 1045
1046int
1047mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1048 struct ieee80211_channel *chan,
1049 enum nl80211_channel_type *ct,
1050 unsigned int duration)
1051{
1052 struct host_cmd_ds_remain_on_chan roc_cfg;
1053 u8 sc;
1054
1055 memset(&roc_cfg, 0, sizeof(roc_cfg));
1056 roc_cfg.action = cpu_to_le16(action);
1057 if (action == HostCmd_ACT_GEN_SET) {
1058 roc_cfg.band_cfg = chan->band;
1059 sc = mwifiex_chan_type_to_sec_chan_offset(*ct);
1060 roc_cfg.band_cfg |= (sc << 2);
1061
1062 roc_cfg.channel =
1063 ieee80211_frequency_to_channel(chan->center_freq);
1064 roc_cfg.duration = cpu_to_le32(duration);
1065 }
1066 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
1067 action, 0, &roc_cfg)) {
1068 dev_err(priv->adapter->dev, "failed to remain on channel\n");
1069 return -1;
1070 }
1071
1072 return roc_cfg.status;
1073}
1074
1075int
1076mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
1077{
1078 if (GET_BSS_ROLE(priv) == bss_role) {
1079 dev_dbg(priv->adapter->dev,
1080 "info: already in the desired role.\n");
1081 return 0;
1082 }
1083
1084 mwifiex_free_priv(priv);
1085 mwifiex_init_priv(priv);
1086
1087 priv->bss_role = bss_role;
1088 switch (bss_role) {
1089 case MWIFIEX_BSS_ROLE_UAP:
1090 priv->bss_mode = NL80211_IFTYPE_AP;
1091 break;
1092 case MWIFIEX_BSS_ROLE_STA:
1093 case MWIFIEX_BSS_ROLE_ANY:
1094 default:
1095 priv->bss_mode = NL80211_IFTYPE_STATION;
1096 break;
1097 }
1098
1099 mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
1100 HostCmd_ACT_GEN_SET, 0, NULL);
1101
1102 return mwifiex_sta_init_cmd(priv, false);
1103}
1104
987/* 1105/*
988 * Sends IOCTL request to get statistics information. 1106 * Sends IOCTL request to get statistics information.
989 * 1107 *
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 02ce3b77d3e7..07d32b73783e 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -54,8 +54,8 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
54 54
55 local_rx_pd = (struct rxpd *) (skb->data); 55 local_rx_pd = (struct rxpd *) (skb->data);
56 56
57 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd + 57 rx_pkt_hdr = (void *)local_rx_pd +
58 local_rx_pd->rx_pkt_offset); 58 le16_to_cpu(local_rx_pd->rx_pkt_offset);
59 59
60 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, 60 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
61 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) { 61 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
@@ -125,7 +125,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
125 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb); 125 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
126 struct rx_packet_hdr *rx_pkt_hdr; 126 struct rx_packet_hdr *rx_pkt_hdr;
127 u8 ta[ETH_ALEN]; 127 u8 ta[ETH_ALEN];
128 u16 rx_pkt_type; 128 u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
129 struct mwifiex_private *priv = 129 struct mwifiex_private *priv =
130 mwifiex_get_priv_by_id(adapter, rx_info->bss_num, 130 mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
131 rx_info->bss_type); 131 rx_info->bss_type);
@@ -134,16 +134,17 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
134 return -1; 134 return -1;
135 135
136 local_rx_pd = (struct rxpd *) (skb->data); 136 local_rx_pd = (struct rxpd *) (skb->data);
137 rx_pkt_type = local_rx_pd->rx_pkt_type; 137 rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
138 rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
139 rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
140 seq_num = le16_to_cpu(local_rx_pd->seq_num);
138 141
139 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd + 142 rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
140 local_rx_pd->rx_pkt_offset);
141 143
142 if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) > 144 if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
143 (u16) skb->len) { 145 dev_err(adapter->dev,
144 dev_err(adapter->dev, "wrong rx packet: len=%d," 146 "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
145 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, 147 skb->len, rx_pkt_offset, rx_pkt_length);
146 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
147 priv->stats.rx_dropped++; 148 priv->stats.rx_dropped++;
148 149
149 if (adapter->if_ops.data_complete) 150 if (adapter->if_ops.data_complete)
@@ -154,14 +155,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
154 return ret; 155 return ret;
155 } 156 }
156 157
157 if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) { 158 if (rx_pkt_type == PKT_TYPE_AMSDU) {
158 struct sk_buff_head list; 159 struct sk_buff_head list;
159 struct sk_buff *rx_skb; 160 struct sk_buff *rx_skb;
160 161
161 __skb_queue_head_init(&list); 162 __skb_queue_head_init(&list);
162 163
163 skb_pull(skb, local_rx_pd->rx_pkt_offset); 164 skb_pull(skb, rx_pkt_offset);
164 skb_trim(skb, local_rx_pd->rx_pkt_length); 165 skb_trim(skb, rx_pkt_length);
165 166
166 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, 167 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
167 priv->wdev->iftype, 0, false); 168 priv->wdev->iftype, 0, false);
@@ -173,6 +174,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
173 dev_err(adapter->dev, "Rx of A-MSDU failed"); 174 dev_err(adapter->dev, "Rx of A-MSDU failed");
174 } 175 }
175 return 0; 176 return 0;
177 } else if (rx_pkt_type == PKT_TYPE_MGMT) {
178 ret = mwifiex_process_mgmt_packet(adapter, skb);
179 if (ret)
180 dev_err(adapter->dev, "Rx of mgmt packet failed");
181 dev_kfree_skb_any(skb);
182 return ret;
176 } 183 }
177 184
178 /* 185 /*
@@ -189,17 +196,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
189 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN); 196 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
190 } else { 197 } else {
191 if (rx_pkt_type != PKT_TYPE_BAR) 198 if (rx_pkt_type != PKT_TYPE_BAR)
192 priv->rx_seq[local_rx_pd->priority] = 199 priv->rx_seq[local_rx_pd->priority] = seq_num;
193 local_rx_pd->seq_num;
194 memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address, 200 memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
195 ETH_ALEN); 201 ETH_ALEN);
196 } 202 }
197 203
198 /* Reorder and send to OS */ 204 /* Reorder and send to OS */
199 ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num, 205 ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
200 local_rx_pd->priority, ta, 206 ta, (u8) rx_pkt_type, skb);
201 (u8) local_rx_pd->rx_pkt_type,
202 skb);
203 207
204 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) { 208 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
205 if (adapter->if_ops.data_complete) 209 if (adapter->if_ops.data_complete)
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 0a046d3a0c16..7b581af24f5f 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -48,6 +48,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
48 struct txpd *local_tx_pd; 48 struct txpd *local_tx_pd;
49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); 49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
50 u8 pad; 50 u8 pad;
51 u16 pkt_type, pkt_offset;
51 52
52 if (!skb->len) { 53 if (!skb->len) {
53 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); 54 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -55,6 +56,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
55 return skb->data; 56 return skb->data;
56 } 57 }
57 58
59 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
60
58 /* If skb->data is not aligned; add padding */ 61 /* If skb->data is not aligned; add padding */
59 pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4; 62 pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
60 63
@@ -93,7 +96,14 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
93 } 96 }
94 97
95 /* Offset of actual data */ 98 /* Offset of actual data */
96 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + pad); 99 pkt_offset = sizeof(struct txpd) + pad;
100 if (pkt_type == PKT_TYPE_MGMT) {
101 /* Set the packet type and add header for management frame */
102 local_tx_pd->tx_pkt_type = cpu_to_le16(pkt_type);
103 pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
104 }
105
106 local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
97 107
98 /* make space for INTF_HEADER_LEN */ 108 /* make space for INTF_HEADER_LEN */
99 skb_push(skb, INTF_HEADER_LEN); 109 skb_push(skb, INTF_HEADER_LEN);
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index cecb27283196..2af263992e83 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -51,6 +51,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
51 rx_info->bss_num = priv->bss_num; 51 rx_info->bss_num = priv->bss_num;
52 rx_info->bss_type = priv->bss_type; 52 rx_info->bss_type = priv->bss_type;
53 53
54 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
55 return mwifiex_process_uap_rx_packet(adapter, skb);
56
54 return mwifiex_process_sta_rx_packet(adapter, skb); 57 return mwifiex_process_sta_rx_packet(adapter, skb);
55} 58}
56EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); 59EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -72,7 +75,11 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
72 u8 *head_ptr; 75 u8 *head_ptr;
73 struct txpd *local_tx_pd = NULL; 76 struct txpd *local_tx_pd = NULL;
74 77
75 head_ptr = mwifiex_process_sta_txpd(priv, skb); 78 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
79 head_ptr = mwifiex_process_uap_txpd(priv, skb);
80 else
81 head_ptr = mwifiex_process_sta_txpd(priv, skb);
82
76 if (head_ptr) { 83 if (head_ptr) {
77 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) 84 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
78 local_tx_pd = 85 local_tx_pd =
@@ -157,6 +164,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
157 priv->stats.tx_errors++; 164 priv->stats.tx_errors++;
158 } 165 }
159 166
167 if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
168 atomic_dec_return(&adapter->pending_bridged_pkts);
160 if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING) 169 if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
161 goto done; 170 goto done;
162 171
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index f40e93fe894a..d95a2d558fcf 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,6 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
167 if (ht_ie) { 167 if (ht_ie) {
168 memcpy(&bss_cfg->ht_cap, ht_ie + 2, 168 memcpy(&bss_cfg->ht_cap, ht_ie + 2,
169 sizeof(struct ieee80211_ht_cap)); 169 sizeof(struct ieee80211_ht_cap));
170 priv->ap_11n_enabled = 1;
170 } else { 171 } else {
171 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap)); 172 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
172 bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP); 173 bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
@@ -176,6 +177,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
176 return; 177 return;
177} 178}
178 179
180/* This function finds supported rates IE from beacon parameter and sets
181 * these rates into bss_config structure.
182 */
183void
184mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
185 struct cfg80211_ap_settings *params)
186{
187 struct ieee_types_header *rate_ie;
188 int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
189 const u8 *var_pos = params->beacon.head + var_offset;
190 int len = params->beacon.head_len - var_offset;
191
192 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
193 if (rate_ie)
194 memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
195
196 return;
197}
198
179/* This function initializes some of mwifiex_uap_bss_param variables. 199/* This function initializes some of mwifiex_uap_bss_param variables.
180 * This helps FW in ignoring invalid values. These values may or may not 200 * This helps FW in ignoring invalid values. These values may or may not
181 * be get updated to valid ones at later stage. 201 * be get updated to valid ones at later stage.
@@ -322,8 +342,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
322 struct host_cmd_tlv_retry_limit *retry_limit; 342 struct host_cmd_tlv_retry_limit *retry_limit;
323 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol; 343 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
324 struct host_cmd_tlv_auth_type *auth_type; 344 struct host_cmd_tlv_auth_type *auth_type;
345 struct host_cmd_tlv_rates *tlv_rates;
346 struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
325 struct mwifiex_ie_types_htcap *htcap; 347 struct mwifiex_ie_types_htcap *htcap;
326 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf; 348 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
349 int i;
327 u16 cmd_size = *param_size; 350 u16 cmd_size = *param_size;
328 351
329 if (bss_cfg->ssid.ssid_len) { 352 if (bss_cfg->ssid.ssid_len) {
@@ -343,7 +366,23 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
343 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid); 366 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
344 tlv += sizeof(struct host_cmd_tlv_bcast_ssid); 367 tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
345 } 368 }
346 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { 369 if (bss_cfg->rates[0]) {
370 tlv_rates = (struct host_cmd_tlv_rates *)tlv;
371 tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
372
373 for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
374 i++)
375 tlv_rates->rates[i] = bss_cfg->rates[i];
376
377 tlv_rates->tlv.len = cpu_to_le16(i);
378 cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
379 tlv += sizeof(struct host_cmd_tlv_rates) + i;
380 }
381 if (bss_cfg->channel &&
382 ((bss_cfg->band_cfg == BAND_CONFIG_BG &&
383 bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
384 (bss_cfg->band_cfg == BAND_CONFIG_A &&
385 bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
347 chan_band = (struct host_cmd_tlv_channel_band *)tlv; 386 chan_band = (struct host_cmd_tlv_channel_band *)tlv;
348 chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST); 387 chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
349 chan_band->tlv.len = 388 chan_band->tlv.len =
@@ -459,6 +498,27 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
459 tlv += sizeof(struct mwifiex_ie_types_htcap); 498 tlv += sizeof(struct mwifiex_ie_types_htcap);
460 } 499 }
461 500
501 if (bss_cfg->sta_ao_timer) {
502 ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
503 ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
504 ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
505 sizeof(struct host_cmd_tlv));
506 ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
507 cmd_size += sizeof(*ao_timer);
508 tlv += sizeof(*ao_timer);
509 }
510
511 if (bss_cfg->ps_sta_ao_timer) {
512 ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
513 ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
514 ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
515 sizeof(struct host_cmd_tlv));
516 ps_ao_timer->sta_ao_timer =
517 cpu_to_le32(bss_cfg->ps_sta_ao_timer);
518 cmd_size += sizeof(*ps_ao_timer);
519 tlv += sizeof(*ps_ao_timer);
520 }
521
462 *param_size = cmd_size; 522 *param_size = cmd_size;
463 523
464 return 0; 524 return 0;
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
new file mode 100644
index 000000000000..a33fa394e349
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -0,0 +1,290 @@
1/*
2 * Marvell Wireless LAN device driver: AP event handling
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "main.h"
22#include "11n.h"
23
24/*
25 * This function will return the pointer to station entry in station list
26 * table which matches specified mac address.
27 * This function should be called after acquiring RA list spinlock.
28 * NULL is returned if station entry is not found in associated STA list.
29 */
30struct mwifiex_sta_node *
31mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
32{
33 struct mwifiex_sta_node *node;
34
35 if (!mac)
36 return NULL;
37
38 list_for_each_entry(node, &priv->sta_list, list) {
39 if (!memcmp(node->mac_addr, mac, ETH_ALEN))
40 return node;
41 }
42
43 return NULL;
44}
45
46/*
47 * This function will add a sta_node entry to associated station list
48 * table with the given mac address.
49 * If entry exist already, existing entry is returned.
50 * If received mac address is NULL, NULL is returned.
51 */
52static struct mwifiex_sta_node *
53mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
54{
55 struct mwifiex_sta_node *node;
56 unsigned long flags;
57
58 if (!mac)
59 return NULL;
60
61 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
62 node = mwifiex_get_sta_entry(priv, mac);
63 if (node)
64 goto done;
65
66 node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
67 if (!node)
68 goto done;
69
70 memcpy(node->mac_addr, mac, ETH_ALEN);
71 list_add_tail(&node->list, &priv->sta_list);
72
73done:
74 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
75 return node;
76}
77
78/*
79 * This function will search for HT IE in association request IEs
80 * and set station HT parameters accordingly.
81 */
82static void
83mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
84 int ies_len, struct mwifiex_sta_node *node)
85{
86 const struct ieee80211_ht_cap *ht_cap;
87
88 if (!ies)
89 return;
90
91 ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
92 if (ht_cap) {
93 node->is_11n_enabled = 1;
94 node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
95 IEEE80211_HT_CAP_MAX_AMSDU ?
96 MWIFIEX_TX_DATA_BUF_SIZE_8K :
97 MWIFIEX_TX_DATA_BUF_SIZE_4K;
98 } else {
99 node->is_11n_enabled = 0;
100 }
101
102 return;
103}
104
105/*
106 * This function will delete a station entry from station list
107 */
108static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
109{
110 struct mwifiex_sta_node *node, *tmp;
111 unsigned long flags;
112
113 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
114
115 node = mwifiex_get_sta_entry(priv, mac);
116 if (node) {
117 list_for_each_entry_safe(node, tmp, &priv->sta_list,
118 list) {
119 list_del(&node->list);
120 kfree(node);
121 }
122 }
123
124 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
125 return;
126}
127
128/*
129 * This function will delete all stations from associated station list.
130 */
131static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
132{
133 struct mwifiex_sta_node *node, *tmp;
134 unsigned long flags;
135
136 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
137
138 list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
139 list_del(&node->list);
140 kfree(node);
141 }
142
143 INIT_LIST_HEAD(&priv->sta_list);
144 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
145 return;
146}
147
148/*
149 * This function handles AP interface specific events generated by firmware.
150 *
151 * Event specific routines are called by this function based
152 * upon the generated event cause.
153 *
154 *
155 * Events supported for AP -
156 * - EVENT_UAP_STA_ASSOC
157 * - EVENT_UAP_STA_DEAUTH
158 * - EVENT_UAP_BSS_ACTIVE
159 * - EVENT_UAP_BSS_START
160 * - EVENT_UAP_BSS_IDLE
161 * - EVENT_UAP_MIC_COUNTERMEASURES:
162 */
163int mwifiex_process_uap_event(struct mwifiex_private *priv)
164{
165 struct mwifiex_adapter *adapter = priv->adapter;
166 int len, i;
167 u32 eventcause = adapter->event_cause;
168 struct station_info sinfo;
169 struct mwifiex_assoc_event *event;
170 struct mwifiex_sta_node *node;
171 u8 *deauth_mac;
172 struct host_cmd_ds_11n_batimeout *ba_timeout;
173 u16 ctrl;
174
175 switch (eventcause) {
176 case EVENT_UAP_STA_ASSOC:
177 memset(&sinfo, 0, sizeof(sinfo));
178 event = (struct mwifiex_assoc_event *)
179 (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
180 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
181 len = -1;
182
183 if (ieee80211_is_assoc_req(event->frame_control))
184 len = 0;
185 else if (ieee80211_is_reassoc_req(event->frame_control))
186 /* There will be ETH_ALEN bytes of
187 * current_ap_addr before the re-assoc ies.
188 */
189 len = ETH_ALEN;
190
191 if (len != -1) {
192 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
193 sinfo.assoc_req_ies = &event->data[len];
194 len = (u8 *)sinfo.assoc_req_ies -
195 (u8 *)&event->frame_control;
196 sinfo.assoc_req_ies_len =
197 le16_to_cpu(event->len) - (u16)len;
198 }
199 }
200 cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
201 GFP_KERNEL);
202
203 node = mwifiex_add_sta_entry(priv, event->sta_addr);
204 if (!node) {
205 dev_warn(adapter->dev,
206 "could not create station entry!\n");
207 return -1;
208 }
209
210 if (!priv->ap_11n_enabled)
211 break;
212
213 mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
214 sinfo.assoc_req_ies_len, node);
215
216 for (i = 0; i < MAX_NUM_TID; i++) {
217 if (node->is_11n_enabled)
218 node->ampdu_sta[i] =
219 priv->aggr_prio_tbl[i].ampdu_user;
220 else
221 node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
222 }
223 memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
224 break;
225 case EVENT_UAP_STA_DEAUTH:
226 deauth_mac = adapter->event_body +
227 MWIFIEX_UAP_EVENT_EXTRA_HEADER;
228 cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
229
230 if (priv->ap_11n_enabled) {
231 mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
232 mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
233 }
234 mwifiex_del_sta_entry(priv, deauth_mac);
235 break;
236 case EVENT_UAP_BSS_IDLE:
237 priv->media_connected = false;
238 mwifiex_clean_txrx(priv);
239 mwifiex_del_all_sta_list(priv);
240 break;
241 case EVENT_UAP_BSS_ACTIVE:
242 priv->media_connected = true;
243 break;
244 case EVENT_UAP_BSS_START:
245 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
246 memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
247 ETH_ALEN);
248 break;
249 case EVENT_UAP_MIC_COUNTERMEASURES:
250 /* For future development */
251 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
252 break;
253 case EVENT_AMSDU_AGGR_CTRL:
254 ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
255 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
256
257 if (priv->media_connected) {
258 adapter->tx_buf_size =
259 min_t(u16, adapter->curr_tx_buf_size, ctrl);
260 dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
261 adapter->tx_buf_size);
262 }
263 break;
264 case EVENT_ADDBA:
265 dev_dbg(adapter->dev, "event: ADDBA Request\n");
266 if (priv->media_connected)
267 mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
268 HostCmd_ACT_GEN_SET, 0,
269 adapter->event_body);
270 break;
271 case EVENT_DELBA:
272 dev_dbg(adapter->dev, "event: DELBA Request\n");
273 if (priv->media_connected)
274 mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
275 break;
276 case EVENT_BA_STREAM_TIEMOUT:
277 dev_dbg(adapter->dev, "event: BA Stream timeout\n");
278 if (priv->media_connected) {
279 ba_timeout = (void *)adapter->event_body;
280 mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
281 }
282 break;
283 default:
284 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
285 eventcause);
286 break;
287 }
288
289 return 0;
290}
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
new file mode 100644
index 000000000000..0966ac24b3b4
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -0,0 +1,340 @@
1/*
2 * Marvell Wireless LAN device driver: AP TX and RX data handling
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "main.h"
23#include "wmm.h"
24#include "11n_aggr.h"
25#include "11n_rxreorder.h"
26
27static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
28 struct sk_buff *skb)
29{
30 struct mwifiex_adapter *adapter = priv->adapter;
31 struct uap_rxpd *uap_rx_pd;
32 struct rx_packet_hdr *rx_pkt_hdr;
33 struct sk_buff *new_skb;
34 struct mwifiex_txinfo *tx_info;
35 int hdr_chop;
36 struct timeval tv;
37 u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
38
39 uap_rx_pd = (struct uap_rxpd *)(skb->data);
40 rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
41
42 if ((atomic_read(&adapter->pending_bridged_pkts) >=
43 MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
44 dev_err(priv->adapter->dev,
45 "Tx: Bridge packet limit reached. Drop packet!\n");
46 kfree_skb(skb);
47 return;
48 }
49
50 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
51 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
52 /* Chop off the rxpd + the excess memory from
53 * 802.2/llc/snap header that was removed.
54 */
55 hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
56 else
57 /* Chop off the rxpd */
58 hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
59
60 /* Chop off the leading header bytes so the it points
61 * to the start of either the reconstructed EthII frame
62 * or the 802.2/llc/snap frame.
63 */
64 skb_pull(skb, hdr_chop);
65
66 if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
67 dev_dbg(priv->adapter->dev,
68 "data: Tx: insufficient skb headroom %d\n",
69 skb_headroom(skb));
70 /* Insufficient skb headroom - allocate a new skb */
71 new_skb =
72 skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
73 if (unlikely(!new_skb)) {
74 dev_err(priv->adapter->dev,
75 "Tx: cannot allocate new_skb\n");
76 kfree_skb(skb);
77 priv->stats.tx_dropped++;
78 return;
79 }
80
81 kfree_skb(skb);
82 skb = new_skb;
83 dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
84 skb_headroom(skb));
85 }
86
87 tx_info = MWIFIEX_SKB_TXCB(skb);
88 tx_info->bss_num = priv->bss_num;
89 tx_info->bss_type = priv->bss_type;
90 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
91
92 do_gettimeofday(&tv);
93 skb->tstamp = timeval_to_ktime(tv);
94 mwifiex_wmm_add_buf_txqueue(priv, skb);
95 atomic_inc(&adapter->tx_pending);
96 atomic_inc(&adapter->pending_bridged_pkts);
97
98 if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
99 mwifiex_set_trans_start(priv->netdev);
100 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
101 }
102 return;
103}
104
105/*
106 * This function contains logic for AP packet forwarding.
107 *
108 * If a packet is multicast/broadcast, it is sent to kernel/upper layer
109 * as well as queued back to AP TX queue so that it can be sent to other
110 * associated stations.
111 * If a packet is unicast and RA is present in associated station list,
112 * it is again requeued into AP TX queue.
113 * If a packet is unicast and RA is not in associated station list,
114 * packet is forwarded to kernel to handle routing logic.
115 */
116int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
117 struct sk_buff *skb)
118{
119 struct mwifiex_adapter *adapter = priv->adapter;
120 struct uap_rxpd *uap_rx_pd;
121 struct rx_packet_hdr *rx_pkt_hdr;
122 u8 ra[ETH_ALEN];
123 struct sk_buff *skb_uap;
124
125 uap_rx_pd = (struct uap_rxpd *)(skb->data);
126 rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
127
128 /* don't do packet forwarding in disconnected state */
129 if (!priv->media_connected) {
130 dev_err(adapter->dev, "drop packet in disconnected state.\n");
131 dev_kfree_skb_any(skb);
132 return 0;
133 }
134
135 memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);
136
137 if (is_multicast_ether_addr(ra)) {
138 skb_uap = skb_copy(skb, GFP_ATOMIC);
139 mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
140 } else {
141 if (mwifiex_get_sta_entry(priv, ra)) {
142 /* Requeue Intra-BSS packet */
143 mwifiex_uap_queue_bridged_pkt(priv, skb);
144 return 0;
145 }
146 }
147
148 /* Forward unicat/Inter-BSS packets to kernel. */
149 return mwifiex_process_rx_packet(adapter, skb);
150}
151
152/*
153 * This function processes the packet received on AP interface.
154 *
155 * The function looks into the RxPD and performs sanity tests on the
156 * received buffer to ensure its a valid packet before processing it
157 * further. If the packet is determined to be aggregated, it is
158 * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic.
159 *
160 * The completion callback is called after processing is complete.
161 */
162int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
163 struct sk_buff *skb)
164{
165 int ret;
166 struct uap_rxpd *uap_rx_pd;
167 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
168 struct rx_packet_hdr *rx_pkt_hdr;
169 u16 rx_pkt_type;
170 u8 ta[ETH_ALEN], pkt_type;
171 struct mwifiex_sta_node *node;
172
173 struct mwifiex_private *priv =
174 mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
175 rx_info->bss_type);
176
177 if (!priv)
178 return -1;
179
180 uap_rx_pd = (struct uap_rxpd *)(skb->data);
181 rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
182 rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
183
184 if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
185 le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
186 dev_err(adapter->dev,
187 "wrong rx packet: len=%d, offset=%d, length=%d\n",
188 skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
189 le16_to_cpu(uap_rx_pd->rx_pkt_length));
190 priv->stats.rx_dropped++;
191
192 if (adapter->if_ops.data_complete)
193 adapter->if_ops.data_complete(adapter, skb);
194 else
195 dev_kfree_skb_any(skb);
196
197 return 0;
198 }
199
200 if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
201 struct sk_buff_head list;
202 struct sk_buff *rx_skb;
203
204 __skb_queue_head_init(&list);
205 skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
206 skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
207
208 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
209 priv->wdev->iftype, 0, false);
210
211 while (!skb_queue_empty(&list)) {
212 rx_skb = __skb_dequeue(&list);
213 ret = mwifiex_recv_packet(adapter, rx_skb);
214 if (ret)
215 dev_err(adapter->dev,
216 "AP:Rx A-MSDU failed");
217 }
218
219 return 0;
220 } else if (rx_pkt_type == PKT_TYPE_MGMT) {
221 ret = mwifiex_process_mgmt_packet(adapter, skb);
222 if (ret)
223 dev_err(adapter->dev, "Rx of mgmt packet failed");
224 dev_kfree_skb_any(skb);
225 return ret;
226 }
227
228 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
229
230 if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
231 node = mwifiex_get_sta_entry(priv, ta);
232 if (node)
233 node->rx_seq[uap_rx_pd->priority] =
234 le16_to_cpu(uap_rx_pd->seq_num);
235 }
236
237 if (!priv->ap_11n_enabled ||
238 (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
239 (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
240 ret = mwifiex_handle_uap_rx_forward(priv, skb);
241 return ret;
242 }
243
244 /* Reorder and send to kernel */
245 pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
246 ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
247 uap_rx_pd->priority, ta, pkt_type,
248 skb);
249
250 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
251 if (adapter->if_ops.data_complete)
252 adapter->if_ops.data_complete(adapter, skb);
253 else
254 dev_kfree_skb_any(skb);
255 }
256
257 if (ret)
258 priv->stats.rx_dropped++;
259
260 return ret;
261}
262
263/*
264 * This function fills the TxPD for AP tx packets.
265 *
266 * The Tx buffer received by this function should already have the
267 * header space allocated for TxPD.
268 *
269 * This function inserts the TxPD in between interface header and actual
270 * data and adjusts the buffer pointers accordingly.
271 *
272 * The following TxPD fields are set by this function, as required -
273 * - BSS number
274 * - Tx packet length and offset
275 * - Priority
276 * - Packet delay
277 * - Priority specific Tx control
278 * - Flags
279 */
280void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
281 struct sk_buff *skb)
282{
283 struct mwifiex_adapter *adapter = priv->adapter;
284 struct uap_txpd *txpd;
285 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
286 int pad, len;
287 u16 pkt_type;
288
289 if (!skb->len) {
290 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
291 tx_info->status_code = -1;
292 return skb->data;
293 }
294
295 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
296
297 /* If skb->data is not aligned, add padding */
298 pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
299
300 len = sizeof(*txpd) + pad;
301
302 BUG_ON(skb_headroom(skb) < len + INTF_HEADER_LEN);
303
304 skb_push(skb, len);
305
306 txpd = (struct uap_txpd *)skb->data;
307 memset(txpd, 0, sizeof(*txpd));
308 txpd->bss_num = priv->bss_num;
309 txpd->bss_type = priv->bss_type;
310 txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));
311
312 txpd->priority = (u8)skb->priority;
313 txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
314
315 if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
316 /*
317 * Set the priority specific tx_control field, setting of 0 will
318 * cause the default value to be used later in this function.
319 */
320 txpd->tx_control =
321 cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);
322
323 /* Offset of actual data */
324 if (pkt_type == PKT_TYPE_MGMT) {
325 /* Set the packet type and add header for management frame */
326 txpd->tx_pkt_type = cpu_to_le16(pkt_type);
327 len += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
328 }
329
330 txpd->tx_pkt_offset = cpu_to_le16(len);
331
332 /* make space for INTF_HEADER_LEN */
333 skb_push(skb, INTF_HEADER_LEN);
334
335 if (!txpd->tx_control)
336 /* TxCtrl set by user or default */
337 txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
338
339 return skb->data;
340}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 2864c74bdb6f..ae88f80cf86b 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -142,6 +142,46 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
142} 142}
143 143
144/* 144/*
145 * This function processes the received management packet and send it
146 * to the kernel.
147 */
148int
149mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
150 struct sk_buff *skb)
151{
152 struct rxpd *rx_pd;
153 struct mwifiex_private *priv;
154 u16 pkt_len;
155
156 if (!skb)
157 return -1;
158
159 rx_pd = (struct rxpd *)skb->data;
160 priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
161 if (!priv)
162 return -1;
163
164 skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
165 skb_pull(skb, sizeof(pkt_len));
166
167 pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
168
169 /* Remove address4 */
170 memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
171 skb->data + sizeof(struct ieee80211_hdr),
172 pkt_len - sizeof(struct ieee80211_hdr));
173
174 pkt_len -= ETH_ALEN + sizeof(pkt_len);
175 rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
176
177 cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
178 CAL_RSSI(rx_pd->snr, rx_pd->nf),
179 skb->data, pkt_len, GFP_ATOMIC);
180
181 return 0;
182}
183
184/*
145 * This function processes the received packet before sending it to the 185 * This function processes the received packet before sending it to the
146 * kernel. 186 * kernel.
147 * 187 *
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 3fa4d4176993..600d8194610e 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -127,6 +127,29 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
127 return ra_list; 127 return ra_list;
128} 128}
129 129
130/* This function returns random no between 16 and 32 to be used as threshold
131 * for no of packets after which BA setup is initiated.
132 */
133static u8 mwifiex_get_random_ba_threshold(void)
134{
135 u32 sec, usec;
136 struct timeval ba_tstamp;
137 u8 ba_threshold;
138
139 /* setup ba_packet_threshold here random number between
140 * [BA_SETUP_PACKET_OFFSET,
141 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
142 */
143
144 do_gettimeofday(&ba_tstamp);
145 sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
146 usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
147 ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
148 + BA_SETUP_PACKET_OFFSET;
149
150 return ba_threshold;
151}
152
130/* 153/*
131 * This function allocates and adds a RA list for all TIDs 154 * This function allocates and adds a RA list for all TIDs
132 * with the given RA. 155 * with the given RA.
@@ -137,6 +160,12 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
137 int i; 160 int i;
138 struct mwifiex_ra_list_tbl *ra_list; 161 struct mwifiex_ra_list_tbl *ra_list;
139 struct mwifiex_adapter *adapter = priv->adapter; 162 struct mwifiex_adapter *adapter = priv->adapter;
163 struct mwifiex_sta_node *node;
164 unsigned long flags;
165
166 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
167 node = mwifiex_get_sta_entry(priv, ra);
168 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
140 169
141 for (i = 0; i < MAX_NUM_TID; ++i) { 170 for (i = 0; i < MAX_NUM_TID; ++i) {
142 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra); 171 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -145,14 +174,24 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
145 if (!ra_list) 174 if (!ra_list)
146 break; 175 break;
147 176
148 if (!mwifiex_queuing_ra_based(priv)) 177 ra_list->is_11n_enabled = 0;
178 if (!mwifiex_queuing_ra_based(priv)) {
149 ra_list->is_11n_enabled = IS_11N_ENABLED(priv); 179 ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
150 else 180 } else {
151 ra_list->is_11n_enabled = false; 181 ra_list->is_11n_enabled =
182 mwifiex_is_sta_11n_enabled(priv, node);
183 if (ra_list->is_11n_enabled)
184 ra_list->max_amsdu = node->max_amsdu;
185 }
152 186
153 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n", 187 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
154 ra_list, ra_list->is_11n_enabled); 188 ra_list, ra_list->is_11n_enabled);
155 189
190 if (ra_list->is_11n_enabled) {
191 ra_list->pkt_count = 0;
192 ra_list->ba_packet_thr =
193 mwifiex_get_random_ba_threshold();
194 }
156 list_add_tail(&ra_list->list, 195 list_add_tail(&ra_list->list,
157 &priv->wmm.tid_tbl_ptr[i].ra_list); 196 &priv->wmm.tid_tbl_ptr[i].ra_list);
158 197
@@ -423,7 +462,7 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
423 for (i = 0; i < adapter->priv_num; ++i) { 462 for (i = 0; i < adapter->priv_num; ++i) {
424 priv = adapter->priv[i]; 463 priv = adapter->priv[i];
425 if (priv && atomic_read(&priv->wmm.tx_pkts_queued)) 464 if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
426 return false; 465 return false;
427 } 466 }
428 467
429 return true; 468 return true;
@@ -609,7 +648,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
609 u8 ra[ETH_ALEN], tid_down; 648 u8 ra[ETH_ALEN], tid_down;
610 unsigned long flags; 649 unsigned long flags;
611 650
612 if (!priv->media_connected) { 651 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
613 dev_dbg(adapter->dev, "data: drop packet in disconnect\n"); 652 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
614 mwifiex_write_data_complete(adapter, skb, -1); 653 mwifiex_write_data_complete(adapter, skb, -1);
615 return; 654 return;
@@ -624,7 +663,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
624 /* In case of infra as we have already created the list during 663 /* In case of infra as we have already created the list during
625 association we just don't have to call get_queue_raptr, we will 664 association we just don't have to call get_queue_raptr, we will
626 have only 1 raptr for a tid in case of infra */ 665 have only 1 raptr for a tid in case of infra */
627 if (!mwifiex_queuing_ra_based(priv)) { 666 if (!mwifiex_queuing_ra_based(priv) &&
667 !mwifiex_is_skb_mgmt_frame(skb)) {
628 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list)) 668 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
629 ra_list = list_first_entry( 669 ra_list = list_first_entry(
630 &priv->wmm.tid_tbl_ptr[tid_down].ra_list, 670 &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
@@ -633,7 +673,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
633 ra_list = NULL; 673 ra_list = NULL;
634 } else { 674 } else {
635 memcpy(ra, skb->data, ETH_ALEN); 675 memcpy(ra, skb->data, ETH_ALEN);
636 if (ra[0] & 0x01) 676 if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
637 memset(ra, 0xff, ETH_ALEN); 677 memset(ra, 0xff, ETH_ALEN);
638 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); 678 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
639 } 679 }
@@ -647,6 +687,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
647 skb_queue_tail(&ra_list->skb_head, skb); 687 skb_queue_tail(&ra_list->skb_head, skb);
648 688
649 ra_list->total_pkts_size += skb->len; 689 ra_list->total_pkts_size += skb->len;
690 ra_list->pkt_count++;
650 691
651 atomic_inc(&priv->wmm.tx_pkts_queued); 692 atomic_inc(&priv->wmm.tx_pkts_queued);
652 693
@@ -867,17 +908,16 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
867 if (adapter->bss_prio_tbl[j].bss_prio_cur == 908 if (adapter->bss_prio_tbl[j].bss_prio_cur ==
868 (struct mwifiex_bss_prio_node *) 909 (struct mwifiex_bss_prio_node *)
869 &adapter->bss_prio_tbl[j].bss_prio_head) { 910 &adapter->bss_prio_tbl[j].bss_prio_head) {
870 bssprio_node = 911 adapter->bss_prio_tbl[j].bss_prio_cur =
871 list_first_entry(&adapter->bss_prio_tbl[j] 912 list_first_entry(&adapter->bss_prio_tbl[j]
872 .bss_prio_head, 913 .bss_prio_head,
873 struct mwifiex_bss_prio_node, 914 struct mwifiex_bss_prio_node,
874 list); 915 list);
875 bssprio_head = bssprio_node;
876 } else {
877 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
878 bssprio_head = bssprio_node;
879 } 916 }
880 917
918 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
919 bssprio_head = bssprio_node;
920
881 do { 921 do {
882 priv_tmp = bssprio_node->priv; 922 priv_tmp = bssprio_node->priv;
883 hqp = &priv_tmp->wmm.highest_queued_prio; 923 hqp = &priv_tmp->wmm.highest_queued_prio;
@@ -986,10 +1026,17 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
986{ 1026{
987 int count = 0, total_size = 0; 1027 int count = 0, total_size = 0;
988 struct sk_buff *skb, *tmp; 1028 struct sk_buff *skb, *tmp;
1029 int max_amsdu_size;
1030
1031 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
1032 ptr->is_11n_enabled)
1033 max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
1034 else
1035 max_amsdu_size = max_buf_size;
989 1036
990 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) { 1037 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
991 total_size += skb->len; 1038 total_size += skb->len;
992 if (total_size >= max_buf_size) 1039 if (total_size >= max_amsdu_size)
993 break; 1040 break;
994 if (++count >= MIN_NUM_AMSDU) 1041 if (++count >= MIN_NUM_AMSDU)
995 return true; 1042 return true;
@@ -1050,6 +1097,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
1050 skb_queue_tail(&ptr->skb_head, skb); 1097 skb_queue_tail(&ptr->skb_head, skb);
1051 1098
1052 ptr->total_pkts_size += skb->len; 1099 ptr->total_pkts_size += skb->len;
1100 ptr->pkt_count++;
1053 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; 1101 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1054 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1102 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1055 ra_list_flags); 1103 ra_list_flags);
@@ -1231,7 +1279,8 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1231 /* ra_list_spinlock has been freed in 1279 /* ra_list_spinlock has been freed in
1232 mwifiex_send_single_packet() */ 1280 mwifiex_send_single_packet() */
1233 } else { 1281 } else {
1234 if (mwifiex_is_ampdu_allowed(priv, tid)) { 1282 if (mwifiex_is_ampdu_allowed(priv, tid) &&
1283 ptr->pkt_count > ptr->ba_packet_thr) {
1235 if (mwifiex_space_avail_for_new_ba_stream(adapter)) { 1284 if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1236 mwifiex_create_ba_tbl(priv, ptr->ra, tid, 1285 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1237 BA_SETUP_INPROGRESS); 1286 BA_SETUP_INPROGRESS);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 224e03ade145..5099e5375cb3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
1830} 1830}
1831 1831
1832static void 1832static void
1833mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) 1833mwl8k_txq_xmit(struct ieee80211_hw *hw,
1834 int index,
1835 struct ieee80211_sta *sta,
1836 struct sk_buff *skb)
1834{ 1837{
1835 struct mwl8k_priv *priv = hw->priv; 1838 struct mwl8k_priv *priv = hw->priv;
1836 struct ieee80211_tx_info *tx_info; 1839 struct ieee80211_tx_info *tx_info;
1837 struct mwl8k_vif *mwl8k_vif; 1840 struct mwl8k_vif *mwl8k_vif;
1838 struct ieee80211_sta *sta;
1839 struct ieee80211_hdr *wh; 1841 struct ieee80211_hdr *wh;
1840 struct mwl8k_tx_queue *txq; 1842 struct mwl8k_tx_queue *txq;
1841 struct mwl8k_tx_desc *tx; 1843 struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1867 wh = &((struct mwl8k_dma_data *)skb->data)->wh; 1869 wh = &((struct mwl8k_dma_data *)skb->data)->wh;
1868 1870
1869 tx_info = IEEE80211_SKB_CB(skb); 1871 tx_info = IEEE80211_SKB_CB(skb);
1870 sta = tx_info->control.sta;
1871 mwl8k_vif = MWL8K_VIF(tx_info->control.vif); 1872 mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
1872 1873
1873 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1874 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
2019 tx->pkt_phys_addr = cpu_to_le32(dma); 2020 tx->pkt_phys_addr = cpu_to_le32(dma);
2020 tx->pkt_len = cpu_to_le16(skb->len); 2021 tx->pkt_len = cpu_to_le16(skb->len);
2021 tx->rate_info = 0; 2022 tx->rate_info = 0;
2022 if (!priv->ap_fw && tx_info->control.sta != NULL) 2023 if (!priv->ap_fw && sta != NULL)
2023 tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id; 2024 tx->peer_id = MWL8K_STA(sta)->peer_id;
2024 else 2025 else
2025 tx->peer_id = 0; 2026 tx->peer_id = 0;
2026 2027
@@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data)
4364/* 4365/*
4365 * Core driver operations. 4366 * Core driver operations.
4366 */ 4367 */
4367static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 4368static void mwl8k_tx(struct ieee80211_hw *hw,
4369 struct ieee80211_tx_control *control,
4370 struct sk_buff *skb)
4368{ 4371{
4369 struct mwl8k_priv *priv = hw->priv; 4372 struct mwl8k_priv *priv = hw->priv;
4370 int index = skb_get_queue_mapping(skb); 4373 int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
4376 return; 4379 return;
4377 } 4380 }
4378 4381
4379 mwl8k_txq_xmit(hw, index, skb); 4382 mwl8k_txq_xmit(hw, index, control->sta, skb);
4380} 4383}
4381 4384
4382static int mwl8k_start(struct ieee80211_hw *hw) 4385static int mwl8k_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 33747e131a96..3b5508f982e8 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -7,6 +7,7 @@
7#include <linux/if_arp.h> 7#include <linux/if_arp.h>
8#include <linux/wireless.h> 8#include <linux/wireless.h>
9#include <linux/ieee80211.h> 9#include <linux/ieee80211.h>
10#include <linux/etherdevice.h>
10#include <net/iw_handler.h> 11#include <net/iw_handler.h>
11#include <net/cfg80211.h> 12#include <net/cfg80211.h>
12#include <net/cfg80211-wext.h> 13#include <net/cfg80211-wext.h>
@@ -159,15 +160,13 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
159 struct orinoco_private *priv = ndev_priv(dev); 160 struct orinoco_private *priv = ndev_priv(dev);
160 int err = -EINPROGRESS; /* Call commit handler */ 161 int err = -EINPROGRESS; /* Call commit handler */
161 unsigned long flags; 162 unsigned long flags;
162 static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
163 static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
164 163
165 if (orinoco_lock(priv, &flags) != 0) 164 if (orinoco_lock(priv, &flags) != 0)
166 return -EBUSY; 165 return -EBUSY;
167 166
168 /* Enable automatic roaming - no sanity checks are needed */ 167 /* Enable automatic roaming - no sanity checks are needed */
169 if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 || 168 if (is_zero_ether_addr(ap_addr->sa_data) ||
170 memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) { 169 is_broadcast_ether_addr(ap_addr->sa_data)) {
171 priv->bssid_fixed = 0; 170 priv->bssid_fixed = 0;
172 memset(priv->desired_bssid, 0, ETH_ALEN); 171 memset(priv->desired_bssid, 0, ETH_ALEN);
173 172
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 14037092ba89..1ef1bfe6a9d7 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -76,6 +76,7 @@ struct p54_channel_entry {
76 u16 freq; 76 u16 freq;
77 u16 data; 77 u16 data;
78 int index; 78 int index;
79 int max_power;
79 enum ieee80211_band band; 80 enum ieee80211_band band;
80}; 81};
81 82
@@ -173,6 +174,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
173 for (i = 0, j = 0; (j < list->band_channel_num[band]) && 174 for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
174 (i < list->entries); i++) { 175 (i < list->entries); i++) {
175 struct p54_channel_entry *chan = &list->channels[i]; 176 struct p54_channel_entry *chan = &list->channels[i];
177 struct ieee80211_channel *dest = &tmp->channels[j];
176 178
177 if (chan->band != band) 179 if (chan->band != band)
178 continue; 180 continue;
@@ -190,14 +192,15 @@ static int p54_generate_band(struct ieee80211_hw *dev,
190 continue; 192 continue;
191 } 193 }
192 194
193 tmp->channels[j].band = chan->band; 195 dest->band = chan->band;
194 tmp->channels[j].center_freq = chan->freq; 196 dest->center_freq = chan->freq;
197 dest->max_power = chan->max_power;
195 priv->survey[*chan_num].channel = &tmp->channels[j]; 198 priv->survey[*chan_num].channel = &tmp->channels[j];
196 priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM | 199 priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
197 SURVEY_INFO_CHANNEL_TIME | 200 SURVEY_INFO_CHANNEL_TIME |
198 SURVEY_INFO_CHANNEL_TIME_BUSY | 201 SURVEY_INFO_CHANNEL_TIME_BUSY |
199 SURVEY_INFO_CHANNEL_TIME_TX; 202 SURVEY_INFO_CHANNEL_TIME_TX;
200 tmp->channels[j].hw_value = (*chan_num); 203 dest->hw_value = (*chan_num);
201 j++; 204 j++;
202 (*chan_num)++; 205 (*chan_num)++;
203 } 206 }
@@ -229,10 +232,11 @@ err_out:
229 return ret; 232 return ret;
230} 233}
231 234
232static void p54_update_channel_param(struct p54_channel_list *list, 235static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list,
233 u16 freq, u16 data) 236 u16 freq, u16 data)
234{ 237{
235 int band, i; 238 int i;
239 struct p54_channel_entry *entry = NULL;
236 240
237 /* 241 /*
238 * usually all lists in the eeprom are mostly sorted. 242 * usually all lists in the eeprom are mostly sorted.
@@ -241,30 +245,78 @@ static void p54_update_channel_param(struct p54_channel_list *list,
241 */ 245 */
242 for (i = list->entries; i >= 0; i--) { 246 for (i = list->entries; i >= 0; i--) {
243 if (freq == list->channels[i].freq) { 247 if (freq == list->channels[i].freq) {
244 list->channels[i].data |= data; 248 entry = &list->channels[i];
245 break; 249 break;
246 } 250 }
247 } 251 }
248 252
249 if ((i < 0) && (list->entries < list->max_entries)) { 253 if ((i < 0) && (list->entries < list->max_entries)) {
250 /* entry does not exist yet. Initialize a new one. */ 254 /* entry does not exist yet. Initialize a new one. */
251 band = p54_get_band_from_freq(freq); 255 int band = p54_get_band_from_freq(freq);
252 256
253 /* 257 /*
254 * filter out frequencies which don't belong into 258 * filter out frequencies which don't belong into
255 * any supported band. 259 * any supported band.
256 */ 260 */
257 if (band < 0) 261 if (band >= 0) {
258 return ; 262 i = list->entries++;
263 list->band_channel_num[band]++;
264
265 entry = &list->channels[i];
266 entry->freq = freq;
267 entry->band = band;
268 entry->index = ieee80211_frequency_to_channel(freq);
269 entry->max_power = 0;
270 entry->data = 0;
271 }
272 }
259 273
260 i = list->entries++; 274 if (entry)
261 list->band_channel_num[band]++; 275 entry->data |= data;
262 276
263 list->channels[i].freq = freq; 277 return entry;
264 list->channels[i].data = data; 278}
265 list->channels[i].band = band; 279
266 list->channels[i].index = ieee80211_frequency_to_channel(freq); 280static int p54_get_maxpower(struct p54_common *priv, void *data)
267 /* TODO: parse output_limit and fill max_power */ 281{
282 switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) {
283 case PDR_SYNTH_FRONTEND_LONGBOW: {
284 struct pda_channel_output_limit_longbow *pda = data;
285 int j;
286 u16 rawpower = 0;
287 pda = data;
288 for (j = 0; j < ARRAY_SIZE(pda->point); j++) {
289 struct pda_channel_output_limit_point_longbow *point =
290 &pda->point[j];
291 rawpower = max_t(u16,
292 rawpower, le16_to_cpu(point->val_qpsk));
293 rawpower = max_t(u16,
294 rawpower, le16_to_cpu(point->val_bpsk));
295 rawpower = max_t(u16,
296 rawpower, le16_to_cpu(point->val_16qam));
297 rawpower = max_t(u16,
298 rawpower, le16_to_cpu(point->val_64qam));
299 }
300 /* longbow seems to use 1/16 dBm units */
301 return rawpower / 16;
302 }
303
304 case PDR_SYNTH_FRONTEND_DUETTE3:
305 case PDR_SYNTH_FRONTEND_DUETTE2:
306 case PDR_SYNTH_FRONTEND_FRISBEE:
307 case PDR_SYNTH_FRONTEND_XBOW: {
308 struct pda_channel_output_limit *pda = data;
309 u8 rawpower = 0;
310 rawpower = max(rawpower, pda->val_qpsk);
311 rawpower = max(rawpower, pda->val_bpsk);
312 rawpower = max(rawpower, pda->val_16qam);
313 rawpower = max(rawpower, pda->val_64qam);
314 /* raw values are in 1/4 dBm units */
315 return rawpower / 4;
316 }
317
318 default:
319 return 20;
268 } 320 }
269} 321}
270 322
@@ -315,12 +367,19 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
315 } 367 }
316 368
317 if (i < priv->output_limit->entries) { 369 if (i < priv->output_limit->entries) {
318 freq = le16_to_cpup((__le16 *) (i * 370 struct p54_channel_entry *tmp;
319 priv->output_limit->entry_size + 371
320 priv->output_limit->offset + 372 void *data = (void *) ((unsigned long) i *
321 priv->output_limit->data)); 373 priv->output_limit->entry_size +
322 374 priv->output_limit->offset +
323 p54_update_channel_param(list, freq, CHAN_HAS_LIMIT); 375 priv->output_limit->data);
376
377 freq = le16_to_cpup((__le16 *) data);
378 tmp = p54_update_channel_param(list, freq,
379 CHAN_HAS_LIMIT);
380 if (tmp) {
381 tmp->max_power = p54_get_maxpower(priv, data);
382 }
324 } 383 }
325 384
326 if (i < priv->curve_data->entries) { 385 if (i < priv->curve_data->entries) {
@@ -834,11 +893,12 @@ good_eeprom:
834 goto err; 893 goto err;
835 } 894 }
836 895
896 priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
897
837 err = p54_generate_channel_lists(dev); 898 err = p54_generate_channel_lists(dev);
838 if (err) 899 if (err)
839 goto err; 900 goto err;
840 901
841 priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
842 if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) 902 if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
843 p54_init_xbow_synth(priv); 903 p54_init_xbow_synth(priv);
844 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) 904 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index afde72b84606..20ebe39a3f4e 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -57,6 +57,18 @@ struct pda_channel_output_limit {
57 u8 rate_set_size; 57 u8 rate_set_size;
58} __packed; 58} __packed;
59 59
60struct pda_channel_output_limit_point_longbow {
61 __le16 val_bpsk;
62 __le16 val_qpsk;
63 __le16 val_16qam;
64 __le16 val_64qam;
65} __packed;
66
67struct pda_channel_output_limit_longbow {
68 __le16 freq;
69 struct pda_channel_output_limit_point_longbow point[3];
70} __packed;
71
60struct pda_pa_curve_data_sample_rev0 { 72struct pda_pa_curve_data_sample_rev0 {
61 u8 rf_power; 73 u8 rf_power;
62 u8 pa_detector; 74 u8 pa_detector;
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 3d8d622bec55..de1d46bf97df 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv);
526void p54_unregister_leds(struct p54_common *priv); 526void p54_unregister_leds(struct p54_common *priv);
527 527
528/* xmit functions */ 528/* xmit functions */
529void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); 529void p54_tx_80211(struct ieee80211_hw *dev,
530 struct ieee80211_tx_control *control,
531 struct sk_buff *skb);
530int p54_tx_cancel(struct p54_common *priv, __le32 req_id); 532int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
531void p54_tx(struct p54_common *priv, struct sk_buff *skb); 533void p54_tx(struct p54_common *priv, struct sk_buff *skb);
532 534
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 7cffea795ad2..aadda99989c0 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -139,6 +139,7 @@ static int p54_beacon_format_ie_tim(struct sk_buff *skb)
139static int p54_beacon_update(struct p54_common *priv, 139static int p54_beacon_update(struct p54_common *priv,
140 struct ieee80211_vif *vif) 140 struct ieee80211_vif *vif)
141{ 141{
142 struct ieee80211_tx_control control = { };
142 struct sk_buff *beacon; 143 struct sk_buff *beacon;
143 int ret; 144 int ret;
144 145
@@ -158,7 +159,7 @@ static int p54_beacon_update(struct p54_common *priv,
158 * to cancel the old beacon template by hand, instead the firmware 159 * to cancel the old beacon template by hand, instead the firmware
159 * will release the previous one through the feedback mechanism. 160 * will release the previous one through the feedback mechanism.
160 */ 161 */
161 p54_tx_80211(priv->hw, beacon); 162 p54_tx_80211(priv->hw, &control, beacon);
162 priv->tsf_high32 = 0; 163 priv->tsf_high32 = 0;
163 priv->tsf_low32 = 0; 164 priv->tsf_low32 = 0;
164 165
@@ -514,6 +515,17 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
514 if (modparam_nohwcrypt) 515 if (modparam_nohwcrypt)
515 return -EOPNOTSUPP; 516 return -EOPNOTSUPP;
516 517
518 if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
519 /*
520 * Unfortunately most/all firmwares are trying to decrypt
521 * incoming management frames if a suitable key can be found.
522 * However, in doing so the data in these frames gets
523 * corrupted. So, we can't have firmware supported crypto
524 * offload in this case.
525 */
526 return -EOPNOTSUPP;
527 }
528
517 mutex_lock(&priv->conf_mutex); 529 mutex_lock(&priv->conf_mutex);
518 if (cmd == SET_KEY) { 530 if (cmd == SET_KEY) {
519 switch (key->cipher) { 531 switch (key->cipher) {
@@ -737,6 +749,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
737 IEEE80211_HW_SIGNAL_DBM | 749 IEEE80211_HW_SIGNAL_DBM |
738 IEEE80211_HW_SUPPORTS_PS | 750 IEEE80211_HW_SUPPORTS_PS |
739 IEEE80211_HW_PS_NULLFUNC_STACK | 751 IEEE80211_HW_PS_NULLFUNC_STACK |
752 IEEE80211_HW_MFP_CAPABLE |
740 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 753 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
741 754
742 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 755 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 89318adc8c7f..b4390797d78c 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -488,6 +488,58 @@ static int p54p_open(struct ieee80211_hw *dev)
488 return 0; 488 return 0;
489} 489}
490 490
491static void p54p_firmware_step2(const struct firmware *fw,
492 void *context)
493{
494 struct p54p_priv *priv = context;
495 struct ieee80211_hw *dev = priv->common.hw;
496 struct pci_dev *pdev = priv->pdev;
497 int err;
498
499 if (!fw) {
500 dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
501 err = -ENOENT;
502 goto out;
503 }
504
505 priv->firmware = fw;
506
507 err = p54p_open(dev);
508 if (err)
509 goto out;
510 err = p54_read_eeprom(dev);
511 p54p_stop(dev);
512 if (err)
513 goto out;
514
515 err = p54_register_common(dev, &pdev->dev);
516 if (err)
517 goto out;
518
519out:
520
521 complete(&priv->fw_loaded);
522
523 if (err) {
524 struct device *parent = pdev->dev.parent;
525
526 if (parent)
527 device_lock(parent);
528
529 /*
530 * This will indirectly result in a call to p54p_remove.
531 * Hence, we don't need to bother with freeing any
532 * allocated ressources at all.
533 */
534 device_release_driver(&pdev->dev);
535
536 if (parent)
537 device_unlock(parent);
538 }
539
540 pci_dev_put(pdev);
541}
542
491static int __devinit p54p_probe(struct pci_dev *pdev, 543static int __devinit p54p_probe(struct pci_dev *pdev,
492 const struct pci_device_id *id) 544 const struct pci_device_id *id)
493{ 545{
@@ -496,6 +548,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
496 unsigned long mem_addr, mem_len; 548 unsigned long mem_addr, mem_len;
497 int err; 549 int err;
498 550
551 pci_dev_get(pdev);
499 err = pci_enable_device(pdev); 552 err = pci_enable_device(pdev);
500 if (err) { 553 if (err) {
501 dev_err(&pdev->dev, "Cannot enable new PCI device\n"); 554 dev_err(&pdev->dev, "Cannot enable new PCI device\n");
@@ -537,6 +590,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
537 priv = dev->priv; 590 priv = dev->priv;
538 priv->pdev = pdev; 591 priv->pdev = pdev;
539 592
593 init_completion(&priv->fw_loaded);
540 SET_IEEE80211_DEV(dev, &pdev->dev); 594 SET_IEEE80211_DEV(dev, &pdev->dev);
541 pci_set_drvdata(pdev, dev); 595 pci_set_drvdata(pdev, dev);
542 596
@@ -561,32 +615,12 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
561 spin_lock_init(&priv->lock); 615 spin_lock_init(&priv->lock);
562 tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); 616 tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
563 617
564 err = request_firmware(&priv->firmware, "isl3886pci", 618 err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
565 &priv->pdev->dev); 619 &priv->pdev->dev, GFP_KERNEL,
566 if (err) { 620 priv, p54p_firmware_step2);
567 dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); 621 if (!err)
568 err = request_firmware(&priv->firmware, "isl3886", 622 return 0;
569 &priv->pdev->dev);
570 if (err)
571 goto err_free_common;
572 }
573
574 err = p54p_open(dev);
575 if (err)
576 goto err_free_common;
577 err = p54_read_eeprom(dev);
578 p54p_stop(dev);
579 if (err)
580 goto err_free_common;
581
582 err = p54_register_common(dev, &pdev->dev);
583 if (err)
584 goto err_free_common;
585
586 return 0;
587 623
588 err_free_common:
589 release_firmware(priv->firmware);
590 pci_free_consistent(pdev, sizeof(*priv->ring_control), 624 pci_free_consistent(pdev, sizeof(*priv->ring_control),
591 priv->ring_control, priv->ring_control_dma); 625 priv->ring_control, priv->ring_control_dma);
592 626
@@ -601,6 +635,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
601 pci_release_regions(pdev); 635 pci_release_regions(pdev);
602 err_disable_dev: 636 err_disable_dev:
603 pci_disable_device(pdev); 637 pci_disable_device(pdev);
638 pci_dev_put(pdev);
604 return err; 639 return err;
605} 640}
606 641
@@ -612,8 +647,9 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
612 if (!dev) 647 if (!dev)
613 return; 648 return;
614 649
615 p54_unregister_common(dev);
616 priv = dev->priv; 650 priv = dev->priv;
651 wait_for_completion(&priv->fw_loaded);
652 p54_unregister_common(dev);
617 release_firmware(priv->firmware); 653 release_firmware(priv->firmware);
618 pci_free_consistent(pdev, sizeof(*priv->ring_control), 654 pci_free_consistent(pdev, sizeof(*priv->ring_control),
619 priv->ring_control, priv->ring_control_dma); 655 priv->ring_control, priv->ring_control_dma);
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 7aa509f7e387..68405c142f97 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -105,6 +105,7 @@ struct p54p_priv {
105 struct sk_buff *tx_buf_data[32]; 105 struct sk_buff *tx_buf_data[32];
106 struct sk_buff *tx_buf_mgmt[4]; 106 struct sk_buff *tx_buf_mgmt[4];
107 struct completion boot_comp; 107 struct completion boot_comp;
108 struct completion fw_loaded;
108}; 109};
109 110
110#endif /* P54USB_H */ 111#endif /* P54USB_H */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f38786e02623..5861e13a6fd8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
676EXPORT_SYMBOL_GPL(p54_rx); 676EXPORT_SYMBOL_GPL(p54_rx);
677 677
678static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, 678static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
679 struct ieee80211_tx_info *info, u8 *queue, 679 struct ieee80211_tx_info *info,
680 u32 *extra_len, u16 *flags, u16 *aid, 680 struct ieee80211_sta *sta,
681 u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
681 bool *burst_possible) 682 bool *burst_possible)
682{ 683{
683 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 684 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
746 } 747 }
747 } 748 }
748 749
749 if (info->control.sta) 750 if (sta)
750 *aid = info->control.sta->aid; 751 *aid = sta->aid;
751 break; 752 break;
752 } 753 }
753} 754}
@@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher)
767 } 768 }
768} 769}
769 770
770void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) 771void p54_tx_80211(struct ieee80211_hw *dev,
772 struct ieee80211_tx_control *control,
773 struct sk_buff *skb)
771{ 774{
772 struct p54_common *priv = dev->priv; 775 struct p54_common *priv = dev->priv;
773 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 776 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
784 u8 nrates = 0, nremaining = 8; 787 u8 nrates = 0, nremaining = 8;
785 bool burst_allowed = false; 788 bool burst_allowed = false;
786 789
787 p54_tx_80211_header(priv, skb, info, &queue, &extra_len, 790 p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
788 &hdr_flags, &aid, &burst_allowed); 791 &hdr_flags, &aid, &burst_allowed);
789 792
790 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { 793 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 7a4ae9ee1c63..bd1f0cb56085 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1959,9 +1959,6 @@ static int rndis_scan(struct wiphy *wiphy,
1959 */ 1959 */
1960 rndis_check_bssid_list(usbdev, NULL, NULL); 1960 rndis_check_bssid_list(usbdev, NULL, NULL);
1961 1961
1962 if (!request)
1963 return -EINVAL;
1964
1965 if (priv->scan_request && priv->scan_request != request) 1962 if (priv->scan_request && priv->scan_request != request)
1966 return -EBUSY; 1963 return -EBUSY;
1967 1964
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 64328af496f5..e3a2d9070cf6 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -205,7 +205,7 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
205 u32 reg; 205 u32 reg;
206 206
207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
208 return rt2x00_get_field32(reg, GPIOCSR_BIT0); 208 return rt2x00_get_field32(reg, GPIOCSR_VAL0);
209} 209}
210 210
211#ifdef CONFIG_RT2X00_LIB_LEDS 211#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1629,7 +1629,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1629 * rfkill switch GPIO pin correctly. 1629 * rfkill switch GPIO pin correctly.
1630 */ 1630 */
1631 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 1631 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
1632 rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1); 1632 rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
1633 rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); 1633 rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
1634 1634
1635 /* 1635 /*
@@ -1789,7 +1789,6 @@ static const struct data_queue_desc rt2400pci_queue_atim = {
1789 1789
1790static const struct rt2x00_ops rt2400pci_ops = { 1790static const struct rt2x00_ops rt2400pci_ops = {
1791 .name = KBUILD_MODNAME, 1791 .name = KBUILD_MODNAME,
1792 .max_sta_intf = 1,
1793 .max_ap_intf = 1, 1792 .max_ap_intf = 1,
1794 .eeprom_size = EEPROM_SIZE, 1793 .eeprom_size = EEPROM_SIZE,
1795 .rf_size = RF_SIZE, 1794 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index 7564ae992b73..e4b07f0aa3cc 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -660,17 +660,26 @@
660 660
661/* 661/*
662 * GPIOCSR: GPIO control register. 662 * GPIOCSR: GPIO control register.
663 * GPIOCSR_VALx: Actual GPIO pin x value
664 * GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
663 */ 665 */
664#define GPIOCSR 0x0120 666#define GPIOCSR 0x0120
665#define GPIOCSR_BIT0 FIELD32(0x00000001) 667#define GPIOCSR_VAL0 FIELD32(0x00000001)
666#define GPIOCSR_BIT1 FIELD32(0x00000002) 668#define GPIOCSR_VAL1 FIELD32(0x00000002)
667#define GPIOCSR_BIT2 FIELD32(0x00000004) 669#define GPIOCSR_VAL2 FIELD32(0x00000004)
668#define GPIOCSR_BIT3 FIELD32(0x00000008) 670#define GPIOCSR_VAL3 FIELD32(0x00000008)
669#define GPIOCSR_BIT4 FIELD32(0x00000010) 671#define GPIOCSR_VAL4 FIELD32(0x00000010)
670#define GPIOCSR_BIT5 FIELD32(0x00000020) 672#define GPIOCSR_VAL5 FIELD32(0x00000020)
671#define GPIOCSR_BIT6 FIELD32(0x00000040) 673#define GPIOCSR_VAL6 FIELD32(0x00000040)
672#define GPIOCSR_BIT7 FIELD32(0x00000080) 674#define GPIOCSR_VAL7 FIELD32(0x00000080)
673#define GPIOCSR_BIT8 FIELD32(0x00000100) 675#define GPIOCSR_DIR0 FIELD32(0x00000100)
676#define GPIOCSR_DIR1 FIELD32(0x00000200)
677#define GPIOCSR_DIR2 FIELD32(0x00000400)
678#define GPIOCSR_DIR3 FIELD32(0x00000800)
679#define GPIOCSR_DIR4 FIELD32(0x00001000)
680#define GPIOCSR_DIR5 FIELD32(0x00002000)
681#define GPIOCSR_DIR6 FIELD32(0x00004000)
682#define GPIOCSR_DIR7 FIELD32(0x00008000)
674 683
675/* 684/*
676 * BBPPCSR: BBP Pin control register. 685 * BBPPCSR: BBP Pin control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 3de0406735f6..479d756e275b 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -205,7 +205,7 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
205 u32 reg; 205 u32 reg;
206 206
207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
208 return rt2x00_get_field32(reg, GPIOCSR_BIT0); 208 return rt2x00_get_field32(reg, GPIOCSR_VAL0);
209} 209}
210 210
211#ifdef CONFIG_RT2X00_LIB_LEDS 211#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2081,7 +2081,6 @@ static const struct data_queue_desc rt2500pci_queue_atim = {
2081 2081
2082static const struct rt2x00_ops rt2500pci_ops = { 2082static const struct rt2x00_ops rt2500pci_ops = {
2083 .name = KBUILD_MODNAME, 2083 .name = KBUILD_MODNAME,
2084 .max_sta_intf = 1,
2085 .max_ap_intf = 1, 2084 .max_ap_intf = 1,
2086 .eeprom_size = EEPROM_SIZE, 2085 .eeprom_size = EEPROM_SIZE,
2087 .rf_size = RF_SIZE, 2086 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 2aad7ba8a100..9c10068e4987 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -789,16 +789,18 @@
789 789
790/* 790/*
791 * GPIOCSR: GPIO control register. 791 * GPIOCSR: GPIO control register.
792 * GPIOCSR_VALx: GPIO value
793 * GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
792 */ 794 */
793#define GPIOCSR 0x0120 795#define GPIOCSR 0x0120
794#define GPIOCSR_BIT0 FIELD32(0x00000001) 796#define GPIOCSR_VAL0 FIELD32(0x00000001)
795#define GPIOCSR_BIT1 FIELD32(0x00000002) 797#define GPIOCSR_VAL1 FIELD32(0x00000002)
796#define GPIOCSR_BIT2 FIELD32(0x00000004) 798#define GPIOCSR_VAL2 FIELD32(0x00000004)
797#define GPIOCSR_BIT3 FIELD32(0x00000008) 799#define GPIOCSR_VAL3 FIELD32(0x00000008)
798#define GPIOCSR_BIT4 FIELD32(0x00000010) 800#define GPIOCSR_VAL4 FIELD32(0x00000010)
799#define GPIOCSR_BIT5 FIELD32(0x00000020) 801#define GPIOCSR_VAL5 FIELD32(0x00000020)
800#define GPIOCSR_BIT6 FIELD32(0x00000040) 802#define GPIOCSR_VAL6 FIELD32(0x00000040)
801#define GPIOCSR_BIT7 FIELD32(0x00000080) 803#define GPIOCSR_VAL7 FIELD32(0x00000080)
802#define GPIOCSR_DIR0 FIELD32(0x00000100) 804#define GPIOCSR_DIR0 FIELD32(0x00000100)
803#define GPIOCSR_DIR1 FIELD32(0x00000200) 805#define GPIOCSR_DIR1 FIELD32(0x00000200)
804#define GPIOCSR_DIR2 FIELD32(0x00000400) 806#define GPIOCSR_DIR2 FIELD32(0x00000400)
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 89fee311d8fd..a12e84f892be 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
283 u16 reg; 283 u16 reg;
284 284
285 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); 285 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
286 return rt2x00_get_field16(reg, MAC_CSR19_BIT7); 286 return rt2x00_get_field16(reg, MAC_CSR19_VAL7);
287} 287}
288 288
289#ifdef CONFIG_RT2X00_LIB_LEDS 289#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1786,7 +1786,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1786 * rfkill switch GPIO pin correctly. 1786 * rfkill switch GPIO pin correctly.
1787 */ 1787 */
1788 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); 1788 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
1789 rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0); 1789 rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
1790 rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); 1790 rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
1791 1791
1792 /* 1792 /*
@@ -1896,7 +1896,6 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
1896 1896
1897static const struct rt2x00_ops rt2500usb_ops = { 1897static const struct rt2x00_ops rt2500usb_ops = {
1898 .name = KBUILD_MODNAME, 1898 .name = KBUILD_MODNAME,
1899 .max_sta_intf = 1,
1900 .max_ap_intf = 1, 1899 .max_ap_intf = 1,
1901 .eeprom_size = EEPROM_SIZE, 1900 .eeprom_size = EEPROM_SIZE,
1902 .rf_size = RF_SIZE, 1901 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 196bd5103e4f..1b91a4cef965 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -187,17 +187,26 @@
187 187
188/* 188/*
189 * MAC_CSR19: GPIO control register. 189 * MAC_CSR19: GPIO control register.
190 * MAC_CSR19_VALx: GPIO value
191 * MAC_CSR19_DIRx: GPIO direction: 0 = input; 1 = output
190 */ 192 */
191#define MAC_CSR19 0x0426 193#define MAC_CSR19 0x0426
192#define MAC_CSR19_BIT0 FIELD16(0x0001) 194#define MAC_CSR19_VAL0 FIELD16(0x0001)
193#define MAC_CSR19_BIT1 FIELD16(0x0002) 195#define MAC_CSR19_VAL1 FIELD16(0x0002)
194#define MAC_CSR19_BIT2 FIELD16(0x0004) 196#define MAC_CSR19_VAL2 FIELD16(0x0004)
195#define MAC_CSR19_BIT3 FIELD16(0x0008) 197#define MAC_CSR19_VAL3 FIELD16(0x0008)
196#define MAC_CSR19_BIT4 FIELD16(0x0010) 198#define MAC_CSR19_VAL4 FIELD16(0x0010)
197#define MAC_CSR19_BIT5 FIELD16(0x0020) 199#define MAC_CSR19_VAL5 FIELD16(0x0020)
198#define MAC_CSR19_BIT6 FIELD16(0x0040) 200#define MAC_CSR19_VAL6 FIELD16(0x0040)
199#define MAC_CSR19_BIT7 FIELD16(0x0080) 201#define MAC_CSR19_VAL7 FIELD16(0x0080)
200#define MAC_CSR19_BIT8 FIELD16(0x0100) 202#define MAC_CSR19_DIR0 FIELD16(0x0100)
203#define MAC_CSR19_DIR1 FIELD16(0x0200)
204#define MAC_CSR19_DIR2 FIELD16(0x0400)
205#define MAC_CSR19_DIR3 FIELD16(0x0800)
206#define MAC_CSR19_DIR4 FIELD16(0x1000)
207#define MAC_CSR19_DIR5 FIELD16(0x2000)
208#define MAC_CSR19_DIR6 FIELD16(0x4000)
209#define MAC_CSR19_DIR7 FIELD16(0x8000)
201 210
202/* 211/*
203 * MAC_CSR20: LED control register. 212 * MAC_CSR20: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index e252e9bafd0e..6d67c3ede651 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -439,26 +439,33 @@
439#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000) 439#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
440 440
441/* 441/*
442 * GPIO_CTRL_CFG: 442 * GPIO_CTRL:
443 * GPIOD: GPIO direction, 0: Output, 1: Input 443 * GPIO_CTRL_VALx: GPIO value
444 */ 444 * GPIO_CTRL_DIRx: GPIO direction: 0 = output; 1 = input
445#define GPIO_CTRL_CFG 0x0228 445 */
446#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001) 446#define GPIO_CTRL 0x0228
447#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002) 447#define GPIO_CTRL_VAL0 FIELD32(0x00000001)
448#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004) 448#define GPIO_CTRL_VAL1 FIELD32(0x00000002)
449#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008) 449#define GPIO_CTRL_VAL2 FIELD32(0x00000004)
450#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010) 450#define GPIO_CTRL_VAL3 FIELD32(0x00000008)
451#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020) 451#define GPIO_CTRL_VAL4 FIELD32(0x00000010)
452#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040) 452#define GPIO_CTRL_VAL5 FIELD32(0x00000020)
453#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080) 453#define GPIO_CTRL_VAL6 FIELD32(0x00000040)
454#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100) 454#define GPIO_CTRL_VAL7 FIELD32(0x00000080)
455#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200) 455#define GPIO_CTRL_DIR0 FIELD32(0x00000100)
456#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400) 456#define GPIO_CTRL_DIR1 FIELD32(0x00000200)
457#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800) 457#define GPIO_CTRL_DIR2 FIELD32(0x00000400)
458#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000) 458#define GPIO_CTRL_DIR3 FIELD32(0x00000800)
459#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000) 459#define GPIO_CTRL_DIR4 FIELD32(0x00001000)
460#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000) 460#define GPIO_CTRL_DIR5 FIELD32(0x00002000)
461#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000) 461#define GPIO_CTRL_DIR6 FIELD32(0x00004000)
462#define GPIO_CTRL_DIR7 FIELD32(0x00008000)
463#define GPIO_CTRL_VAL8 FIELD32(0x00010000)
464#define GPIO_CTRL_VAL9 FIELD32(0x00020000)
465#define GPIO_CTRL_VAL10 FIELD32(0x00040000)
466#define GPIO_CTRL_DIR8 FIELD32(0x01000000)
467#define GPIO_CTRL_DIR9 FIELD32(0x02000000)
468#define GPIO_CTRL_DIR10 FIELD32(0x04000000)
462 469
463/* 470/*
464 * MCU_CMD_CFG 471 * MCU_CMD_CFG
@@ -1936,6 +1943,11 @@ struct mac_iveiv_entry {
1936#define BBP47_TSSI_ADC6 FIELD8(0x80) 1943#define BBP47_TSSI_ADC6 FIELD8(0x80)
1937 1944
1938/* 1945/*
1946 * BBP 49
1947 */
1948#define BBP49_UPDATE_FLAG FIELD8(0x01)
1949
1950/*
1939 * BBP 109 1951 * BBP 109
1940 */ 1952 */
1941#define BBP109_TX0_POWER FIELD8(0x0f) 1953#define BBP109_TX0_POWER FIELD8(0x0f)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b93516d832fb..540c94f8505a 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -923,8 +923,8 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
923 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); 923 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
924 return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0); 924 return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
925 } else { 925 } else {
926 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 926 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
927 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2); 927 return rt2x00_get_field32(reg, GPIO_CTRL_VAL2);
928 } 928 }
929} 929}
930EXPORT_SYMBOL_GPL(rt2800_rfkill_poll); 930EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
@@ -1570,10 +1570,10 @@ static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
1570 rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff, 1570 rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
1571 eesk_pin, 0); 1571 eesk_pin, 0);
1572 1572
1573 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 1573 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
1574 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0); 1574 rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
1575 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3); 1575 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3);
1576 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 1576 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
1577} 1577}
1578 1578
1579void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) 1579void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
@@ -1615,6 +1615,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1615 case 1: 1615 case 1:
1616 if (rt2x00_rt(rt2x00dev, RT3070) || 1616 if (rt2x00_rt(rt2x00dev, RT3070) ||
1617 rt2x00_rt(rt2x00dev, RT3090) || 1617 rt2x00_rt(rt2x00dev, RT3090) ||
1618 rt2x00_rt(rt2x00dev, RT3352) ||
1618 rt2x00_rt(rt2x00dev, RT3390)) { 1619 rt2x00_rt(rt2x00dev, RT3390)) {
1619 rt2x00_eeprom_read(rt2x00dev, 1620 rt2x00_eeprom_read(rt2x00dev,
1620 EEPROM_NIC_CONF1, &eeprom); 1621 EEPROM_NIC_CONF1, &eeprom);
@@ -1762,36 +1763,15 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
1762 1763
1763 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 1764 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
1764 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); 1765 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
1766 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
1767 rt2x00dev->default_ant.rx_chain_num <= 1);
1768 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD,
1769 rt2x00dev->default_ant.rx_chain_num <= 2);
1765 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); 1770 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
1766 if (rt2x00_rt(rt2x00dev, RT3390)) { 1771 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
1767 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1772 rt2x00dev->default_ant.tx_chain_num <= 1);
1768 rt2x00dev->default_ant.rx_chain_num == 1); 1773 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD,
1769 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1774 rt2x00dev->default_ant.tx_chain_num <= 2);
1770 rt2x00dev->default_ant.tx_chain_num == 1);
1771 } else {
1772 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
1773 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
1774 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
1775 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
1776
1777 switch (rt2x00dev->default_ant.tx_chain_num) {
1778 case 1:
1779 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
1780 /* fall through */
1781 case 2:
1782 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
1783 break;
1784 }
1785
1786 switch (rt2x00dev->default_ant.rx_chain_num) {
1787 case 1:
1788 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
1789 /* fall through */
1790 case 2:
1791 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
1792 break;
1793 }
1794 }
1795 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 1775 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
1796 1776
1797 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 1777 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
@@ -1995,13 +1975,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
1995 rt2800_rfcsr_write(rt2x00dev, 29, 0x9f); 1975 rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
1996 } 1976 }
1997 1977
1998 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 1978 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
1999 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT7, 0); 1979 rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
2000 if (rf->channel <= 14) 1980 if (rf->channel <= 14)
2001 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 1); 1981 rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
2002 else 1982 else
2003 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 0); 1983 rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0);
2004 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 1984 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
2005 1985
2006 rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr); 1986 rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
2007 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); 1987 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
@@ -2053,6 +2033,60 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
2053 } 2033 }
2054} 2034}
2055 2035
2036static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
2037 struct ieee80211_conf *conf,
2038 struct rf_channel *rf,
2039 struct channel_info *info)
2040{
2041 u8 rfcsr;
2042
2043 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
2044 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
2045
2046 rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
2047 rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
2048 rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
2049
2050 if (info->default_power1 > POWER_BOUND)
2051 rt2800_rfcsr_write(rt2x00dev, 47, POWER_BOUND);
2052 else
2053 rt2800_rfcsr_write(rt2x00dev, 47, info->default_power1);
2054
2055 if (info->default_power2 > POWER_BOUND)
2056 rt2800_rfcsr_write(rt2x00dev, 48, POWER_BOUND);
2057 else
2058 rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
2059
2060 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
2061 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2062 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2063 else
2064 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2065
2066 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2067
2068 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2069 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
2070 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
2071
2072 if ( rt2x00dev->default_ant.tx_chain_num == 2 )
2073 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
2074 else
2075 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
2076
2077 if ( rt2x00dev->default_ant.rx_chain_num == 2 )
2078 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
2079 else
2080 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
2081
2082 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
2083 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
2084
2085 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2086
2087 rt2800_rfcsr_write(rt2x00dev, 31, 80);
2088}
2089
2056static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, 2090static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2057 struct ieee80211_conf *conf, 2091 struct ieee80211_conf *conf,
2058 struct rf_channel *rf, 2092 struct rf_channel *rf,
@@ -2182,6 +2216,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2182 case RF3290: 2216 case RF3290:
2183 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info); 2217 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
2184 break; 2218 break;
2219 case RF3322:
2220 rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
2221 break;
2185 case RF5360: 2222 case RF5360:
2186 case RF5370: 2223 case RF5370:
2187 case RF5372: 2224 case RF5372:
@@ -2194,6 +2231,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2194 } 2231 }
2195 2232
2196 if (rt2x00_rf(rt2x00dev, RF3290) || 2233 if (rt2x00_rf(rt2x00dev, RF3290) ||
2234 rt2x00_rf(rt2x00dev, RF3322) ||
2197 rt2x00_rf(rt2x00dev, RF5360) || 2235 rt2x00_rf(rt2x00dev, RF5360) ||
2198 rt2x00_rf(rt2x00dev, RF5370) || 2236 rt2x00_rf(rt2x00dev, RF5370) ||
2199 rt2x00_rf(rt2x00dev, RF5372) || 2237 rt2x00_rf(rt2x00dev, RF5372) ||
@@ -2212,10 +2250,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2212 /* 2250 /*
2213 * Change BBP settings 2251 * Change BBP settings
2214 */ 2252 */
2215 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); 2253 if (rt2x00_rt(rt2x00dev, RT3352)) {
2216 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); 2254 rt2800_bbp_write(rt2x00dev, 27, 0x0);
2217 rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); 2255 rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
2218 rt2800_bbp_write(rt2x00dev, 86, 0); 2256 rt2800_bbp_write(rt2x00dev, 27, 0x20);
2257 rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
2258 } else {
2259 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
2260 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
2261 rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
2262 rt2800_bbp_write(rt2x00dev, 86, 0);
2263 }
2219 2264
2220 if (rf->channel <= 14) { 2265 if (rf->channel <= 14) {
2221 if (!rt2x00_rt(rt2x00dev, RT5390) && 2266 if (!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -2310,6 +2355,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2310 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg); 2355 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
2311 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg); 2356 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
2312 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg); 2357 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
2358
2359 /*
2360 * Clear update flag
2361 */
2362 if (rt2x00_rt(rt2x00dev, RT3352)) {
2363 rt2800_bbp_read(rt2x00dev, 49, &bbp);
2364 rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
2365 rt2800_bbp_write(rt2x00dev, 49, bbp);
2366 }
2313} 2367}
2314 2368
2315static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) 2369static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
@@ -2821,23 +2875,32 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
2821 2875
2822static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) 2876static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
2823{ 2877{
2878 u8 vgc;
2879
2824 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 2880 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
2825 if (rt2x00_rt(rt2x00dev, RT3070) || 2881 if (rt2x00_rt(rt2x00dev, RT3070) ||
2826 rt2x00_rt(rt2x00dev, RT3071) || 2882 rt2x00_rt(rt2x00dev, RT3071) ||
2827 rt2x00_rt(rt2x00dev, RT3090) || 2883 rt2x00_rt(rt2x00dev, RT3090) ||
2828 rt2x00_rt(rt2x00dev, RT3290) || 2884 rt2x00_rt(rt2x00dev, RT3290) ||
2829 rt2x00_rt(rt2x00dev, RT3390) || 2885 rt2x00_rt(rt2x00dev, RT3390) ||
2886 rt2x00_rt(rt2x00dev, RT3572) ||
2830 rt2x00_rt(rt2x00dev, RT5390) || 2887 rt2x00_rt(rt2x00dev, RT5390) ||
2831 rt2x00_rt(rt2x00dev, RT5392)) 2888 rt2x00_rt(rt2x00dev, RT5392))
2832 return 0x1c + (2 * rt2x00dev->lna_gain); 2889 vgc = 0x1c + (2 * rt2x00dev->lna_gain);
2833 else 2890 else
2834 return 0x2e + rt2x00dev->lna_gain; 2891 vgc = 0x2e + rt2x00dev->lna_gain;
2892 } else { /* 5GHZ band */
2893 if (rt2x00_rt(rt2x00dev, RT3572))
2894 vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
2895 else {
2896 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
2897 vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
2898 else
2899 vgc = 0x3a + (rt2x00dev->lna_gain * 5) / 3;
2900 }
2835 } 2901 }
2836 2902
2837 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) 2903 return vgc;
2838 return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
2839 else
2840 return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
2841} 2904}
2842 2905
2843static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev, 2906static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
@@ -2998,11 +3061,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2998 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 3061 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
2999 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 3062 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
3000 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030); 3063 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
3064 } else if (rt2x00_rt(rt2x00dev, RT3352)) {
3065 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
3066 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3067 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
3001 } else if (rt2x00_rt(rt2x00dev, RT3572)) { 3068 } else if (rt2x00_rt(rt2x00dev, RT3572)) {
3002 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 3069 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
3003 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3070 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3004 } else if (rt2x00_rt(rt2x00dev, RT5390) || 3071 } else if (rt2x00_rt(rt2x00dev, RT5390) ||
3005 rt2x00_rt(rt2x00dev, RT5392)) { 3072 rt2x00_rt(rt2x00dev, RT5392)) {
3006 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); 3073 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
3007 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3074 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3008 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 3075 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3378,6 +3445,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3378 rt2800_wait_bbp_ready(rt2x00dev))) 3445 rt2800_wait_bbp_ready(rt2x00dev)))
3379 return -EACCES; 3446 return -EACCES;
3380 3447
3448 if (rt2x00_rt(rt2x00dev, RT3352)) {
3449 rt2800_bbp_write(rt2x00dev, 3, 0x00);
3450 rt2800_bbp_write(rt2x00dev, 4, 0x50);
3451 }
3452
3381 if (rt2x00_rt(rt2x00dev, RT3290) || 3453 if (rt2x00_rt(rt2x00dev, RT3290) ||
3382 rt2x00_rt(rt2x00dev, RT5390) || 3454 rt2x00_rt(rt2x00dev, RT5390) ||
3383 rt2x00_rt(rt2x00dev, RT5392)) { 3455 rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3388,15 +3460,20 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3388 3460
3389 if (rt2800_is_305x_soc(rt2x00dev) || 3461 if (rt2800_is_305x_soc(rt2x00dev) ||
3390 rt2x00_rt(rt2x00dev, RT3290) || 3462 rt2x00_rt(rt2x00dev, RT3290) ||
3463 rt2x00_rt(rt2x00dev, RT3352) ||
3391 rt2x00_rt(rt2x00dev, RT3572) || 3464 rt2x00_rt(rt2x00dev, RT3572) ||
3392 rt2x00_rt(rt2x00dev, RT5390) || 3465 rt2x00_rt(rt2x00dev, RT5390) ||
3393 rt2x00_rt(rt2x00dev, RT5392)) 3466 rt2x00_rt(rt2x00dev, RT5392))
3394 rt2800_bbp_write(rt2x00dev, 31, 0x08); 3467 rt2800_bbp_write(rt2x00dev, 31, 0x08);
3395 3468
3469 if (rt2x00_rt(rt2x00dev, RT3352))
3470 rt2800_bbp_write(rt2x00dev, 47, 0x48);
3471
3396 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 3472 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
3397 rt2800_bbp_write(rt2x00dev, 66, 0x38); 3473 rt2800_bbp_write(rt2x00dev, 66, 0x38);
3398 3474
3399 if (rt2x00_rt(rt2x00dev, RT3290) || 3475 if (rt2x00_rt(rt2x00dev, RT3290) ||
3476 rt2x00_rt(rt2x00dev, RT3352) ||
3400 rt2x00_rt(rt2x00dev, RT5390) || 3477 rt2x00_rt(rt2x00dev, RT5390) ||
3401 rt2x00_rt(rt2x00dev, RT5392)) 3478 rt2x00_rt(rt2x00dev, RT5392))
3402 rt2800_bbp_write(rt2x00dev, 68, 0x0b); 3479 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
@@ -3405,6 +3482,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3405 rt2800_bbp_write(rt2x00dev, 69, 0x16); 3482 rt2800_bbp_write(rt2x00dev, 69, 0x16);
3406 rt2800_bbp_write(rt2x00dev, 73, 0x12); 3483 rt2800_bbp_write(rt2x00dev, 73, 0x12);
3407 } else if (rt2x00_rt(rt2x00dev, RT3290) || 3484 } else if (rt2x00_rt(rt2x00dev, RT3290) ||
3485 rt2x00_rt(rt2x00dev, RT3352) ||
3408 rt2x00_rt(rt2x00dev, RT5390) || 3486 rt2x00_rt(rt2x00dev, RT5390) ||
3409 rt2x00_rt(rt2x00dev, RT5392)) { 3487 rt2x00_rt(rt2x00dev, RT5392)) {
3410 rt2800_bbp_write(rt2x00dev, 69, 0x12); 3488 rt2800_bbp_write(rt2x00dev, 69, 0x12);
@@ -3436,15 +3514,17 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3436 } else if (rt2800_is_305x_soc(rt2x00dev)) { 3514 } else if (rt2800_is_305x_soc(rt2x00dev)) {
3437 rt2800_bbp_write(rt2x00dev, 78, 0x0e); 3515 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
3438 rt2800_bbp_write(rt2x00dev, 80, 0x08); 3516 rt2800_bbp_write(rt2x00dev, 80, 0x08);
3439 } else { 3517 } else if (rt2x00_rt(rt2x00dev, RT3290)) {
3440 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3441 }
3442
3443 if (rt2x00_rt(rt2x00dev, RT3290)) {
3444 rt2800_bbp_write(rt2x00dev, 74, 0x0b); 3518 rt2800_bbp_write(rt2x00dev, 74, 0x0b);
3445 rt2800_bbp_write(rt2x00dev, 79, 0x18); 3519 rt2800_bbp_write(rt2x00dev, 79, 0x18);
3446 rt2800_bbp_write(rt2x00dev, 80, 0x09); 3520 rt2800_bbp_write(rt2x00dev, 80, 0x09);
3447 rt2800_bbp_write(rt2x00dev, 81, 0x33); 3521 rt2800_bbp_write(rt2x00dev, 81, 0x33);
3522 } else if (rt2x00_rt(rt2x00dev, RT3352)) {
3523 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
3524 rt2800_bbp_write(rt2x00dev, 80, 0x08);
3525 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3526 } else {
3527 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3448 } 3528 }
3449 3529
3450 rt2800_bbp_write(rt2x00dev, 82, 0x62); 3530 rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -3465,18 +3545,21 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3465 rt2800_bbp_write(rt2x00dev, 84, 0x99); 3545 rt2800_bbp_write(rt2x00dev, 84, 0x99);
3466 3546
3467 if (rt2x00_rt(rt2x00dev, RT3290) || 3547 if (rt2x00_rt(rt2x00dev, RT3290) ||
3548 rt2x00_rt(rt2x00dev, RT3352) ||
3468 rt2x00_rt(rt2x00dev, RT5390) || 3549 rt2x00_rt(rt2x00dev, RT5390) ||
3469 rt2x00_rt(rt2x00dev, RT5392)) 3550 rt2x00_rt(rt2x00dev, RT5392))
3470 rt2800_bbp_write(rt2x00dev, 86, 0x38); 3551 rt2800_bbp_write(rt2x00dev, 86, 0x38);
3471 else 3552 else
3472 rt2800_bbp_write(rt2x00dev, 86, 0x00); 3553 rt2800_bbp_write(rt2x00dev, 86, 0x00);
3473 3554
3474 if (rt2x00_rt(rt2x00dev, RT5392)) 3555 if (rt2x00_rt(rt2x00dev, RT3352) ||
3556 rt2x00_rt(rt2x00dev, RT5392))
3475 rt2800_bbp_write(rt2x00dev, 88, 0x90); 3557 rt2800_bbp_write(rt2x00dev, 88, 0x90);
3476 3558
3477 rt2800_bbp_write(rt2x00dev, 91, 0x04); 3559 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3478 3560
3479 if (rt2x00_rt(rt2x00dev, RT3290) || 3561 if (rt2x00_rt(rt2x00dev, RT3290) ||
3562 rt2x00_rt(rt2x00dev, RT3352) ||
3480 rt2x00_rt(rt2x00dev, RT5390) || 3563 rt2x00_rt(rt2x00dev, RT5390) ||
3481 rt2x00_rt(rt2x00dev, RT5392)) 3564 rt2x00_rt(rt2x00dev, RT5392))
3482 rt2800_bbp_write(rt2x00dev, 92, 0x02); 3565 rt2800_bbp_write(rt2x00dev, 92, 0x02);
@@ -3493,6 +3576,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3493 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 3576 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
3494 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || 3577 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
3495 rt2x00_rt(rt2x00dev, RT3290) || 3578 rt2x00_rt(rt2x00dev, RT3290) ||
3579 rt2x00_rt(rt2x00dev, RT3352) ||
3496 rt2x00_rt(rt2x00dev, RT3572) || 3580 rt2x00_rt(rt2x00dev, RT3572) ||
3497 rt2x00_rt(rt2x00dev, RT5390) || 3581 rt2x00_rt(rt2x00dev, RT5390) ||
3498 rt2x00_rt(rt2x00dev, RT5392) || 3582 rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3502,6 +3586,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3502 rt2800_bbp_write(rt2x00dev, 103, 0x00); 3586 rt2800_bbp_write(rt2x00dev, 103, 0x00);
3503 3587
3504 if (rt2x00_rt(rt2x00dev, RT3290) || 3588 if (rt2x00_rt(rt2x00dev, RT3290) ||
3589 rt2x00_rt(rt2x00dev, RT3352) ||
3505 rt2x00_rt(rt2x00dev, RT5390) || 3590 rt2x00_rt(rt2x00dev, RT5390) ||
3506 rt2x00_rt(rt2x00dev, RT5392)) 3591 rt2x00_rt(rt2x00dev, RT5392))
3507 rt2800_bbp_write(rt2x00dev, 104, 0x92); 3592 rt2800_bbp_write(rt2x00dev, 104, 0x92);
@@ -3510,6 +3595,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3510 rt2800_bbp_write(rt2x00dev, 105, 0x01); 3595 rt2800_bbp_write(rt2x00dev, 105, 0x01);
3511 else if (rt2x00_rt(rt2x00dev, RT3290)) 3596 else if (rt2x00_rt(rt2x00dev, RT3290))
3512 rt2800_bbp_write(rt2x00dev, 105, 0x1c); 3597 rt2800_bbp_write(rt2x00dev, 105, 0x1c);
3598 else if (rt2x00_rt(rt2x00dev, RT3352))
3599 rt2800_bbp_write(rt2x00dev, 105, 0x34);
3513 else if (rt2x00_rt(rt2x00dev, RT5390) || 3600 else if (rt2x00_rt(rt2x00dev, RT5390) ||
3514 rt2x00_rt(rt2x00dev, RT5392)) 3601 rt2x00_rt(rt2x00dev, RT5392))
3515 rt2800_bbp_write(rt2x00dev, 105, 0x3c); 3602 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
@@ -3519,11 +3606,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3519 if (rt2x00_rt(rt2x00dev, RT3290) || 3606 if (rt2x00_rt(rt2x00dev, RT3290) ||
3520 rt2x00_rt(rt2x00dev, RT5390)) 3607 rt2x00_rt(rt2x00dev, RT5390))
3521 rt2800_bbp_write(rt2x00dev, 106, 0x03); 3608 rt2800_bbp_write(rt2x00dev, 106, 0x03);
3609 else if (rt2x00_rt(rt2x00dev, RT3352))
3610 rt2800_bbp_write(rt2x00dev, 106, 0x05);
3522 else if (rt2x00_rt(rt2x00dev, RT5392)) 3611 else if (rt2x00_rt(rt2x00dev, RT5392))
3523 rt2800_bbp_write(rt2x00dev, 106, 0x12); 3612 rt2800_bbp_write(rt2x00dev, 106, 0x12);
3524 else 3613 else
3525 rt2800_bbp_write(rt2x00dev, 106, 0x35); 3614 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3526 3615
3616 if (rt2x00_rt(rt2x00dev, RT3352))
3617 rt2800_bbp_write(rt2x00dev, 120, 0x50);
3618
3527 if (rt2x00_rt(rt2x00dev, RT3290) || 3619 if (rt2x00_rt(rt2x00dev, RT3290) ||
3528 rt2x00_rt(rt2x00dev, RT5390) || 3620 rt2x00_rt(rt2x00dev, RT5390) ||
3529 rt2x00_rt(rt2x00dev, RT5392)) 3621 rt2x00_rt(rt2x00dev, RT5392))
@@ -3534,6 +3626,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3534 rt2800_bbp_write(rt2x00dev, 135, 0xf6); 3626 rt2800_bbp_write(rt2x00dev, 135, 0xf6);
3535 } 3627 }
3536 3628
3629 if (rt2x00_rt(rt2x00dev, RT3352))
3630 rt2800_bbp_write(rt2x00dev, 137, 0x0f);
3631
3537 if (rt2x00_rt(rt2x00dev, RT3071) || 3632 if (rt2x00_rt(rt2x00dev, RT3071) ||
3538 rt2x00_rt(rt2x00dev, RT3090) || 3633 rt2x00_rt(rt2x00dev, RT3090) ||
3539 rt2x00_rt(rt2x00dev, RT3390) || 3634 rt2x00_rt(rt2x00dev, RT3390) ||
@@ -3574,6 +3669,28 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3574 rt2800_bbp_write(rt2x00dev, 3, value); 3669 rt2800_bbp_write(rt2x00dev, 3, value);
3575 } 3670 }
3576 3671
3672 if (rt2x00_rt(rt2x00dev, RT3352)) {
3673 rt2800_bbp_write(rt2x00dev, 163, 0xbd);
3674 /* Set ITxBF timeout to 0x9c40=1000msec */
3675 rt2800_bbp_write(rt2x00dev, 179, 0x02);
3676 rt2800_bbp_write(rt2x00dev, 180, 0x00);
3677 rt2800_bbp_write(rt2x00dev, 182, 0x40);
3678 rt2800_bbp_write(rt2x00dev, 180, 0x01);
3679 rt2800_bbp_write(rt2x00dev, 182, 0x9c);
3680 rt2800_bbp_write(rt2x00dev, 179, 0x00);
3681 /* Reprogram the inband interface to put right values in RXWI */
3682 rt2800_bbp_write(rt2x00dev, 142, 0x04);
3683 rt2800_bbp_write(rt2x00dev, 143, 0x3b);
3684 rt2800_bbp_write(rt2x00dev, 142, 0x06);
3685 rt2800_bbp_write(rt2x00dev, 143, 0xa0);
3686 rt2800_bbp_write(rt2x00dev, 142, 0x07);
3687 rt2800_bbp_write(rt2x00dev, 143, 0xa1);
3688 rt2800_bbp_write(rt2x00dev, 142, 0x08);
3689 rt2800_bbp_write(rt2x00dev, 143, 0xa2);
3690
3691 rt2800_bbp_write(rt2x00dev, 148, 0xc8);
3692 }
3693
3577 if (rt2x00_rt(rt2x00dev, RT5390) || 3694 if (rt2x00_rt(rt2x00dev, RT5390) ||
3578 rt2x00_rt(rt2x00dev, RT5392)) { 3695 rt2x00_rt(rt2x00dev, RT5392)) {
3579 int ant, div_mode; 3696 int ant, div_mode;
@@ -3587,16 +3704,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3587 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { 3704 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
3588 u32 reg; 3705 u32 reg;
3589 3706
3590 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 3707 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
3591 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0); 3708 rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
3592 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0); 3709 rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
3593 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0); 3710 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
3594 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0); 3711 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
3595 if (ant == 0) 3712 if (ant == 0)
3596 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1); 3713 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
3597 else if (ant == 1) 3714 else if (ant == 1)
3598 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1); 3715 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
3599 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 3716 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
3600 } 3717 }
3601 3718
3602 /* This chip has hardware antenna diversity*/ 3719 /* This chip has hardware antenna diversity*/
@@ -3707,6 +3824,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3707 !rt2x00_rt(rt2x00dev, RT3071) && 3824 !rt2x00_rt(rt2x00dev, RT3071) &&
3708 !rt2x00_rt(rt2x00dev, RT3090) && 3825 !rt2x00_rt(rt2x00dev, RT3090) &&
3709 !rt2x00_rt(rt2x00dev, RT3290) && 3826 !rt2x00_rt(rt2x00dev, RT3290) &&
3827 !rt2x00_rt(rt2x00dev, RT3352) &&
3710 !rt2x00_rt(rt2x00dev, RT3390) && 3828 !rt2x00_rt(rt2x00dev, RT3390) &&
3711 !rt2x00_rt(rt2x00dev, RT3572) && 3829 !rt2x00_rt(rt2x00dev, RT3572) &&
3712 !rt2x00_rt(rt2x00dev, RT5390) && 3830 !rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3903,6 +4021,70 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3903 rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 4021 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
3904 rt2800_rfcsr_write(rt2x00dev, 31, 0x00); 4022 rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
3905 return 0; 4023 return 0;
4024 } else if (rt2x00_rt(rt2x00dev, RT3352)) {
4025 rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
4026 rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
4027 rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
4028 rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
4029 rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
4030 rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
4031 rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
4032 rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
4033 rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
4034 rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
4035 rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
4036 rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
4037 rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
4038 rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
4039 rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
4040 rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
4041 rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
4042 rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
4043 rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
4044 rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
4045 rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
4046 rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
4047 rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
4048 rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
4049 rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
4050 rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
4051 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
4052 rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
4053 rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
4054 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
4055 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
4056 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
4057 rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
4058 rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
4059 rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
4060 rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
4061 rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
4062 rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
4063 rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
4064 rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
4065 rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
4066 rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
4067 rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
4068 rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
4069 rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
4070 rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
4071 rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
4072 rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
4073 rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
4074 rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
4075 rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
4076 rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
4077 rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
4078 rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
4079 rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
4080 rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
4081 rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
4082 rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
4083 rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
4084 rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
4085 rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
4086 rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
4087 rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
3906 } else if (rt2x00_rt(rt2x00dev, RT5390)) { 4088 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
3907 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); 4089 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
3908 rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 4090 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
@@ -4104,6 +4286,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4104 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19); 4286 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
4105 } else if (rt2x00_rt(rt2x00dev, RT3071) || 4287 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
4106 rt2x00_rt(rt2x00dev, RT3090) || 4288 rt2x00_rt(rt2x00dev, RT3090) ||
4289 rt2x00_rt(rt2x00dev, RT3352) ||
4107 rt2x00_rt(rt2x00dev, RT3390) || 4290 rt2x00_rt(rt2x00dev, RT3390) ||
4108 rt2x00_rt(rt2x00dev, RT3572)) { 4291 rt2x00_rt(rt2x00dev, RT3572)) {
4109 drv_data->calibration_bw20 = 4292 drv_data->calibration_bw20 =
@@ -4392,7 +4575,7 @@ void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
4392} 4575}
4393EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); 4576EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
4394 4577
4395int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) 4578static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4396{ 4579{
4397 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; 4580 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
4398 u16 word; 4581 u16 word;
@@ -4400,6 +4583,11 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4400 u8 default_lna_gain; 4583 u8 default_lna_gain;
4401 4584
4402 /* 4585 /*
4586 * Read the EEPROM.
4587 */
4588 rt2800_read_eeprom(rt2x00dev);
4589
4590 /*
4403 * Start validation of the data that has been read. 4591 * Start validation of the data that has been read.
4404 */ 4592 */
4405 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 4593 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
@@ -4521,9 +4709,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4521 4709
4522 return 0; 4710 return 0;
4523} 4711}
4524EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
4525 4712
4526int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) 4713static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4527{ 4714{
4528 u32 reg; 4715 u32 reg;
4529 u16 value; 4716 u16 value;
@@ -4562,6 +4749,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4562 case RT3071: 4749 case RT3071:
4563 case RT3090: 4750 case RT3090:
4564 case RT3290: 4751 case RT3290:
4752 case RT3352:
4565 case RT3390: 4753 case RT3390:
4566 case RT3572: 4754 case RT3572:
4567 case RT5390: 4755 case RT5390:
@@ -4584,6 +4772,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4584 case RF3052: 4772 case RF3052:
4585 case RF3290: 4773 case RF3290:
4586 case RF3320: 4774 case RF3320:
4775 case RF3322:
4587 case RF5360: 4776 case RF5360:
4588 case RF5370: 4777 case RF5370:
4589 case RF5372: 4778 case RF5372:
@@ -4608,6 +4797,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4608 4797
4609 if (rt2x00_rt(rt2x00dev, RT3070) || 4798 if (rt2x00_rt(rt2x00dev, RT3070) ||
4610 rt2x00_rt(rt2x00dev, RT3090) || 4799 rt2x00_rt(rt2x00dev, RT3090) ||
4800 rt2x00_rt(rt2x00dev, RT3352) ||
4611 rt2x00_rt(rt2x00dev, RT3390)) { 4801 rt2x00_rt(rt2x00dev, RT3390)) {
4612 value = rt2x00_get_field16(eeprom, 4802 value = rt2x00_get_field16(eeprom,
4613 EEPROM_NIC_CONF1_ANT_DIVERSITY); 4803 EEPROM_NIC_CONF1_ANT_DIVERSITY);
@@ -4681,7 +4871,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4681 4871
4682 return 0; 4872 return 0;
4683} 4873}
4684EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
4685 4874
4686/* 4875/*
4687 * RF value list for rt28xx 4876 * RF value list for rt28xx
@@ -4824,7 +5013,7 @@ static const struct rf_channel rf_vals_3x[] = {
4824 {173, 0x61, 0, 9}, 5013 {173, 0x61, 0, 9},
4825}; 5014};
4826 5015
4827int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 5016static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4828{ 5017{
4829 struct hw_mode_spec *spec = &rt2x00dev->spec; 5018 struct hw_mode_spec *spec = &rt2x00dev->spec;
4830 struct channel_info *info; 5019 struct channel_info *info;
@@ -4901,6 +5090,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4901 rt2x00_rf(rt2x00dev, RF3022) || 5090 rt2x00_rf(rt2x00dev, RF3022) ||
4902 rt2x00_rf(rt2x00dev, RF3290) || 5091 rt2x00_rf(rt2x00dev, RF3290) ||
4903 rt2x00_rf(rt2x00dev, RF3320) || 5092 rt2x00_rf(rt2x00dev, RF3320) ||
5093 rt2x00_rf(rt2x00dev, RF3322) ||
4904 rt2x00_rf(rt2x00dev, RF5360) || 5094 rt2x00_rf(rt2x00dev, RF5360) ||
4905 rt2x00_rf(rt2x00dev, RF5370) || 5095 rt2x00_rf(rt2x00dev, RF5370) ||
4906 rt2x00_rf(rt2x00dev, RF5372) || 5096 rt2x00_rf(rt2x00dev, RF5372) ||
@@ -5000,7 +5190,72 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5000 5190
5001 return 0; 5191 return 0;
5002} 5192}
5003EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode); 5193
5194int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
5195{
5196 int retval;
5197 u32 reg;
5198
5199 /*
5200 * Allocate eeprom data.
5201 */
5202 retval = rt2800_validate_eeprom(rt2x00dev);
5203 if (retval)
5204 return retval;
5205
5206 retval = rt2800_init_eeprom(rt2x00dev);
5207 if (retval)
5208 return retval;
5209
5210 /*
5211 * Enable rfkill polling by setting GPIO direction of the
5212 * rfkill switch GPIO pin correctly.
5213 */
5214 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
5215 rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1);
5216 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
5217
5218 /*
5219 * Initialize hw specifications.
5220 */
5221 retval = rt2800_probe_hw_mode(rt2x00dev);
5222 if (retval)
5223 return retval;
5224
5225 /*
5226 * Set device capabilities.
5227 */
5228 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
5229 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
5230 if (!rt2x00_is_usb(rt2x00dev))
5231 __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
5232
5233 /*
5234 * Set device requirements.
5235 */
5236 if (!rt2x00_is_soc(rt2x00dev))
5237 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
5238 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
5239 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
5240 if (!rt2800_hwcrypt_disabled(rt2x00dev))
5241 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
5242 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
5243 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
5244 if (rt2x00_is_usb(rt2x00dev))
5245 __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
5246 else {
5247 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
5248 __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
5249 }
5250
5251 /*
5252 * Set the rssi offset.
5253 */
5254 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
5255
5256 return 0;
5257}
5258EXPORT_SYMBOL_GPL(rt2800_probe_hw);
5004 5259
5005/* 5260/*
5006 * IEEE80211 stack callback functions. 5261 * IEEE80211 stack callback functions.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 18a0b67b4c68..a128ceadcb3e 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -43,6 +43,9 @@ struct rt2800_ops {
43 const unsigned int offset, 43 const unsigned int offset,
44 const struct rt2x00_field32 field, u32 *reg); 44 const struct rt2x00_field32 field, u32 *reg);
45 45
46 void (*read_eeprom)(struct rt2x00_dev *rt2x00dev);
47 bool (*hwcrypt_disabled)(struct rt2x00_dev *rt2x00dev);
48
46 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev, 49 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
47 const u8 *data, const size_t len); 50 const u8 *data, const size_t len);
48 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev); 51 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
@@ -114,6 +117,20 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
114 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg); 117 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
115} 118}
116 119
120static inline void rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev)
121{
122 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
123
124 rt2800ops->read_eeprom(rt2x00dev);
125}
126
127static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
128{
129 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
130
131 return rt2800ops->hwcrypt_disabled(rt2x00dev);
132}
133
117static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev, 134static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev,
118 const u8 *data, const size_t len) 135 const u8 *data, const size_t len)
119{ 136{
@@ -191,9 +208,8 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
191 208
192int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); 209int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
193void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); 210void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
194int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev); 211
195int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev); 212int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
196int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
197 213
198void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32, 214void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
199 u16 *iv16); 215 u16 *iv16);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 4765bbd654cd..27829e1e2e38 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -54,6 +54,11 @@ static bool modparam_nohwcrypt = false;
54module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 54module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
55MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 55MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
56 56
57static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
58{
59 return modparam_nohwcrypt;
60}
61
57static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) 62static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
58{ 63{
59 unsigned int i; 64 unsigned int i;
@@ -965,85 +970,14 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
965/* 970/*
966 * Device probe functions. 971 * Device probe functions.
967 */ 972 */
968static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) 973static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
969{ 974{
970 /*
971 * Read EEPROM into buffer
972 */
973 if (rt2x00_is_soc(rt2x00dev)) 975 if (rt2x00_is_soc(rt2x00dev))
974 rt2800pci_read_eeprom_soc(rt2x00dev); 976 rt2800pci_read_eeprom_soc(rt2x00dev);
975 else if (rt2800pci_efuse_detect(rt2x00dev)) 977 else if (rt2800pci_efuse_detect(rt2x00dev))
976 rt2800pci_read_eeprom_efuse(rt2x00dev); 978 rt2800pci_read_eeprom_efuse(rt2x00dev);
977 else 979 else
978 rt2800pci_read_eeprom_pci(rt2x00dev); 980 rt2800pci_read_eeprom_pci(rt2x00dev);
979
980 return rt2800_validate_eeprom(rt2x00dev);
981}
982
983static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
984{
985 int retval;
986 u32 reg;
987
988 /*
989 * Allocate eeprom data.
990 */
991 retval = rt2800pci_validate_eeprom(rt2x00dev);
992 if (retval)
993 return retval;
994
995 retval = rt2800_init_eeprom(rt2x00dev);
996 if (retval)
997 return retval;
998
999 /*
1000 * Enable rfkill polling by setting GPIO direction of the
1001 * rfkill switch GPIO pin correctly.
1002 */
1003 rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
1004 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
1005 rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
1006
1007 /*
1008 * Initialize hw specifications.
1009 */
1010 retval = rt2800_probe_hw_mode(rt2x00dev);
1011 if (retval)
1012 return retval;
1013
1014 /*
1015 * This device has multiple filters for control frames
1016 * and has a separate filter for PS Poll frames.
1017 */
1018 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
1019 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
1020
1021 /*
1022 * This device has a pre tbtt interrupt and thus fetches
1023 * a new beacon directly prior to transmission.
1024 */
1025 __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
1026
1027 /*
1028 * This device requires firmware.
1029 */
1030 if (!rt2x00_is_soc(rt2x00dev))
1031 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
1032 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
1033 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
1034 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
1035 __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
1036 if (!modparam_nohwcrypt)
1037 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
1038 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
1039 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
1040
1041 /*
1042 * Set the rssi offset.
1043 */
1044 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
1045
1046 return 0;
1047} 981}
1048 982
1049static const struct ieee80211_ops rt2800pci_mac80211_ops = { 983static const struct ieee80211_ops rt2800pci_mac80211_ops = {
@@ -1081,6 +1015,8 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
1081 .register_multiread = rt2x00pci_register_multiread, 1015 .register_multiread = rt2x00pci_register_multiread,
1082 .register_multiwrite = rt2x00pci_register_multiwrite, 1016 .register_multiwrite = rt2x00pci_register_multiwrite,
1083 .regbusy_read = rt2x00pci_regbusy_read, 1017 .regbusy_read = rt2x00pci_regbusy_read,
1018 .read_eeprom = rt2800pci_read_eeprom,
1019 .hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
1084 .drv_write_firmware = rt2800pci_write_firmware, 1020 .drv_write_firmware = rt2800pci_write_firmware,
1085 .drv_init_registers = rt2800pci_init_registers, 1021 .drv_init_registers = rt2800pci_init_registers,
1086 .drv_get_txwi = rt2800pci_get_txwi, 1022 .drv_get_txwi = rt2800pci_get_txwi,
@@ -1093,7 +1029,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1093 .tbtt_tasklet = rt2800pci_tbtt_tasklet, 1029 .tbtt_tasklet = rt2800pci_tbtt_tasklet,
1094 .rxdone_tasklet = rt2800pci_rxdone_tasklet, 1030 .rxdone_tasklet = rt2800pci_rxdone_tasklet,
1095 .autowake_tasklet = rt2800pci_autowake_tasklet, 1031 .autowake_tasklet = rt2800pci_autowake_tasklet,
1096 .probe_hw = rt2800pci_probe_hw, 1032 .probe_hw = rt2800_probe_hw,
1097 .get_firmware_name = rt2800pci_get_firmware_name, 1033 .get_firmware_name = rt2800pci_get_firmware_name,
1098 .check_firmware = rt2800_check_firmware, 1034 .check_firmware = rt2800_check_firmware,
1099 .load_firmware = rt2800_load_firmware, 1035 .load_firmware = rt2800_load_firmware,
@@ -1152,7 +1088,6 @@ static const struct data_queue_desc rt2800pci_queue_bcn = {
1152static const struct rt2x00_ops rt2800pci_ops = { 1088static const struct rt2x00_ops rt2800pci_ops = {
1153 .name = KBUILD_MODNAME, 1089 .name = KBUILD_MODNAME,
1154 .drv_data_size = sizeof(struct rt2800_drv_data), 1090 .drv_data_size = sizeof(struct rt2800_drv_data),
1155 .max_sta_intf = 1,
1156 .max_ap_intf = 8, 1091 .max_ap_intf = 8,
1157 .eeprom_size = EEPROM_SIZE, 1092 .eeprom_size = EEPROM_SIZE,
1158 .rf_size = RF_SIZE, 1093 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 6b4226b71618..c9e9370eb789 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -49,6 +49,11 @@ static bool modparam_nohwcrypt;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51 51
52static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
53{
54 return modparam_nohwcrypt;
55}
56
52/* 57/*
53 * Queue handlers. 58 * Queue handlers.
54 */ 59 */
@@ -730,73 +735,27 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
730/* 735/*
731 * Device probe functions. 736 * Device probe functions.
732 */ 737 */
733static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) 738static void rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
734{ 739{
735 if (rt2800_efuse_detect(rt2x00dev)) 740 if (rt2800_efuse_detect(rt2x00dev))
736 rt2800_read_eeprom_efuse(rt2x00dev); 741 rt2800_read_eeprom_efuse(rt2x00dev);
737 else 742 else
738 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, 743 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
739 EEPROM_SIZE); 744 EEPROM_SIZE);
740
741 return rt2800_validate_eeprom(rt2x00dev);
742} 745}
743 746
744static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) 747static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
745{ 748{
746 int retval; 749 int retval;
747 u32 reg;
748 750
749 /* 751 retval = rt2800_probe_hw(rt2x00dev);
750 * Allocate eeprom data.
751 */
752 retval = rt2800usb_validate_eeprom(rt2x00dev);
753 if (retval) 752 if (retval)
754 return retval; 753 return retval;
755 754
756 retval = rt2800_init_eeprom(rt2x00dev);
757 if (retval)
758 return retval;
759
760 /*
761 * Enable rfkill polling by setting GPIO direction of the
762 * rfkill switch GPIO pin correctly.
763 */
764 rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
765 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
766 rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
767
768 /*
769 * Initialize hw specifications.
770 */
771 retval = rt2800_probe_hw_mode(rt2x00dev);
772 if (retval)
773 return retval;
774
775 /*
776 * This device has multiple filters for control frames
777 * and has a separate filter for PS Poll frames.
778 */
779 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
780 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
781
782 /*
783 * This device requires firmware.
784 */
785 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
786 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
787 if (!modparam_nohwcrypt)
788 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
789 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
790 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
791 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
792 __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
793
794 rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout,
795
796 /* 755 /*
797 * Set the rssi offset. 756 * Set txstatus timer function.
798 */ 757 */
799 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; 758 rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
800 759
801 /* 760 /*
802 * Overwrite TX done handler 761 * Overwrite TX done handler
@@ -842,6 +801,8 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
842 .register_multiread = rt2x00usb_register_multiread, 801 .register_multiread = rt2x00usb_register_multiread,
843 .register_multiwrite = rt2x00usb_register_multiwrite, 802 .register_multiwrite = rt2x00usb_register_multiwrite,
844 .regbusy_read = rt2x00usb_regbusy_read, 803 .regbusy_read = rt2x00usb_regbusy_read,
804 .read_eeprom = rt2800usb_read_eeprom,
805 .hwcrypt_disabled = rt2800usb_hwcrypt_disabled,
845 .drv_write_firmware = rt2800usb_write_firmware, 806 .drv_write_firmware = rt2800usb_write_firmware,
846 .drv_init_registers = rt2800usb_init_registers, 807 .drv_init_registers = rt2800usb_init_registers,
847 .drv_get_txwi = rt2800usb_get_txwi, 808 .drv_get_txwi = rt2800usb_get_txwi,
@@ -909,7 +870,6 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
909static const struct rt2x00_ops rt2800usb_ops = { 870static const struct rt2x00_ops rt2800usb_ops = {
910 .name = KBUILD_MODNAME, 871 .name = KBUILD_MODNAME,
911 .drv_data_size = sizeof(struct rt2800_drv_data), 872 .drv_data_size = sizeof(struct rt2800_drv_data),
912 .max_sta_intf = 1,
913 .max_ap_intf = 8, 873 .max_ap_intf = 8,
914 .eeprom_size = EEPROM_SIZE, 874 .eeprom_size = EEPROM_SIZE,
915 .rf_size = RF_SIZE, 875 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8afb546c2b2d..0751b35ef6dc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -188,6 +188,7 @@ struct rt2x00_chip {
188#define RT3071 0x3071 188#define RT3071 0x3071
189#define RT3090 0x3090 /* 2.4GHz PCIe */ 189#define RT3090 0x3090 /* 2.4GHz PCIe */
190#define RT3290 0x3290 190#define RT3290 0x3290
191#define RT3352 0x3352 /* WSOC */
191#define RT3390 0x3390 192#define RT3390 0x3390
192#define RT3572 0x3572 193#define RT3572 0x3572
193#define RT3593 0x3593 194#define RT3593 0x3593
@@ -655,7 +656,6 @@ struct rt2x00lib_ops {
655struct rt2x00_ops { 656struct rt2x00_ops {
656 const char *name; 657 const char *name;
657 const unsigned int drv_data_size; 658 const unsigned int drv_data_size;
658 const unsigned int max_sta_intf;
659 const unsigned int max_ap_intf; 659 const unsigned int max_ap_intf;
660 const unsigned int eeprom_size; 660 const unsigned int eeprom_size;
661 const unsigned int rf_size; 661 const unsigned int rf_size;
@@ -741,6 +741,14 @@ enum rt2x00_capability_flags {
741}; 741};
742 742
743/* 743/*
744 * Interface combinations
745 */
746enum {
747 IF_COMB_AP = 0,
748 NUM_IF_COMB,
749};
750
751/*
744 * rt2x00 device structure. 752 * rt2x00 device structure.
745 */ 753 */
746struct rt2x00_dev { 754struct rt2x00_dev {
@@ -867,6 +875,12 @@ struct rt2x00_dev {
867 unsigned int intf_beaconing; 875 unsigned int intf_beaconing;
868 876
869 /* 877 /*
878 * Interface combinations
879 */
880 struct ieee80211_iface_limit if_limits_ap;
881 struct ieee80211_iface_combination if_combinations[NUM_IF_COMB];
882
883 /*
870 * Link quality 884 * Link quality
871 */ 885 */
872 struct link link; 886 struct link link;
@@ -1287,7 +1301,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
1287/* 1301/*
1288 * mac80211 handlers. 1302 * mac80211 handlers.
1289 */ 1303 */
1290void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 1304void rt2x00mac_tx(struct ieee80211_hw *hw,
1305 struct ieee80211_tx_control *control,
1306 struct sk_buff *skb);
1291int rt2x00mac_start(struct ieee80211_hw *hw); 1307int rt2x00mac_start(struct ieee80211_hw *hw);
1292void rt2x00mac_stop(struct ieee80211_hw *hw); 1308void rt2x00mac_stop(struct ieee80211_hw *hw);
1293int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1309int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 3f07e36f462b..69097d1faeb6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
194 */ 194 */
195 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 195 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
196 while (skb) { 196 while (skb) {
197 rt2x00mac_tx(rt2x00dev->hw, skb); 197 rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
198 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 198 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
199 } 199 }
200} 200}
@@ -1118,6 +1118,34 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1118 rt2x00dev->intf_associated = 0; 1118 rt2x00dev->intf_associated = 0;
1119} 1119}
1120 1120
1121static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
1122{
1123 struct ieee80211_iface_limit *if_limit;
1124 struct ieee80211_iface_combination *if_combination;
1125
1126 /*
1127 * Build up AP interface limits structure.
1128 */
1129 if_limit = &rt2x00dev->if_limits_ap;
1130 if_limit->max = rt2x00dev->ops->max_ap_intf;
1131 if_limit->types = BIT(NL80211_IFTYPE_AP);
1132
1133 /*
1134 * Build up AP interface combinations structure.
1135 */
1136 if_combination = &rt2x00dev->if_combinations[IF_COMB_AP];
1137 if_combination->limits = if_limit;
1138 if_combination->n_limits = 1;
1139 if_combination->max_interfaces = if_limit->max;
1140 if_combination->num_different_channels = 1;
1141
1142 /*
1143 * Finally, specify the possible combinations to mac80211.
1144 */
1145 rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations;
1146 rt2x00dev->hw->wiphy->n_iface_combinations = 1;
1147}
1148
1121/* 1149/*
1122 * driver allocation handlers. 1150 * driver allocation handlers.
1123 */ 1151 */
@@ -1126,6 +1154,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1126 int retval = -ENOMEM; 1154 int retval = -ENOMEM;
1127 1155
1128 /* 1156 /*
1157 * Set possible interface combinations.
1158 */
1159 rt2x00lib_set_if_combinations(rt2x00dev);
1160
1161 /*
1129 * Allocate the driver data memory, if necessary. 1162 * Allocate the driver data memory, if necessary.
1130 */ 1163 */
1131 if (rt2x00dev->ops->drv_data_size > 0) { 1164 if (rt2x00dev->ops->drv_data_size > 0) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 4ff26c2159bf..98a9e48f8e4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
99 return retval; 99 return retval;
100} 100}
101 101
102void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 102void rt2x00mac_tx(struct ieee80211_hw *hw,
103 struct ieee80211_tx_control *control,
104 struct sk_buff *skb)
103{ 105{
104 struct rt2x00_dev *rt2x00dev = hw->priv; 106 struct rt2x00_dev *rt2x00dev = hw->priv;
105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 107 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -212,46 +214,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
212 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) 214 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
213 return -ENODEV; 215 return -ENODEV;
214 216
215 switch (vif->type) {
216 case NL80211_IFTYPE_AP:
217 /*
218 * We don't support mixed combinations of
219 * sta and ap interfaces.
220 */
221 if (rt2x00dev->intf_sta_count)
222 return -ENOBUFS;
223
224 /*
225 * Check if we exceeded the maximum amount
226 * of supported interfaces.
227 */
228 if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
229 return -ENOBUFS;
230
231 break;
232 case NL80211_IFTYPE_STATION:
233 case NL80211_IFTYPE_ADHOC:
234 case NL80211_IFTYPE_MESH_POINT:
235 case NL80211_IFTYPE_WDS:
236 /*
237 * We don't support mixed combinations of
238 * sta and ap interfaces.
239 */
240 if (rt2x00dev->intf_ap_count)
241 return -ENOBUFS;
242
243 /*
244 * Check if we exceeded the maximum amount
245 * of supported interfaces.
246 */
247 if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
248 return -ENOBUFS;
249
250 break;
251 default:
252 return -EINVAL;
253 }
254
255 /* 217 /*
256 * Loop through all beacon queues to find a free 218 * Loop through all beacon queues to find a free
257 * entry. Since there are as much beacon entries 219 * entry. Since there are as much beacon entries
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index f7e74a0a7759..e488b944a034 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
315static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, 315static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
316 struct sk_buff *skb, 316 struct sk_buff *skb,
317 struct txentry_desc *txdesc, 317 struct txentry_desc *txdesc,
318 struct ieee80211_sta *sta,
318 const struct rt2x00_rate *hwrate) 319 const struct rt2x00_rate *hwrate)
319{ 320{
320 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 321 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
322 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 323 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
323 struct rt2x00_sta *sta_priv = NULL; 324 struct rt2x00_sta *sta_priv = NULL;
324 325
325 if (tx_info->control.sta) { 326 if (sta) {
326 txdesc->u.ht.mpdu_density = 327 txdesc->u.ht.mpdu_density =
327 tx_info->control.sta->ht_cap.ampdu_density; 328 sta->ht_cap.ampdu_density;
328 329
329 sta_priv = sta_to_rt2x00_sta(tx_info->control.sta); 330 sta_priv = sta_to_rt2x00_sta(sta);
330 txdesc->u.ht.wcid = sta_priv->wcid; 331 txdesc->u.ht.wcid = sta_priv->wcid;
331 } 332 }
332 333
@@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
341 * MIMO PS should be set to 1 for STA's using dynamic SM PS 342 * MIMO PS should be set to 1 for STA's using dynamic SM PS
342 * when using more then one tx stream (>MCS7). 343 * when using more then one tx stream (>MCS7).
343 */ 344 */
344 if (tx_info->control.sta && txdesc->u.ht.mcs > 7 && 345 if (sta && txdesc->u.ht.mcs > 7 &&
345 ((tx_info->control.sta->ht_cap.cap & 346 ((sta->ht_cap.cap &
346 IEEE80211_HT_CAP_SM_PS) >> 347 IEEE80211_HT_CAP_SM_PS) >>
347 IEEE80211_HT_CAP_SM_PS_SHIFT) == 348 IEEE80211_HT_CAP_SM_PS_SHIFT) ==
348 WLAN_HT_CAP_SM_PS_DYNAMIC) 349 WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
409 410
410static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, 411static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
411 struct sk_buff *skb, 412 struct sk_buff *skb,
412 struct txentry_desc *txdesc) 413 struct txentry_desc *txdesc,
414 struct ieee80211_sta *sta)
413{ 415{
414 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 416 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
415 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 417 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
503 505
504 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) 506 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
505 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, 507 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
506 hwrate); 508 sta, hwrate);
507 else 509 else
508 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, 510 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
509 hwrate); 511 hwrate);
@@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
595 * after that we are free to use the skb->cb array 597 * after that we are free to use the skb->cb array
596 * for our information. 598 * for our information.
597 */ 599 */
598 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc); 600 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
599 601
600 /* 602 /*
601 * All information is retrieved from the skb->cb array, 603 * All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
740 * after that we are free to use the skb->cb array 742 * after that we are free to use the skb->cb array
741 * for our information. 743 * for our information.
742 */ 744 */
743 rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc); 745 rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
744 746
745 /* 747 /*
746 * Fill in skb descriptor 748 * Fill in skb descriptor
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index b8ec96163922..d6582a2fa353 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -243,7 +243,7 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
243 u32 reg; 243 u32 reg;
244 244
245 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 245 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
246 return rt2x00_get_field32(reg, MAC_CSR13_BIT5); 246 return rt2x00_get_field32(reg, MAC_CSR13_VAL5);
247} 247}
248 248
249#ifdef CONFIG_RT2X00_LIB_LEDS 249#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -715,11 +715,11 @@ static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev,
715 715
716 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 716 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
717 717
718 rt2x00_set_field32(&reg, MAC_CSR13_BIT4, p1); 718 rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0);
719 rt2x00_set_field32(&reg, MAC_CSR13_BIT12, 0); 719 rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1);
720 720
721 rt2x00_set_field32(&reg, MAC_CSR13_BIT3, !p2); 721 rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0);
722 rt2x00_set_field32(&reg, MAC_CSR13_BIT11, 0); 722 rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2);
723 723
724 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); 724 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
725} 725}
@@ -2855,7 +2855,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2855 * rfkill switch GPIO pin correctly. 2855 * rfkill switch GPIO pin correctly.
2856 */ 2856 */
2857 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 2857 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
2858 rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1); 2858 rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1);
2859 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); 2859 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
2860 2860
2861 /* 2861 /*
@@ -3045,7 +3045,6 @@ static const struct data_queue_desc rt61pci_queue_bcn = {
3045 3045
3046static const struct rt2x00_ops rt61pci_ops = { 3046static const struct rt2x00_ops rt61pci_ops = {
3047 .name = KBUILD_MODNAME, 3047 .name = KBUILD_MODNAME,
3048 .max_sta_intf = 1,
3049 .max_ap_intf = 4, 3048 .max_ap_intf = 4,
3050 .eeprom_size = EEPROM_SIZE, 3049 .eeprom_size = EEPROM_SIZE,
3051 .rf_size = RF_SIZE, 3050 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 8f3da5a56766..9bc6b6044e34 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -357,22 +357,22 @@ struct hw_pairwise_ta_entry {
357 357
358/* 358/*
359 * MAC_CSR13: GPIO. 359 * MAC_CSR13: GPIO.
360 * MAC_CSR13_VALx: GPIO value
361 * MAC_CSR13_DIRx: GPIO direction: 0 = output; 1 = input
360 */ 362 */
361#define MAC_CSR13 0x3034 363#define MAC_CSR13 0x3034
362#define MAC_CSR13_BIT0 FIELD32(0x00000001) 364#define MAC_CSR13_VAL0 FIELD32(0x00000001)
363#define MAC_CSR13_BIT1 FIELD32(0x00000002) 365#define MAC_CSR13_VAL1 FIELD32(0x00000002)
364#define MAC_CSR13_BIT2 FIELD32(0x00000004) 366#define MAC_CSR13_VAL2 FIELD32(0x00000004)
365#define MAC_CSR13_BIT3 FIELD32(0x00000008) 367#define MAC_CSR13_VAL3 FIELD32(0x00000008)
366#define MAC_CSR13_BIT4 FIELD32(0x00000010) 368#define MAC_CSR13_VAL4 FIELD32(0x00000010)
367#define MAC_CSR13_BIT5 FIELD32(0x00000020) 369#define MAC_CSR13_VAL5 FIELD32(0x00000020)
368#define MAC_CSR13_BIT6 FIELD32(0x00000040) 370#define MAC_CSR13_DIR0 FIELD32(0x00000100)
369#define MAC_CSR13_BIT7 FIELD32(0x00000080) 371#define MAC_CSR13_DIR1 FIELD32(0x00000200)
370#define MAC_CSR13_BIT8 FIELD32(0x00000100) 372#define MAC_CSR13_DIR2 FIELD32(0x00000400)
371#define MAC_CSR13_BIT9 FIELD32(0x00000200) 373#define MAC_CSR13_DIR3 FIELD32(0x00000800)
372#define MAC_CSR13_BIT10 FIELD32(0x00000400) 374#define MAC_CSR13_DIR4 FIELD32(0x00001000)
373#define MAC_CSR13_BIT11 FIELD32(0x00000800) 375#define MAC_CSR13_DIR5 FIELD32(0x00002000)
374#define MAC_CSR13_BIT12 FIELD32(0x00001000)
375#define MAC_CSR13_BIT13 FIELD32(0x00002000)
376 376
377/* 377/*
378 * MAC_CSR14: LED control register. 378 * MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 248436c13ce0..e5eb43b3eee7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -189,7 +189,7 @@ static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
189 u32 reg; 189 u32 reg;
190 190
191 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg); 191 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
192 return rt2x00_get_field32(reg, MAC_CSR13_BIT7); 192 return rt2x00_get_field32(reg, MAC_CSR13_VAL7);
193} 193}
194 194
195#ifdef CONFIG_RT2X00_LIB_LEDS 195#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2195,7 +2195,7 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2195 * rfkill switch GPIO pin correctly. 2195 * rfkill switch GPIO pin correctly.
2196 */ 2196 */
2197 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg); 2197 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
2198 rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0); 2198 rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0);
2199 rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg); 2199 rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
2200 2200
2201 /* 2201 /*
@@ -2382,7 +2382,6 @@ static const struct data_queue_desc rt73usb_queue_bcn = {
2382 2382
2383static const struct rt2x00_ops rt73usb_ops = { 2383static const struct rt2x00_ops rt73usb_ops = {
2384 .name = KBUILD_MODNAME, 2384 .name = KBUILD_MODNAME,
2385 .max_sta_intf = 1,
2386 .max_ap_intf = 4, 2385 .max_ap_intf = 4,
2387 .eeprom_size = EEPROM_SIZE, 2386 .eeprom_size = EEPROM_SIZE,
2388 .rf_size = RF_SIZE, 2387 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index df1cc116b83b..7577e0ba3877 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -267,24 +267,26 @@ struct hw_pairwise_ta_entry {
267 267
268/* 268/*
269 * MAC_CSR13: GPIO. 269 * MAC_CSR13: GPIO.
270 * MAC_CSR13_VALx: GPIO value
271 * MAC_CSR13_DIRx: GPIO direction: 0 = input; 1 = output
270 */ 272 */
271#define MAC_CSR13 0x3034 273#define MAC_CSR13 0x3034
272#define MAC_CSR13_BIT0 FIELD32(0x00000001) 274#define MAC_CSR13_VAL0 FIELD32(0x00000001)
273#define MAC_CSR13_BIT1 FIELD32(0x00000002) 275#define MAC_CSR13_VAL1 FIELD32(0x00000002)
274#define MAC_CSR13_BIT2 FIELD32(0x00000004) 276#define MAC_CSR13_VAL2 FIELD32(0x00000004)
275#define MAC_CSR13_BIT3 FIELD32(0x00000008) 277#define MAC_CSR13_VAL3 FIELD32(0x00000008)
276#define MAC_CSR13_BIT4 FIELD32(0x00000010) 278#define MAC_CSR13_VAL4 FIELD32(0x00000010)
277#define MAC_CSR13_BIT5 FIELD32(0x00000020) 279#define MAC_CSR13_VAL5 FIELD32(0x00000020)
278#define MAC_CSR13_BIT6 FIELD32(0x00000040) 280#define MAC_CSR13_VAL6 FIELD32(0x00000040)
279#define MAC_CSR13_BIT7 FIELD32(0x00000080) 281#define MAC_CSR13_VAL7 FIELD32(0x00000080)
280#define MAC_CSR13_BIT8 FIELD32(0x00000100) 282#define MAC_CSR13_DIR0 FIELD32(0x00000100)
281#define MAC_CSR13_BIT9 FIELD32(0x00000200) 283#define MAC_CSR13_DIR1 FIELD32(0x00000200)
282#define MAC_CSR13_BIT10 FIELD32(0x00000400) 284#define MAC_CSR13_DIR2 FIELD32(0x00000400)
283#define MAC_CSR13_BIT11 FIELD32(0x00000800) 285#define MAC_CSR13_DIR3 FIELD32(0x00000800)
284#define MAC_CSR13_BIT12 FIELD32(0x00001000) 286#define MAC_CSR13_DIR4 FIELD32(0x00001000)
285#define MAC_CSR13_BIT13 FIELD32(0x00002000) 287#define MAC_CSR13_DIR5 FIELD32(0x00002000)
286#define MAC_CSR13_BIT14 FIELD32(0x00004000) 288#define MAC_CSR13_DIR6 FIELD32(0x00004000)
287#define MAC_CSR13_BIT15 FIELD32(0x00008000) 289#define MAC_CSR13_DIR7 FIELD32(0x00008000)
288 290
289/* 291/*
290 * MAC_CSR14: LED control register. 292 * MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index aceaf689f737..021d83e1b1d3 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
244 return IRQ_HANDLED; 244 return IRQ_HANDLED;
245} 245}
246 246
247static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 247static void rtl8180_tx(struct ieee80211_hw *dev,
248 struct ieee80211_tx_control *control,
249 struct sk_buff *skb)
248{ 250{
249 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 251 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
250 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 252 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
710 /* TODO: use actual beacon queue */ 712 /* TODO: use actual beacon queue */
711 skb_set_queue_mapping(skb, 0); 713 skb_set_queue_mapping(skb, 0);
712 714
713 rtl8180_tx(dev, skb); 715 rtl8180_tx(dev, NULL, skb);
714 716
715resched: 717resched:
716 /* 718 /*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 533024095c43..7811b6315973 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb)
228 } 228 }
229} 229}
230 230
231static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 231static void rtl8187_tx(struct ieee80211_hw *dev,
232 struct ieee80211_tx_control *control,
233 struct sk_buff *skb)
232{ 234{
233 struct rtl8187_priv *priv = dev->priv; 235 struct rtl8187_priv *priv = dev->priv;
234 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 236 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
1076 /* TODO: use actual beacon queue */ 1078 /* TODO: use actual beacon queue */
1077 skb_set_queue_mapping(skb, 0); 1079 skb_set_queue_mapping(skb, 0);
1078 1080
1079 rtl8187_tx(dev, skb); 1081 rtl8187_tx(dev, NULL, skb);
1080 1082
1081resched: 1083resched:
1082 /* 1084 /*
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index cefac6a43601..6b28e92d1d21 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,6 +1,6 @@
1config RTL8192CE 1config RTL8192CE
2 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter" 2 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
3 depends on MAC80211 && PCI && EXPERIMENTAL 3 depends on MAC80211 && PCI
4 select FW_LOADER 4 select FW_LOADER
5 select RTLWIFI 5 select RTLWIFI
6 select RTL8192C_COMMON 6 select RTL8192C_COMMON
@@ -12,7 +12,7 @@ config RTL8192CE
12 12
13config RTL8192SE 13config RTL8192SE
14 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter" 14 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
15 depends on MAC80211 && EXPERIMENTAL && PCI 15 depends on MAC80211 && PCI
16 select FW_LOADER 16 select FW_LOADER
17 select RTLWIFI 17 select RTLWIFI
18 ---help--- 18 ---help---
@@ -23,7 +23,7 @@ config RTL8192SE
23 23
24config RTL8192DE 24config RTL8192DE
25 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" 25 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
26 depends on MAC80211 && EXPERIMENTAL && PCI 26 depends on MAC80211 && PCI
27 select FW_LOADER 27 select FW_LOADER
28 select RTLWIFI 28 select RTLWIFI
29 ---help--- 29 ---help---
@@ -34,7 +34,7 @@ config RTL8192DE
34 34
35config RTL8192CU 35config RTL8192CU
36 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" 36 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
37 depends on MAC80211 && USB && EXPERIMENTAL 37 depends on MAC80211 && USB
38 select FW_LOADER 38 select FW_LOADER
39 select RTLWIFI 39 select RTLWIFI
40 select RTL8192C_COMMON 40 select RTL8192C_COMMON
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 942e56b77b60..59381fe8ed06 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
1341 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); 1341 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
1342 1342
1343 info->control.rates[0].idx = 0; 1343 info->control.rates[0].idx = 0;
1344 info->control.sta = sta;
1345 info->band = hw->conf.channel->band; 1344 info->band = hw->conf.channel->band;
1346 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 1345 rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
1347 } 1346 }
1348err_free: 1347err_free:
1349 return 0; 1348 return 0;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a18ad2a98938..a7c0e52869ba 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
124 mutex_unlock(&rtlpriv->locks.conf_mutex); 124 mutex_unlock(&rtlpriv->locks.conf_mutex);
125} 125}
126 126
127static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 127static void rtl_op_tx(struct ieee80211_hw *hw,
128 struct ieee80211_tx_control *control,
129 struct sk_buff *skb)
128{ 130{
129 struct rtl_priv *rtlpriv = rtl_priv(hw); 131 struct rtl_priv *rtlpriv = rtl_priv(hw);
130 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 132 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
138 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) 140 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
139 goto err_free; 141 goto err_free;
140 142
141 if (!rtlpriv->intf_ops->waitq_insert(hw, skb)) 143 if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
142 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 144 rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
143 145
144 return; 146 return;
145 147
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5983631a1b1a..abc306b502ac 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -502,7 +502,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
502 _rtl_update_earlymode_info(hw, skb, 502 _rtl_update_earlymode_info(hw, skb,
503 &tcb_desc, tid); 503 &tcb_desc, tid);
504 504
505 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 505 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
506 } 506 }
507 } 507 }
508} 508}
@@ -927,7 +927,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
927 info = IEEE80211_SKB_CB(pskb); 927 info = IEEE80211_SKB_CB(pskb);
928 pdesc = &ring->desc[0]; 928 pdesc = &ring->desc[0];
929 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 929 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
930 info, pskb, BEACON_QUEUE, &tcb_desc); 930 info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
931 931
932 __skb_queue_tail(&ring->queue, pskb); 932 __skb_queue_tail(&ring->queue, pskb);
933 933
@@ -1303,11 +1303,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1303} 1303}
1304 1304
1305static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, 1305static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1306 struct ieee80211_sta *sta,
1306 struct sk_buff *skb) 1307 struct sk_buff *skb)
1307{ 1308{
1308 struct rtl_priv *rtlpriv = rtl_priv(hw); 1309 struct rtl_priv *rtlpriv = rtl_priv(hw);
1309 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1310 struct ieee80211_sta *sta = info->control.sta;
1311 struct rtl_sta_info *sta_entry = NULL; 1310 struct rtl_sta_info *sta_entry = NULL;
1312 u8 tid = rtl_get_tid(skb); 1311 u8 tid = rtl_get_tid(skb);
1313 1312
@@ -1335,13 +1334,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1335 return true; 1334 return true;
1336} 1335}
1337 1336
1338static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 1337static int rtl_pci_tx(struct ieee80211_hw *hw,
1339 struct rtl_tcb_desc *ptcb_desc) 1338 struct ieee80211_sta *sta,
1339 struct sk_buff *skb,
1340 struct rtl_tcb_desc *ptcb_desc)
1340{ 1341{
1341 struct rtl_priv *rtlpriv = rtl_priv(hw); 1342 struct rtl_priv *rtlpriv = rtl_priv(hw);
1342 struct rtl_sta_info *sta_entry = NULL; 1343 struct rtl_sta_info *sta_entry = NULL;
1343 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1344 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1344 struct ieee80211_sta *sta = info->control.sta;
1345 struct rtl8192_tx_ring *ring; 1345 struct rtl8192_tx_ring *ring;
1346 struct rtl_tx_desc *pdesc; 1346 struct rtl_tx_desc *pdesc;
1347 u8 idx; 1347 u8 idx;
@@ -1416,7 +1416,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1416 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 1416 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1417 1417
1418 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, 1418 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1419 info, skb, hw_queue, ptcb_desc); 1419 info, sta, skb, hw_queue, ptcb_desc);
1420 1420
1421 __skb_queue_tail(&ring->queue, skb); 1421 __skb_queue_tail(&ring->queue, skb);
1422 1422
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index a45afda8259c..1ca4e25c143b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -167,7 +167,7 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
168 dm_digtable->cur_igvalue = 0x20; 168 dm_digtable->cur_igvalue = 0x20;
169 dm_digtable->pre_igvalue = 0x0; 169 dm_digtable->pre_igvalue = 0x0;
170 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 170 dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
171 dm_digtable->presta_connectstate = DIG_STA_DISCONNECT; 171 dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
172 dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 172 dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; 173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -190,7 +190,7 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
190 long rssi_val_min = 0; 190 long rssi_val_min = 0;
191 191
192 if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) && 192 if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
193 (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) { 193 (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) {
194 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) 194 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
195 rssi_val_min = 195 rssi_val_min =
196 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > 196 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -199,8 +199,8 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
199 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 199 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
200 else 200 else
201 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 201 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
202 } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT || 202 } else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT ||
203 dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) { 203 dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) {
204 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 204 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
205 } else if (dm_digtable->curmultista_connectstate == 205 } else if (dm_digtable->curmultista_connectstate ==
206 DIG_MULTISTA_CONNECT) { 206 DIG_MULTISTA_CONNECT) {
@@ -334,7 +334,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
334 multi_sta = true; 334 multi_sta = true;
335 335
336 if (!multi_sta || 336 if (!multi_sta ||
337 dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) { 337 dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
338 initialized = false; 338 initialized = false;
339 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 339 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
340 return; 340 return;
@@ -378,15 +378,15 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
378 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 378 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
379 379
380 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 380 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
381 "presta_connectstate = %x, cursta_connectctate = %x\n", 381 "presta_connectstate = %x, cursta_connectstate = %x\n",
382 dm_digtable->presta_connectstate, 382 dm_digtable->presta_connectstate,
383 dm_digtable->cursta_connectctate); 383 dm_digtable->cursta_connectstate);
384 384
385 if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate 385 if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate
386 || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT 386 || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT
387 || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) { 387 || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
388 388
389 if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) { 389 if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
390 dm_digtable->rssi_val_min = 390 dm_digtable->rssi_val_min =
391 rtl92c_dm_initial_gain_min_pwdb(hw); 391 rtl92c_dm_initial_gain_min_pwdb(hw);
392 rtl92c_dm_ctrl_initgain_by_rssi(hw); 392 rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -407,7 +407,7 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
407 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 407 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
408 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 408 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
409 409
410 if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) { 410 if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
411 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); 411 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
412 412
413 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { 413 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +484,15 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
484 return; 484 return;
485 485
486 if (mac->link_state >= MAC80211_LINKED) 486 if (mac->link_state >= MAC80211_LINKED)
487 dm_digtable->cursta_connectctate = DIG_STA_CONNECT; 487 dm_digtable->cursta_connectstate = DIG_STA_CONNECT;
488 else 488 else
489 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 489 dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
490 490
491 rtl92c_dm_initial_gain_sta(hw); 491 rtl92c_dm_initial_gain_sta(hw);
492 rtl92c_dm_initial_gain_multi_sta(hw); 492 rtl92c_dm_initial_gain_multi_sta(hw);
493 rtl92c_dm_cck_packet_detection_thresh(hw); 493 rtl92c_dm_cck_packet_detection_thresh(hw);
494 494
495 dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate; 495 dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate;
496 496
497} 497}
498 498
@@ -1214,18 +1214,13 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1214 "PreState = %d, CurState = %d\n", 1214 "PreState = %d, CurState = %d\n",
1215 p_ra->pre_ratr_state, p_ra->ratr_state); 1215 p_ra->pre_ratr_state, p_ra->ratr_state);
1216 1216
1217 /* Only the PCI card uses sta in the update rate table 1217 rcu_read_lock();
1218 * callback routine */ 1218 sta = ieee80211_find_sta(mac->vif, mac->bssid);
1219 if (rtlhal->interface == INTF_PCI) {
1220 rcu_read_lock();
1221 sta = ieee80211_find_sta(mac->vif, mac->bssid);
1222 }
1223 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 1219 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
1224 p_ra->ratr_state); 1220 p_ra->ratr_state);
1225 1221
1226 p_ra->pre_ratr_state = p_ra->ratr_state; 1222 p_ra->pre_ratr_state = p_ra->ratr_state;
1227 if (rtlhal->interface == INTF_PCI) 1223 rcu_read_unlock();
1228 rcu_read_unlock();
1229 } 1224 }
1230 } 1225 }
1231} 1226}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 8a7b864faca3..883f23ae9519 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -577,8 +577,7 @@ static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
577 ring = &rtlpci->tx_ring[BEACON_QUEUE]; 577 ring = &rtlpci->tx_ring[BEACON_QUEUE];
578 578
579 pskb = __skb_dequeue(&ring->queue); 579 pskb = __skb_dequeue(&ring->queue);
580 if (pskb) 580 kfree_skb(pskb);
581 kfree_skb(pskb);
582 581
583 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); 582 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
584 583
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index dd4bb0950a57..86d73b32d995 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -1914,8 +1914,8 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1914 } 1914 }
1915 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1915 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1916 "ratr_bitmap :%x\n", ratr_bitmap); 1916 "ratr_bitmap :%x\n", ratr_bitmap);
1917 *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | 1917 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
1918 (ratr_index << 28)); 1918 (ratr_index << 28);
1919 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 1919 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
1920 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1920 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1921 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", 1921 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 7d8f96405f42..ea2e1bd847c8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -344,7 +344,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
344 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 344 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
345}; 345};
346 346
347DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = { 347static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
348 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)}, 348 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
349 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)}, 349 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
350 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)}, 350 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52166640f167..390d6d4fcaa0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
596 596
597void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 597void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
598 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 598 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
599 struct ieee80211_tx_info *info, struct sk_buff *skb, 599 struct ieee80211_tx_info *info,
600 struct ieee80211_sta *sta,
601 struct sk_buff *skb,
600 u8 hw_queue, struct rtl_tcb_desc *tcb_desc) 602 u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
601{ 603{
602 struct rtl_priv *rtlpriv = rtl_priv(hw); 604 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
604 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 606 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
605 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 607 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
606 bool defaultadapter = true; 608 bool defaultadapter = true;
607 struct ieee80211_sta *sta;
608 u8 *pdesc = pdesc_tx; 609 u8 *pdesc = pdesc_tx;
609 u16 seq_number; 610 u16 seq_number;
610 __le16 fc = hdr->frame_control; 611 __le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index c4adb9777365..a7cdd514cb2e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -713,6 +713,7 @@ struct rx_desc_92c {
713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
714 struct ieee80211_hdr *hdr, 714 struct ieee80211_hdr *hdr,
715 u8 *pdesc, struct ieee80211_tx_info *info, 715 u8 *pdesc, struct ieee80211_tx_info *info,
716 struct ieee80211_sta *sta,
716 struct sk_buff *skb, u8 hw_queue, 717 struct sk_buff *skb, u8 hw_queue,
717 struct rtl_tcb_desc *ptcb_desc); 718 struct rtl_tcb_desc *ptcb_desc);
718bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, 719bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 2e6eb356a93e..6e66f04c363f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,12 +491,14 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); 491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
492 for (index = 0; index < 16; index++) 492 for (index = 0; index < 16; index++)
493 checksum = checksum ^ (*(ptr + index)); 493 checksum = checksum ^ (*(ptr + index));
494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum)); 494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
495} 495}
496 496
497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
498 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 498 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
499 struct ieee80211_tx_info *info, struct sk_buff *skb, 499 struct ieee80211_tx_info *info,
500 struct ieee80211_sta *sta,
501 struct sk_buff *skb,
500 u8 queue_index, 502 u8 queue_index,
501 struct rtl_tcb_desc *tcb_desc) 503 struct rtl_tcb_desc *tcb_desc)
502{ 504{
@@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
504 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 506 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
505 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 507 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
506 bool defaultadapter = true; 508 bool defaultadapter = true;
507 struct ieee80211_sta *sta = info->control.sta = info->control.sta;
508 u8 *qc = ieee80211_get_qos_ctl(hdr); 509 u8 *qc = ieee80211_get_qos_ctl(hdr);
509 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
510 u16 seq_number; 511 u16 seq_number;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 332b06e78b00..725c53accc58 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
420 struct sk_buff_head *); 420 struct sk_buff_head *);
421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
422 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 422 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
423 struct ieee80211_tx_info *info, struct sk_buff *skb, 423 struct ieee80211_tx_info *info,
424 struct ieee80211_sta *sta,
425 struct sk_buff *skb,
424 u8 queue_index, 426 u8 queue_index,
425 struct rtl_tcb_desc *tcb_desc); 427 struct rtl_tcb_desc *tcb_desc);
426void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, 428void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index c0201ed69dd7..ed868c396c25 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -164,7 +164,7 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
165 de_digtable->cur_igvalue = 0x20; 165 de_digtable->cur_igvalue = 0x20;
166 de_digtable->pre_igvalue = 0x0; 166 de_digtable->pre_igvalue = 0x0;
167 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 167 de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
168 de_digtable->presta_connectstate = DIG_STA_DISCONNECT; 168 de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
169 de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 169 de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; 170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -310,7 +310,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
310 struct dig_t *de_digtable = &rtlpriv->dm_digtable; 310 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
311 unsigned long flag = 0; 311 unsigned long flag = 0;
312 312
313 if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) { 313 if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) {
314 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 314 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
315 if (de_digtable->min_undecorated_pwdb_for_dm <= 25) 315 if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
316 de_digtable->cur_cck_pd_state = 316 de_digtable->cur_cck_pd_state =
@@ -342,7 +342,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
342 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; 342 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
343 } 343 }
344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", 344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
345 de_digtable->cursta_connectctate == DIG_STA_CONNECT ? 345 de_digtable->cursta_connectstate == DIG_STA_CONNECT ?
346 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); 346 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", 347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
348 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 348 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -428,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
428 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); 428 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
429 /* Decide the current status and if modify initial gain or not */ 429 /* Decide the current status and if modify initial gain or not */
430 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) 430 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
431 de_digtable->cursta_connectctate = DIG_STA_CONNECT; 431 de_digtable->cursta_connectstate = DIG_STA_CONNECT;
432 else 432 else
433 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 433 de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
434 434
435 /* adjust initial gain according to false alarm counter */ 435 /* adjust initial gain according to false alarm counter */
436 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) 436 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index eb22dccc418b..23177076b97f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -570,8 +570,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
570 570
571 ring = &rtlpci->tx_ring[BEACON_QUEUE]; 571 ring = &rtlpci->tx_ring[BEACON_QUEUE];
572 pskb = __skb_dequeue(&ring->queue); 572 pskb = __skb_dequeue(&ring->queue);
573 if (pskb) 573 kfree_skb(pskb);
574 kfree_skb(pskb);
575 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); 574 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
576 pdesc = &ring->desc[idx]; 575 pdesc = &ring->desc[idx];
577 /* discard output from call below */ 576 /* discard output from call below */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 442031256bce..db0086062d05 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1314,7 +1314,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
1314 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; 1314 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
1315 1315
1316 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n"); 1316 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
1317 /*----Restore RFENV control type----*/ ; 1317 /*----Restore RFENV control type----*/
1318 switch (rfpath) { 1318 switch (rfpath) {
1319 case RF90_PATH_A: 1319 case RF90_PATH_A:
1320 case RF90_PATH_C: 1320 case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index f80690d82c11..4686f340b9d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
551 551
552void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 552void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
553 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 553 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
554 struct ieee80211_tx_info *info, struct sk_buff *skb, 554 struct ieee80211_tx_info *info,
555 struct ieee80211_sta *sta,
556 struct sk_buff *skb,
555 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 557 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
556{ 558{
557 struct rtl_priv *rtlpriv = rtl_priv(hw); 559 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
559 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 561 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
560 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 562 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
561 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 563 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
562 struct ieee80211_sta *sta = info->control.sta;
563 u8 *pdesc = pdesc_tx; 564 u8 *pdesc = pdesc_tx;
564 u16 seq_number; 565 u16 seq_number;
565 __le16 fc = hdr->frame_control; 566 __le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 057a52431b00..c1b5dfb79d53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -730,6 +730,7 @@ struct rx_desc_92d {
730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
731 struct ieee80211_hdr *hdr, 731 struct ieee80211_hdr *hdr,
732 u8 *pdesc, struct ieee80211_tx_info *info, 732 u8 *pdesc, struct ieee80211_tx_info *info,
733 struct ieee80211_sta *sta,
733 struct sk_buff *skb, u8 hw_queue, 734 struct sk_buff *skb, u8 hw_queue,
734 struct rtl_tcb_desc *ptcb_desc); 735 struct rtl_tcb_desc *ptcb_desc);
735bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, 736bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36d1cb3aef8a..e3cf4c02122a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
591 591
592void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, 592void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
593 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 593 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
594 struct ieee80211_tx_info *info, struct sk_buff *skb, 594 struct ieee80211_tx_info *info,
595 struct ieee80211_sta *sta,
596 struct sk_buff *skb,
595 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 597 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
596{ 598{
597 struct rtl_priv *rtlpriv = rtl_priv(hw); 599 struct rtl_priv *rtlpriv = rtl_priv(hw);
598 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 600 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
599 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 601 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 602 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
601 struct ieee80211_sta *sta = info->control.sta;
602 u8 *pdesc = pdesc_tx; 603 u8 *pdesc = pdesc_tx;
603 u16 seq_number; 604 u16 seq_number;
604 __le16 fc = hdr->frame_control; 605 __le16 fc = hdr->frame_control;
@@ -755,7 +756,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
755 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); 756 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
756 757
757 /* DOWRD 8 */ 758 /* DOWRD 8 */
758 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 759 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
759 760
760 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); 761 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
761} 762}
@@ -785,7 +786,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
785 /* 92SE need not to set TX packet size when firmware download */ 786 /* 92SE need not to set TX packet size when firmware download */
786 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len)); 787 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
787 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); 788 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
788 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 789 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
789 790
790 wmb(); 791 wmb();
791 SET_TX_DESC_OWN(pdesc, 1); 792 SET_TX_DESC_OWN(pdesc, 1);
@@ -804,7 +805,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
804 SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq); 805 SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
805 806
806 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); 807 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
807 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 808 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
808 809
809 wmb(); 810 wmb();
810 SET_TX_DESC_OWN(pdesc, 1); 811 SET_TX_DESC_OWN(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 011e7b0695f2..64dd66f287c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -31,6 +31,7 @@
31 31
32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, 32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
33 u8 *pdesc, struct ieee80211_tx_info *info, 33 u8 *pdesc, struct ieee80211_tx_info *info,
34 struct ieee80211_sta *sta,
34 struct sk_buff *skb, u8 hw_queue, 35 struct sk_buff *skb, u8 hw_queue,
35 struct rtl_tcb_desc *ptcb_desc); 36 struct rtl_tcb_desc *ptcb_desc);
36void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, 37void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index aa970fc18a21..030beb45d8b0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -120,7 +120,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
120 120
121 if (status < 0 && count++ < 4) 121 if (status < 0 && count++ < 4)
122 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n", 122 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
123 value, status, le32_to_cpu(*(u32 *)pdata)); 123 value, status, *(u32 *)pdata);
124 return status; 124 return status;
125} 125}
126 126
@@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
848 _rtl_submit_tx_urb(hw, _urb); 848 _rtl_submit_tx_urb(hw, _urb);
849} 849}
850 850
851static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, 851static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
852 u16 hw_queue) 852 struct ieee80211_sta *sta,
853 struct sk_buff *skb,
854 u16 hw_queue)
853{ 855{
854 struct rtl_priv *rtlpriv = rtl_priv(hw); 856 struct rtl_priv *rtlpriv = rtl_priv(hw);
855 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 857 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
891 seq_number += 1; 893 seq_number += 1;
892 seq_number <<= 4; 894 seq_number <<= 4;
893 } 895 }
894 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb, 896 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
895 hw_queue, &tcb_desc); 897 hw_queue, &tcb_desc);
896 if (!ieee80211_has_morefrags(hdr->frame_control)) { 898 if (!ieee80211_has_morefrags(hdr->frame_control)) {
897 if (qc) 899 if (qc)
@@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
901 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 903 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
902} 904}
903 905
904static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 906static int rtl_usb_tx(struct ieee80211_hw *hw,
907 struct ieee80211_sta *sta,
908 struct sk_buff *skb,
905 struct rtl_tcb_desc *dummy) 909 struct rtl_tcb_desc *dummy)
906{ 910{
907 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); 911 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
913 if (unlikely(is_hal_stop(rtlhal))) 917 if (unlikely(is_hal_stop(rtlhal)))
914 goto err_free; 918 goto err_free;
915 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb)); 919 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
916 _rtl_usb_tx_preprocess(hw, skb, hw_queue); 920 _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
917 _rtl_usb_transmit(hw, skb, hw_queue); 921 _rtl_usb_transmit(hw, skb, hw_queue);
918 return NETDEV_TX_OK; 922 return NETDEV_TX_OK;
919 923
@@ -923,6 +927,7 @@ err_free:
923} 927}
924 928
925static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, 929static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
930 struct ieee80211_sta *sta,
926 struct sk_buff *skb) 931 struct sk_buff *skb)
927{ 932{
928 return false; 933 return false;
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cdaa21f29710..f1b6bc693b0a 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -122,7 +122,7 @@ enum rt_eeprom_type {
122 EEPROM_BOOT_EFUSE, 122 EEPROM_BOOT_EFUSE,
123}; 123};
124 124
125enum rtl_status { 125enum ttl_status {
126 RTL_STATUS_INTERFACE_START = 0, 126 RTL_STATUS_INTERFACE_START = 0,
127}; 127};
128 128
@@ -135,7 +135,7 @@ enum hardware_type {
135 HARDWARE_TYPE_RTL8192CU, 135 HARDWARE_TYPE_RTL8192CU,
136 HARDWARE_TYPE_RTL8192DE, 136 HARDWARE_TYPE_RTL8192DE,
137 HARDWARE_TYPE_RTL8192DU, 137 HARDWARE_TYPE_RTL8192DU,
138 HARDWARE_TYPE_RTL8723E, 138 HARDWARE_TYPE_RTL8723AE,
139 HARDWARE_TYPE_RTL8723U, 139 HARDWARE_TYPE_RTL8723U,
140 140
141 /* keep it last */ 141 /* keep it last */
@@ -389,6 +389,7 @@ enum rt_enc_alg {
389 RSERVED_ENCRYPTION = 3, 389 RSERVED_ENCRYPTION = 3,
390 AESCCMP_ENCRYPTION = 4, 390 AESCCMP_ENCRYPTION = 4,
391 WEP104_ENCRYPTION = 5, 391 WEP104_ENCRYPTION = 5,
392 AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
392}; 393};
393 394
394enum rtl_hal_state { 395enum rtl_hal_state {
@@ -873,6 +874,7 @@ struct rtl_phy {
873 u32 adda_backup[16]; 874 u32 adda_backup[16];
874 u32 iqk_mac_backup[IQK_MAC_REG_NUM]; 875 u32 iqk_mac_backup[IQK_MAC_REG_NUM];
875 u32 iqk_bb_backup[10]; 876 u32 iqk_bb_backup[10];
877 bool iqk_initialized;
876 878
877 /* Dual mac */ 879 /* Dual mac */
878 bool need_iqk; 880 bool need_iqk;
@@ -910,6 +912,8 @@ struct rtl_phy {
910#define RTL_AGG_OPERATIONAL 3 912#define RTL_AGG_OPERATIONAL 3
911#define RTL_AGG_OFF 0 913#define RTL_AGG_OFF 0
912#define RTL_AGG_ON 1 914#define RTL_AGG_ON 1
915#define RTL_RX_AGG_START 1
916#define RTL_RX_AGG_STOP 0
913#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2 917#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
914#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3 918#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3
915 919
@@ -920,6 +924,7 @@ struct rtl_ht_agg {
920 u64 bitmap; 924 u64 bitmap;
921 u32 rate_n_flags; 925 u32 rate_n_flags;
922 u8 agg_state; 926 u8 agg_state;
927 u8 rx_agg_state;
923}; 928};
924 929
925struct rtl_tid_data { 930struct rtl_tid_data {
@@ -927,11 +932,19 @@ struct rtl_tid_data {
927 struct rtl_ht_agg agg; 932 struct rtl_ht_agg agg;
928}; 933};
929 934
935struct rssi_sta {
936 long undecorated_smoothed_pwdb;
937};
938
930struct rtl_sta_info { 939struct rtl_sta_info {
940 struct list_head list;
931 u8 ratr_index; 941 u8 ratr_index;
932 u8 wireless_mode; 942 u8 wireless_mode;
933 u8 mimo_ps; 943 u8 mimo_ps;
934 struct rtl_tid_data tids[MAX_TID_COUNT]; 944 struct rtl_tid_data tids[MAX_TID_COUNT];
945
946 /* just used for ap adhoc or mesh*/
947 struct rssi_sta rssi_stat;
935} __packed; 948} __packed;
936 949
937struct rtl_priv; 950struct rtl_priv;
@@ -1034,6 +1047,11 @@ struct rtl_mac {
1034struct rtl_hal { 1047struct rtl_hal {
1035 struct ieee80211_hw *hw; 1048 struct ieee80211_hw *hw;
1036 1049
1050 bool up_first_time;
1051 bool first_init;
1052 bool being_init_adapter;
1053 bool bbrf_ready;
1054
1037 enum intf_type interface; 1055 enum intf_type interface;
1038 u16 hw_type; /*92c or 92d or 92s and so on */ 1056 u16 hw_type; /*92c or 92d or 92s and so on */
1039 u8 ic_class; 1057 u8 ic_class;
@@ -1048,6 +1066,7 @@ struct rtl_hal {
1048 u16 fw_subversion; 1066 u16 fw_subversion;
1049 bool h2c_setinprogress; 1067 bool h2c_setinprogress;
1050 u8 last_hmeboxnum; 1068 u8 last_hmeboxnum;
1069 bool fw_ready;
1051 /*Reserve page start offset except beacon in TxQ. */ 1070 /*Reserve page start offset except beacon in TxQ. */
1052 u8 fw_rsvdpage_startoffset; 1071 u8 fw_rsvdpage_startoffset;
1053 u8 h2c_txcmd_seq; 1072 u8 h2c_txcmd_seq;
@@ -1083,6 +1102,8 @@ struct rtl_hal {
1083 bool load_imrandiqk_setting_for2g; 1102 bool load_imrandiqk_setting_for2g;
1084 1103
1085 bool disable_amsdu_8k; 1104 bool disable_amsdu_8k;
1105 bool master_of_dmsp;
1106 bool slave_of_dmsp;
1086}; 1107};
1087 1108
1088struct rtl_security { 1109struct rtl_security {
@@ -1144,6 +1165,9 @@ struct rtl_dm {
1144 bool disable_tx_int; 1165 bool disable_tx_int;
1145 char ofdm_index[2]; 1166 char ofdm_index[2];
1146 char cck_index; 1167 char cck_index;
1168
1169 /* DMSP */
1170 bool supp_phymode_switch;
1147}; 1171};
1148 1172
1149#define EFUSE_MAX_LOGICAL_SIZE 256 1173#define EFUSE_MAX_LOGICAL_SIZE 256
@@ -1337,6 +1361,10 @@ struct rtl_stats {
1337}; 1361};
1338 1362
1339struct rt_link_detect { 1363struct rt_link_detect {
1364 /* count for roaming */
1365 u32 bcn_rx_inperiod;
1366 u32 roam_times;
1367
1340 u32 num_tx_in4period[4]; 1368 u32 num_tx_in4period[4];
1341 u32 num_rx_in4period[4]; 1369 u32 num_rx_in4period[4];
1342 1370
@@ -1344,6 +1372,8 @@ struct rt_link_detect {
1344 u32 num_rx_inperiod; 1372 u32 num_rx_inperiod;
1345 1373
1346 bool busytraffic; 1374 bool busytraffic;
1375 bool tx_busy_traffic;
1376 bool rx_busy_traffic;
1347 bool higher_busytraffic; 1377 bool higher_busytraffic;
1348 bool higher_busyrxtraffic; 1378 bool higher_busyrxtraffic;
1349 1379
@@ -1418,6 +1448,7 @@ struct rtl_hal_ops {
1418 void (*fill_tx_desc) (struct ieee80211_hw *hw, 1448 void (*fill_tx_desc) (struct ieee80211_hw *hw,
1419 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 1449 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1420 struct ieee80211_tx_info *info, 1450 struct ieee80211_tx_info *info,
1451 struct ieee80211_sta *sta,
1421 struct sk_buff *skb, u8 hw_queue, 1452 struct sk_buff *skb, u8 hw_queue,
1422 struct rtl_tcb_desc *ptcb_desc); 1453 struct rtl_tcb_desc *ptcb_desc);
1423 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc, 1454 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1454,7 +1485,12 @@ struct rtl_hal_ops {
1454 u32 regaddr, u32 bitmask); 1485 u32 regaddr, u32 bitmask);
1455 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1486 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1456 u32 regaddr, u32 bitmask, u32 data); 1487 u32 regaddr, u32 bitmask, u32 data);
1488 void (*allow_all_destaddr)(struct ieee80211_hw *hw,
1489 bool allow_all_da, bool write_into_reg);
1457 void (*linked_set_reg) (struct ieee80211_hw *hw); 1490 void (*linked_set_reg) (struct ieee80211_hw *hw);
1491 void (*check_switch_to_dmdp) (struct ieee80211_hw *hw);
1492 void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
1493 void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
1458 bool (*phy_rf6052_config) (struct ieee80211_hw *hw); 1494 bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
1459 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw, 1495 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
1460 u8 *powerlevel); 1496 u8 *powerlevel);
@@ -1474,12 +1510,18 @@ struct rtl_intf_ops {
1474 void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); 1510 void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
1475 int (*adapter_start) (struct ieee80211_hw *hw); 1511 int (*adapter_start) (struct ieee80211_hw *hw);
1476 void (*adapter_stop) (struct ieee80211_hw *hw); 1512 void (*adapter_stop) (struct ieee80211_hw *hw);
1513 bool (*check_buddy_priv)(struct ieee80211_hw *hw,
1514 struct rtl_priv **buddy_priv);
1477 1515
1478 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb, 1516 int (*adapter_tx) (struct ieee80211_hw *hw,
1479 struct rtl_tcb_desc *ptcb_desc); 1517 struct ieee80211_sta *sta,
1518 struct sk_buff *skb,
1519 struct rtl_tcb_desc *ptcb_desc);
1480 void (*flush)(struct ieee80211_hw *hw, bool drop); 1520 void (*flush)(struct ieee80211_hw *hw, bool drop);
1481 int (*reset_trx_ring) (struct ieee80211_hw *hw); 1521 int (*reset_trx_ring) (struct ieee80211_hw *hw);
1482 bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb); 1522 bool (*waitq_insert) (struct ieee80211_hw *hw,
1523 struct ieee80211_sta *sta,
1524 struct sk_buff *skb);
1483 1525
1484 /*pci */ 1526 /*pci */
1485 void (*disable_aspm) (struct ieee80211_hw *hw); 1527 void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1554,11 +1596,16 @@ struct rtl_locks {
1554 spinlock_t h2c_lock; 1596 spinlock_t h2c_lock;
1555 spinlock_t rf_ps_lock; 1597 spinlock_t rf_ps_lock;
1556 spinlock_t rf_lock; 1598 spinlock_t rf_lock;
1599 spinlock_t lps_lock;
1557 spinlock_t waitq_lock; 1600 spinlock_t waitq_lock;
1601 spinlock_t entry_list_lock;
1558 spinlock_t usb_lock; 1602 spinlock_t usb_lock;
1559 1603
1560 /*Dual mac*/ 1604 /*Dual mac*/
1561 spinlock_t cck_and_rw_pagea_lock; 1605 spinlock_t cck_and_rw_pagea_lock;
1606
1607 /*Easy concurrent*/
1608 spinlock_t check_sendpkt_lock;
1562}; 1609};
1563 1610
1564struct rtl_works { 1611struct rtl_works {
@@ -1566,6 +1613,7 @@ struct rtl_works {
1566 1613
1567 /*timer */ 1614 /*timer */
1568 struct timer_list watchdog_timer; 1615 struct timer_list watchdog_timer;
1616 struct timer_list dualmac_easyconcurrent_retrytimer;
1569 1617
1570 /*task */ 1618 /*task */
1571 struct tasklet_struct irq_tasklet; 1619 struct tasklet_struct irq_tasklet;
@@ -1593,6 +1641,31 @@ struct rtl_debug {
1593 char proc_name[20]; 1641 char proc_name[20];
1594}; 1642};
1595 1643
1644#define MIMO_PS_STATIC 0
1645#define MIMO_PS_DYNAMIC 1
1646#define MIMO_PS_NOLIMIT 3
1647
1648struct rtl_dualmac_easy_concurrent_ctl {
1649 enum band_type currentbandtype_backfordmdp;
1650 bool close_bbandrf_for_dmsp;
1651 bool change_to_dmdp;
1652 bool change_to_dmsp;
1653 bool switch_in_process;
1654};
1655
1656struct rtl_dmsp_ctl {
1657 bool activescan_for_slaveofdmsp;
1658 bool scan_for_anothermac_fordmsp;
1659 bool scan_for_itself_fordmsp;
1660 bool writedig_for_anothermacofdmsp;
1661 u32 curdigvalue_for_anothermacofdmsp;
1662 bool changecckpdstate_for_anothermacofdmsp;
1663 u8 curcckpdstate_for_anothermacofdmsp;
1664 bool changetxhighpowerlvl_for_anothermacofdmsp;
1665 u8 curtxhighlvl_for_anothermacofdmsp;
1666 long rssivalmin_for_anothermacofdmsp;
1667};
1668
1596struct ps_t { 1669struct ps_t {
1597 u8 pre_ccastate; 1670 u8 pre_ccastate;
1598 u8 cur_ccasate; 1671 u8 cur_ccasate;
@@ -1619,7 +1692,7 @@ struct dig_t {
1619 u8 dig_twoport_algorithm; 1692 u8 dig_twoport_algorithm;
1620 u8 dig_dbgmode; 1693 u8 dig_dbgmode;
1621 u8 dig_slgorithm_switch; 1694 u8 dig_slgorithm_switch;
1622 u8 cursta_connectctate; 1695 u8 cursta_connectstate;
1623 u8 presta_connectstate; 1696 u8 presta_connectstate;
1624 u8 curmultista_connectstate; 1697 u8 curmultista_connectstate;
1625 char backoff_val; 1698 char backoff_val;
@@ -1652,8 +1725,20 @@ struct dig_t {
1652 char backoffval_range_min; 1725 char backoffval_range_min;
1653}; 1726};
1654 1727
1728struct rtl_global_var {
1729 /* from this list we can get
1730 * other adapter's rtl_priv */
1731 struct list_head glb_priv_list;
1732 spinlock_t glb_list_lock;
1733};
1734
1655struct rtl_priv { 1735struct rtl_priv {
1656 struct completion firmware_loading_complete; 1736 struct completion firmware_loading_complete;
1737 struct list_head list;
1738 struct rtl_priv *buddy_priv;
1739 struct rtl_global_var *glb_var;
1740 struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
1741 struct rtl_dmsp_ctl dmsp_ctl;
1657 struct rtl_locks locks; 1742 struct rtl_locks locks;
1658 struct rtl_works works; 1743 struct rtl_works works;
1659 struct rtl_mac mac80211; 1744 struct rtl_mac mac80211;
@@ -1674,6 +1759,9 @@ struct rtl_priv {
1674 1759
1675 struct rtl_rate_priv *rate_priv; 1760 struct rtl_rate_priv *rate_priv;
1676 1761
1762 /* sta entry list for ap adhoc or mesh */
1763 struct list_head entry_list;
1764
1677 struct rtl_debug dbg; 1765 struct rtl_debug dbg;
1678 int max_fw_size; 1766 int max_fw_size;
1679 1767
@@ -1815,9 +1903,9 @@ struct bt_coexist_info {
1815 EF1BYTE(*((u8 *)(_ptr))) 1903 EF1BYTE(*((u8 *)(_ptr)))
1816/* Read le16 data from memory and convert to host ordering */ 1904/* Read le16 data from memory and convert to host ordering */
1817#define READEF2BYTE(_ptr) \ 1905#define READEF2BYTE(_ptr) \
1818 EF2BYTE(*((u16 *)(_ptr))) 1906 EF2BYTE(*(_ptr))
1819#define READEF4BYTE(_ptr) \ 1907#define READEF4BYTE(_ptr) \
1820 EF4BYTE(*((u32 *)(_ptr))) 1908 EF4BYTE(*(_ptr))
1821 1909
1822/* Write data to memory */ 1910/* Write data to memory */
1823#define WRITEEF1BYTE(_ptr, _val) \ 1911#define WRITEEF1BYTE(_ptr, _val) \
@@ -1826,7 +1914,7 @@ struct bt_coexist_info {
1826#define WRITEEF2BYTE(_ptr, _val) \ 1914#define WRITEEF2BYTE(_ptr, _val) \
1827 (*((u16 *)(_ptr))) = EF2BYTE(_val) 1915 (*((u16 *)(_ptr))) = EF2BYTE(_val)
1828#define WRITEEF4BYTE(_ptr, _val) \ 1916#define WRITEEF4BYTE(_ptr, _val) \
1829 (*((u16 *)(_ptr))) = EF2BYTE(_val) 1917 (*((u32 *)(_ptr))) = EF2BYTE(_val)
1830 1918
1831/* Create a bit mask 1919/* Create a bit mask
1832 * Examples: 1920 * Examples:
@@ -1859,9 +1947,9 @@ struct bt_coexist_info {
1859 * 4-byte pointer in little-endian system. 1947 * 4-byte pointer in little-endian system.
1860 */ 1948 */
1861#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \ 1949#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
1862 (EF4BYTE(*((u32 *)(__pstart)))) 1950 (EF4BYTE(*((__le32 *)(__pstart))))
1863#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \ 1951#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
1864 (EF2BYTE(*((u16 *)(__pstart)))) 1952 (EF2BYTE(*((__le16 *)(__pstart))))
1865#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ 1953#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
1866 (EF1BYTE(*((u8 *)(__pstart)))) 1954 (EF1BYTE(*((u8 *)(__pstart))))
1867 1955
@@ -1908,13 +1996,13 @@ value to host byte ordering.*/
1908 * Set subfield of little-endian 4-byte value to specified value. 1996 * Set subfield of little-endian 4-byte value to specified value.
1909 */ 1997 */
1910#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ 1998#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
1911 *((u32 *)(__pstart)) = EF4BYTE \ 1999 *((u32 *)(__pstart)) = \
1912 ( \ 2000 ( \
1913 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \ 2001 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
1914 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \ 2002 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
1915 ); 2003 );
1916#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \ 2004#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
1917 *((u16 *)(__pstart)) = EF2BYTE \ 2005 *((u16 *)(__pstart)) = \
1918 ( \ 2006 ( \
1919 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \ 2007 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
1920 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \ 2008 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
@@ -2100,4 +2188,11 @@ static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
2100 return ieee80211_find_sta(vif, bssid); 2188 return ieee80211_find_sta(vif, bssid);
2101} 2189}
2102 2190
2191static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
2192 u8 *mac_addr)
2193{
2194 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2195 return ieee80211_find_sta(mac->vif, mac_addr);
2196}
2197
2103#endif 2198#endif
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3118c425bcf1..441cbccbd381 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -354,7 +354,9 @@ out:
354 return ret; 354 return ret;
355} 355}
356 356
357static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 357static void wl1251_op_tx(struct ieee80211_hw *hw,
358 struct ieee80211_tx_control *control,
359 struct sk_buff *skb)
358{ 360{
359 struct wl1251 *wl = hw->priv; 361 struct wl1251 *wl = hw->priv;
360 unsigned long flags; 362 unsigned long flags;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index f429fc110cb0..dadf1dbb002a 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -32,7 +32,6 @@
32#include "../wlcore/acx.h" 32#include "../wlcore/acx.h"
33#include "../wlcore/tx.h" 33#include "../wlcore/tx.h"
34#include "../wlcore/rx.h" 34#include "../wlcore/rx.h"
35#include "../wlcore/io.h"
36#include "../wlcore/boot.h" 35#include "../wlcore/boot.h"
37 36
38#include "wl12xx.h" 37#include "wl12xx.h"
@@ -1185,9 +1184,16 @@ static int wl12xx_enable_interrupts(struct wl1271 *wl)
1185 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, 1184 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
1186 WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK)); 1185 WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
1187 if (ret < 0) 1186 if (ret < 0)
1188 goto out; 1187 goto disable_interrupts;
1189 1188
1190 ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL); 1189 ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
1190 if (ret < 0)
1191 goto disable_interrupts;
1192
1193 return ret;
1194
1195disable_interrupts:
1196 wlcore_disable_interrupts(wl);
1191 1197
1192out: 1198out:
1193 return ret; 1199 return ret;
@@ -1583,7 +1589,10 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1583 return wlcore_set_key(wl, cmd, vif, sta, key_conf); 1589 return wlcore_set_key(wl, cmd, vif, sta, key_conf);
1584} 1590}
1585 1591
1592static int wl12xx_setup(struct wl1271 *wl);
1593
1586static struct wlcore_ops wl12xx_ops = { 1594static struct wlcore_ops wl12xx_ops = {
1595 .setup = wl12xx_setup,
1587 .identify_chip = wl12xx_identify_chip, 1596 .identify_chip = wl12xx_identify_chip,
1588 .identify_fw = wl12xx_identify_fw, 1597 .identify_fw = wl12xx_identify_fw,
1589 .boot = wl12xx_boot, 1598 .boot = wl12xx_boot,
@@ -1624,26 +1633,15 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
1624 }, 1633 },
1625}; 1634};
1626 1635
1627static int __devinit wl12xx_probe(struct platform_device *pdev) 1636static int wl12xx_setup(struct wl1271 *wl)
1628{ 1637{
1629 struct wl12xx_platform_data *pdata = pdev->dev.platform_data; 1638 struct wl12xx_priv *priv = wl->priv;
1630 struct wl1271 *wl; 1639 struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
1631 struct ieee80211_hw *hw;
1632 struct wl12xx_priv *priv;
1633
1634 hw = wlcore_alloc_hw(sizeof(*priv));
1635 if (IS_ERR(hw)) {
1636 wl1271_error("can't allocate hw");
1637 return PTR_ERR(hw);
1638 }
1639 1640
1640 wl = hw->priv;
1641 priv = wl->priv;
1642 wl->ops = &wl12xx_ops;
1643 wl->ptable = wl12xx_ptable;
1644 wl->rtable = wl12xx_rtable; 1641 wl->rtable = wl12xx_rtable;
1645 wl->num_tx_desc = 16; 1642 wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
1646 wl->num_rx_desc = 8; 1643 wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
1644 wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
1647 wl->band_rate_to_idx = wl12xx_band_rate_to_idx; 1645 wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
1648 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX; 1646 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
1649 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0; 1647 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1695,7 +1693,36 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
1695 wl1271_error("Invalid tcxo parameter %s", tcxo_param); 1693 wl1271_error("Invalid tcxo parameter %s", tcxo_param);
1696 } 1694 }
1697 1695
1698 return wlcore_probe(wl, pdev); 1696 return 0;
1697}
1698
1699static int __devinit wl12xx_probe(struct platform_device *pdev)
1700{
1701 struct wl1271 *wl;
1702 struct ieee80211_hw *hw;
1703 int ret;
1704
1705 hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
1706 WL12XX_AGGR_BUFFER_SIZE);
1707 if (IS_ERR(hw)) {
1708 wl1271_error("can't allocate hw");
1709 ret = PTR_ERR(hw);
1710 goto out;
1711 }
1712
1713 wl = hw->priv;
1714 wl->ops = &wl12xx_ops;
1715 wl->ptable = wl12xx_ptable;
1716 ret = wlcore_probe(wl, pdev);
1717 if (ret)
1718 goto out_free;
1719
1720 return ret;
1721
1722out_free:
1723 wlcore_free_hw(wl);
1724out:
1725 return ret;
1699} 1726}
1700 1727
1701static const struct platform_device_id wl12xx_id_table[] __devinitconst = { 1728static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
@@ -1714,17 +1741,7 @@ static struct platform_driver wl12xx_driver = {
1714 } 1741 }
1715}; 1742};
1716 1743
1717static int __init wl12xx_init(void) 1744module_platform_driver(wl12xx_driver);
1718{
1719 return platform_driver_register(&wl12xx_driver);
1720}
1721module_init(wl12xx_init);
1722
1723static void __exit wl12xx_exit(void)
1724{
1725 platform_driver_unregister(&wl12xx_driver);
1726}
1727module_exit(wl12xx_exit);
1728 1745
1729module_param_named(fref, fref_param, charp, 0); 1746module_param_named(fref, fref_param, charp, 0);
1730MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52"); 1747MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 26990fb4edea..7182bbf6625d 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
38#define WL128X_SUBTYPE_VER 2 38#define WL128X_SUBTYPE_VER 2
39#define WL128X_MINOR_VER 115 39#define WL128X_MINOR_VER 115
40 40
41#define WL12XX_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
42
43#define WL12XX_NUM_TX_DESCRIPTORS 16
44#define WL12XX_NUM_RX_DESCRIPTORS 8
45
46#define WL12XX_NUM_MAC_ADDRESSES 2
47
41struct wl127x_rx_mem_pool_addr { 48struct wl127x_rx_mem_pool_addr {
42 u32 addr; 49 u32 addr;
43 u32 addr_extra; 50 u32 addr_extra;
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 3ce6f1039af3..7f1669cdea09 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -220,7 +220,7 @@ static ssize_t clear_fw_stats_write(struct file *file,
220 220
221 mutex_lock(&wl->mutex); 221 mutex_lock(&wl->mutex);
222 222
223 if (wl->state == WL1271_STATE_OFF) 223 if (unlikely(wl->state != WLCORE_STATE_ON))
224 goto out; 224 goto out;
225 225
226 ret = wl18xx_acx_clear_statistics(wl); 226 ret = wl18xx_acx_clear_statistics(wl);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 69042bb9a097..a39682a7c25f 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -30,7 +30,6 @@
30#include "../wlcore/acx.h" 30#include "../wlcore/acx.h"
31#include "../wlcore/tx.h" 31#include "../wlcore/tx.h"
32#include "../wlcore/rx.h" 32#include "../wlcore/rx.h"
33#include "../wlcore/io.h"
34#include "../wlcore/boot.h" 33#include "../wlcore/boot.h"
35 34
36#include "reg.h" 35#include "reg.h"
@@ -46,7 +45,6 @@
46static char *ht_mode_param = NULL; 45static char *ht_mode_param = NULL;
47static char *board_type_param = NULL; 46static char *board_type_param = NULL;
48static bool checksum_param = false; 47static bool checksum_param = false;
49static bool enable_11a_param = true;
50static int num_rx_desc_param = -1; 48static int num_rx_desc_param = -1;
51 49
52/* phy paramters */ 50/* phy paramters */
@@ -416,7 +414,7 @@ static struct wlcore_conf wl18xx_conf = {
416 .snr_threshold = 0, 414 .snr_threshold = 0,
417 }, 415 },
418 .ht = { 416 .ht = {
419 .rx_ba_win_size = 10, 417 .rx_ba_win_size = 32,
420 .tx_ba_win_size = 64, 418 .tx_ba_win_size = 64,
421 .inactivity_timeout = 10000, 419 .inactivity_timeout = 10000,
422 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP, 420 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
@@ -506,8 +504,8 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
506 .rdl = 0x01, 504 .rdl = 0x01,
507 .auto_detect = 0x00, 505 .auto_detect = 0x00,
508 .dedicated_fem = FEM_NONE, 506 .dedicated_fem = FEM_NONE,
509 .low_band_component = COMPONENT_2_WAY_SWITCH, 507 .low_band_component = COMPONENT_3_WAY_SWITCH,
510 .low_band_component_type = 0x06, 508 .low_band_component_type = 0x04,
511 .high_band_component = COMPONENT_2_WAY_SWITCH, 509 .high_band_component = COMPONENT_2_WAY_SWITCH,
512 .high_band_component_type = 0x09, 510 .high_band_component_type = 0x09,
513 .tcxo_ldo_voltage = 0x00, 511 .tcxo_ldo_voltage = 0x00,
@@ -813,6 +811,13 @@ static int wl18xx_enable_interrupts(struct wl1271 *wl)
813 811
814 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, 812 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
815 WL1271_ACX_INTR_ALL & ~intr_mask); 813 WL1271_ACX_INTR_ALL & ~intr_mask);
814 if (ret < 0)
815 goto disable_interrupts;
816
817 return ret;
818
819disable_interrupts:
820 wlcore_disable_interrupts(wl);
816 821
817out: 822out:
818 return ret; 823 return ret;
@@ -1203,6 +1208,12 @@ static int wl18xx_handle_static_data(struct wl1271 *wl,
1203 struct wl18xx_static_data_priv *static_data_priv = 1208 struct wl18xx_static_data_priv *static_data_priv =
1204 (struct wl18xx_static_data_priv *) static_data->priv; 1209 (struct wl18xx_static_data_priv *) static_data->priv;
1205 1210
1211 strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
1212 sizeof(wl->chip.phy_fw_ver_str));
1213
1214 /* make sure the string is NULL-terminated */
1215 wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0';
1216
1206 wl1271_info("PHY firmware version: %s", static_data_priv->phy_version); 1217 wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
1207 1218
1208 return 0; 1219 return 0;
@@ -1241,13 +1252,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1241 if (!change_spare) 1252 if (!change_spare)
1242 return wlcore_set_key(wl, cmd, vif, sta, key_conf); 1253 return wlcore_set_key(wl, cmd, vif, sta, key_conf);
1243 1254
1244 /*
1245 * stop the queues and flush to ensure the next packets are
1246 * in sync with FW spare block accounting
1247 */
1248 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
1249 wl1271_tx_flush(wl);
1250
1251 ret = wlcore_set_key(wl, cmd, vif, sta, key_conf); 1255 ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
1252 if (ret < 0) 1256 if (ret < 0)
1253 goto out; 1257 goto out;
@@ -1270,7 +1274,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1270 } 1274 }
1271 1275
1272out: 1276out:
1273 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
1274 return ret; 1277 return ret;
1275} 1278}
1276 1279
@@ -1293,7 +1296,10 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
1293 return buf_offset; 1296 return buf_offset;
1294} 1297}
1295 1298
1299static int wl18xx_setup(struct wl1271 *wl);
1300
1296static struct wlcore_ops wl18xx_ops = { 1301static struct wlcore_ops wl18xx_ops = {
1302 .setup = wl18xx_setup,
1297 .identify_chip = wl18xx_identify_chip, 1303 .identify_chip = wl18xx_identify_chip,
1298 .boot = wl18xx_boot, 1304 .boot = wl18xx_boot,
1299 .plt_init = wl18xx_plt_init, 1305 .plt_init = wl18xx_plt_init,
@@ -1374,27 +1380,15 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
1374 }, 1380 },
1375}; 1381};
1376 1382
1377static int __devinit wl18xx_probe(struct platform_device *pdev) 1383static int wl18xx_setup(struct wl1271 *wl)
1378{ 1384{
1379 struct wl1271 *wl; 1385 struct wl18xx_priv *priv = wl->priv;
1380 struct ieee80211_hw *hw;
1381 struct wl18xx_priv *priv;
1382 int ret; 1386 int ret;
1383 1387
1384 hw = wlcore_alloc_hw(sizeof(*priv));
1385 if (IS_ERR(hw)) {
1386 wl1271_error("can't allocate hw");
1387 ret = PTR_ERR(hw);
1388 goto out;
1389 }
1390
1391 wl = hw->priv;
1392 priv = wl->priv;
1393 wl->ops = &wl18xx_ops;
1394 wl->ptable = wl18xx_ptable;
1395 wl->rtable = wl18xx_rtable; 1388 wl->rtable = wl18xx_rtable;
1396 wl->num_tx_desc = 32; 1389 wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
1397 wl->num_rx_desc = 32; 1390 wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
1391 wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
1398 wl->band_rate_to_idx = wl18xx_band_rate_to_idx; 1392 wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
1399 wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX; 1393 wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
1400 wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0; 1394 wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1405,9 +1399,9 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1405 if (num_rx_desc_param != -1) 1399 if (num_rx_desc_param != -1)
1406 wl->num_rx_desc = num_rx_desc_param; 1400 wl->num_rx_desc = num_rx_desc_param;
1407 1401
1408 ret = wl18xx_conf_init(wl, &pdev->dev); 1402 ret = wl18xx_conf_init(wl, wl->dev);
1409 if (ret < 0) 1403 if (ret < 0)
1410 goto out_free; 1404 return ret;
1411 1405
1412 /* If the module param is set, update it in conf */ 1406 /* If the module param is set, update it in conf */
1413 if (board_type_param) { 1407 if (board_type_param) {
@@ -1424,27 +1418,14 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1424 } else { 1418 } else {
1425 wl1271_error("invalid board type '%s'", 1419 wl1271_error("invalid board type '%s'",
1426 board_type_param); 1420 board_type_param);
1427 ret = -EINVAL; 1421 return -EINVAL;
1428 goto out_free;
1429 } 1422 }
1430 } 1423 }
1431 1424
1432 /* HACK! Just for now we hardcode COM8 and HDK to 0x06 */ 1425 if (priv->conf.phy.board_type >= NUM_BOARD_TYPES) {
1433 switch (priv->conf.phy.board_type) {
1434 case BOARD_TYPE_HDK_18XX:
1435 case BOARD_TYPE_COM8_18XX:
1436 priv->conf.phy.low_band_component_type = 0x06;
1437 break;
1438 case BOARD_TYPE_FPGA_18XX:
1439 case BOARD_TYPE_DVP_18XX:
1440 case BOARD_TYPE_EVB_18XX:
1441 priv->conf.phy.low_band_component_type = 0x05;
1442 break;
1443 default:
1444 wl1271_error("invalid board type '%d'", 1426 wl1271_error("invalid board type '%d'",
1445 priv->conf.phy.board_type); 1427 priv->conf.phy.board_type);
1446 ret = -EINVAL; 1428 return -EINVAL;
1447 goto out_free;
1448 } 1429 }
1449 1430
1450 if (low_band_component_param != -1) 1431 if (low_band_component_param != -1)
@@ -1476,22 +1457,21 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1476 priv->conf.ht.mode = HT_MODE_SISO20; 1457 priv->conf.ht.mode = HT_MODE_SISO20;
1477 else { 1458 else {
1478 wl1271_error("invalid ht_mode '%s'", ht_mode_param); 1459 wl1271_error("invalid ht_mode '%s'", ht_mode_param);
1479 ret = -EINVAL; 1460 return -EINVAL;
1480 goto out_free;
1481 } 1461 }
1482 } 1462 }
1483 1463
1484 if (priv->conf.ht.mode == HT_MODE_DEFAULT) { 1464 if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
1485 /* 1465 /*
1486 * Only support mimo with multiple antennas. Fall back to 1466 * Only support mimo with multiple antennas. Fall back to
1487 * siso20. 1467 * siso40.
1488 */ 1468 */
1489 if (wl18xx_is_mimo_supported(wl)) 1469 if (wl18xx_is_mimo_supported(wl))
1490 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, 1470 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
1491 &wl18xx_mimo_ht_cap_2ghz); 1471 &wl18xx_mimo_ht_cap_2ghz);
1492 else 1472 else
1493 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, 1473 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
1494 &wl18xx_siso20_ht_cap); 1474 &wl18xx_siso40_ht_cap_2ghz);
1495 1475
1496 /* 5Ghz is always wide */ 1476 /* 5Ghz is always wide */
1497 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, 1477 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
@@ -1513,9 +1493,34 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1513 wl18xx_ops.init_vif = NULL; 1493 wl18xx_ops.init_vif = NULL;
1514 } 1494 }
1515 1495
1516 wl->enable_11a = enable_11a_param; 1496 /* Enable 11a Band only if we have 5G antennas */
1497 wl->enable_11a = (priv->conf.phy.number_of_assembled_ant5 != 0);
1498
1499 return 0;
1500}
1501
1502static int __devinit wl18xx_probe(struct platform_device *pdev)
1503{
1504 struct wl1271 *wl;
1505 struct ieee80211_hw *hw;
1506 int ret;
1507
1508 hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
1509 WL18XX_AGGR_BUFFER_SIZE);
1510 if (IS_ERR(hw)) {
1511 wl1271_error("can't allocate hw");
1512 ret = PTR_ERR(hw);
1513 goto out;
1514 }
1515
1516 wl = hw->priv;
1517 wl->ops = &wl18xx_ops;
1518 wl->ptable = wl18xx_ptable;
1519 ret = wlcore_probe(wl, pdev);
1520 if (ret)
1521 goto out_free;
1517 1522
1518 return wlcore_probe(wl, pdev); 1523 return ret;
1519 1524
1520out_free: 1525out_free:
1521 wlcore_free_hw(wl); 1526 wlcore_free_hw(wl);
@@ -1539,18 +1544,7 @@ static struct platform_driver wl18xx_driver = {
1539 } 1544 }
1540}; 1545};
1541 1546
1542static int __init wl18xx_init(void) 1547module_platform_driver(wl18xx_driver);
1543{
1544 return platform_driver_register(&wl18xx_driver);
1545}
1546module_init(wl18xx_init);
1547
1548static void __exit wl18xx_exit(void)
1549{
1550 platform_driver_unregister(&wl18xx_driver);
1551}
1552module_exit(wl18xx_exit);
1553
1554module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR); 1548module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
1555MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20"); 1549MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
1556 1550
@@ -1561,9 +1555,6 @@ MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
1561module_param_named(checksum, checksum_param, bool, S_IRUSR); 1555module_param_named(checksum, checksum_param, bool, S_IRUSR);
1562MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)"); 1556MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
1563 1557
1564module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
1565MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
1566
1567module_param_named(dc2dc, dc2dc_param, int, S_IRUSR); 1558module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
1568MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)"); 1559MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
1569 1560
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 6452396fa1d4..96a1e438d677 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -33,6 +33,13 @@
33 33
34#define WL18XX_CMD_MAX_SIZE 740 34#define WL18XX_CMD_MAX_SIZE 740
35 35
36#define WL18XX_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
37
38#define WL18XX_NUM_TX_DESCRIPTORS 32
39#define WL18XX_NUM_RX_DESCRIPTORS 32
40
41#define WL18XX_NUM_MAC_ADDRESSES 3
42
36struct wl18xx_priv { 43struct wl18xx_priv {
37 /* buffer for sending commands to FW */ 44 /* buffer for sending commands to FW */
38 u8 cmd_buf[WL18XX_CMD_MAX_SIZE]; 45 u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 20e1bd923832..eaef3f41b252 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -59,6 +59,9 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
59 u16 status; 59 u16 status;
60 u16 poll_count = 0; 60 u16 poll_count = 0;
61 61
62 if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
63 return -EIO;
64
62 cmd = buf; 65 cmd = buf;
63 cmd->id = cpu_to_le16(id); 66 cmd->id = cpu_to_le16(id);
64 cmd->status = 0; 67 cmd->status = 0;
@@ -990,7 +993,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
990 993
991 ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV, 994 ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV,
992 skb->data, skb->len, 995 skb->data, skb->len,
993 CMD_TEMPL_KLV_IDX_NULL_DATA, 996 wlvif->sta.klv_template_id,
994 wlvif->basic_rate); 997 wlvif->basic_rate);
995 998
996out: 999out:
@@ -1785,10 +1788,17 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1785 wlvif->bss_type == BSS_TYPE_IBSS))) 1788 wlvif->bss_type == BSS_TYPE_IBSS)))
1786 return -EINVAL; 1789 return -EINVAL;
1787 1790
1788 ret = wl12xx_cmd_role_start_dev(wl, wlvif); 1791 ret = wl12xx_cmd_role_enable(wl,
1792 wl12xx_wlvif_to_vif(wlvif)->addr,
1793 WL1271_ROLE_DEVICE,
1794 &wlvif->dev_role_id);
1789 if (ret < 0) 1795 if (ret < 0)
1790 goto out; 1796 goto out;
1791 1797
1798 ret = wl12xx_cmd_role_start_dev(wl, wlvif);
1799 if (ret < 0)
1800 goto out_disable;
1801
1792 ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id); 1802 ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
1793 if (ret < 0) 1803 if (ret < 0)
1794 goto out_stop; 1804 goto out_stop;
@@ -1797,6 +1807,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1797 1807
1798out_stop: 1808out_stop:
1799 wl12xx_cmd_role_stop_dev(wl, wlvif); 1809 wl12xx_cmd_role_stop_dev(wl, wlvif);
1810out_disable:
1811 wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
1800out: 1812out:
1801 return ret; 1813 return ret;
1802} 1814}
@@ -1824,6 +1836,11 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1824 ret = wl12xx_cmd_role_stop_dev(wl, wlvif); 1836 ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
1825 if (ret < 0) 1837 if (ret < 0)
1826 goto out; 1838 goto out;
1839
1840 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
1841 if (ret < 0)
1842 goto out;
1843
1827out: 1844out:
1828 return ret; 1845 return ret;
1829} 1846}
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 4ef0b095f0d6..2409f3d71f63 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -157,11 +157,6 @@ enum wl1271_commands {
157 157
158#define MAX_CMD_PARAMS 572 158#define MAX_CMD_PARAMS 572
159 159
160enum {
161 CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
162 CMD_TEMPL_KLV_IDX_MAX = 4
163};
164
165enum cmd_templ { 160enum cmd_templ {
166 CMD_TEMPL_NULL_DATA = 0, 161 CMD_TEMPL_NULL_DATA = 0,
167 CMD_TEMPL_BEACON, 162 CMD_TEMPL_BEACON,
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index d77224f2ac6b..9e40760bafe1 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -412,8 +412,7 @@ struct conf_rx_settings {
412#define CONF_TX_RATE_RETRY_LIMIT 10 412#define CONF_TX_RATE_RETRY_LIMIT 10
413 413
414/* basic rates for p2p operations (probe req/resp, etc.) */ 414/* basic rates for p2p operations (probe req/resp, etc.) */
415#define CONF_TX_RATE_MASK_BASIC_P2P (CONF_HW_BIT_RATE_6MBPS | \ 415#define CONF_TX_RATE_MASK_BASIC_P2P CONF_HW_BIT_RATE_6MBPS
416 CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS)
417 416
418/* 417/*
419 * Rates supported for data packets when operating as AP. Note the absence 418 * Rates supported for data packets when operating as AP. Note the absence
diff --git a/drivers/net/wireless/ti/wlcore/debug.h b/drivers/net/wireless/ti/wlcore/debug.h
index 6b800b3cbea5..db4bf5a68ce2 100644
--- a/drivers/net/wireless/ti/wlcore/debug.h
+++ b/drivers/net/wireless/ti/wlcore/debug.h
@@ -28,7 +28,7 @@
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/printk.h> 29#include <linux/printk.h>
30 30
31#define DRIVER_NAME "wl12xx" 31#define DRIVER_NAME "wlcore"
32#define DRIVER_PREFIX DRIVER_NAME ": " 32#define DRIVER_PREFIX DRIVER_NAME ": "
33 33
34enum { 34enum {
@@ -73,11 +73,21 @@ extern u32 wl12xx_debug_level;
73#define wl1271_info(fmt, arg...) \ 73#define wl1271_info(fmt, arg...) \
74 pr_info(DRIVER_PREFIX fmt "\n", ##arg) 74 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
75 75
76/* define the debug macro differently if dynamic debug is supported */
77#if defined(CONFIG_DYNAMIC_DEBUG)
76#define wl1271_debug(level, fmt, arg...) \ 78#define wl1271_debug(level, fmt, arg...) \
77 do { \ 79 do { \
78 if (level & wl12xx_debug_level) \ 80 if (unlikely(level & wl12xx_debug_level)) \
79 pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \ 81 dynamic_pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
82 } while (0)
83#else
84#define wl1271_debug(level, fmt, arg...) \
85 do { \
86 if (unlikely(level & wl12xx_debug_level)) \
87 printk(KERN_DEBUG pr_fmt(DRIVER_PREFIX fmt "\n"), \
88 ##arg); \
80 } while (0) 89 } while (0)
90#endif
81 91
82/* TODO: use pr_debug_hex_dump when it becomes available */ 92/* TODO: use pr_debug_hex_dump when it becomes available */
83#define wl1271_dump(level, prefix, buf, len) \ 93#define wl1271_dump(level, prefix, buf, len) \
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 80dbc5304fac..c86bb00c2488 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -62,11 +62,14 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
62 62
63 mutex_lock(&wl->mutex); 63 mutex_lock(&wl->mutex);
64 64
65 if (unlikely(wl->state != WLCORE_STATE_ON))
66 goto out;
67
65 ret = wl1271_ps_elp_wakeup(wl); 68 ret = wl1271_ps_elp_wakeup(wl);
66 if (ret < 0) 69 if (ret < 0)
67 goto out; 70 goto out;
68 71
69 if (wl->state == WL1271_STATE_ON && !wl->plt && 72 if (!wl->plt &&
70 time_after(jiffies, wl->stats.fw_stats_update + 73 time_after(jiffies, wl->stats.fw_stats_update +
71 msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) { 74 msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
72 wl1271_acx_statistics(wl, wl->stats.fw_stats); 75 wl1271_acx_statistics(wl, wl->stats.fw_stats);
@@ -286,7 +289,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
286 289
287 wl->conf.conn.dynamic_ps_timeout = value; 290 wl->conf.conn.dynamic_ps_timeout = value;
288 291
289 if (wl->state == WL1271_STATE_OFF) 292 if (unlikely(wl->state != WLCORE_STATE_ON))
290 goto out; 293 goto out;
291 294
292 ret = wl1271_ps_elp_wakeup(wl); 295 ret = wl1271_ps_elp_wakeup(wl);
@@ -353,7 +356,7 @@ static ssize_t forced_ps_write(struct file *file,
353 356
354 wl->conf.conn.forced_ps = value; 357 wl->conf.conn.forced_ps = value;
355 358
356 if (wl->state == WL1271_STATE_OFF) 359 if (unlikely(wl->state != WLCORE_STATE_ON))
357 goto out; 360 goto out;
358 361
359 ret = wl1271_ps_elp_wakeup(wl); 362 ret = wl1271_ps_elp_wakeup(wl);
@@ -486,6 +489,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
486 DRIVER_STATE_PRINT_HEX(platform_quirks); 489 DRIVER_STATE_PRINT_HEX(platform_quirks);
487 DRIVER_STATE_PRINT_HEX(chip.id); 490 DRIVER_STATE_PRINT_HEX(chip.id);
488 DRIVER_STATE_PRINT_STR(chip.fw_ver_str); 491 DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
492 DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
489 DRIVER_STATE_PRINT_INT(sched_scanning); 493 DRIVER_STATE_PRINT_INT(sched_scanning);
490 494
491#undef DRIVER_STATE_PRINT_INT 495#undef DRIVER_STATE_PRINT_INT
@@ -999,7 +1003,7 @@ static ssize_t sleep_auth_write(struct file *file,
999 1003
1000 wl->conf.conn.sta_sleep_auth = value; 1004 wl->conf.conn.sta_sleep_auth = value;
1001 1005
1002 if (wl->state == WL1271_STATE_OFF) { 1006 if (unlikely(wl->state != WLCORE_STATE_ON)) {
1003 /* this will show up on "read" in case we are off */ 1007 /* this will show up on "read" in case we are off */
1004 wl->sleep_auth = value; 1008 wl->sleep_auth = value;
1005 goto out; 1009 goto out;
@@ -1060,14 +1064,16 @@ static ssize_t dev_mem_read(struct file *file,
1060 1064
1061 mutex_lock(&wl->mutex); 1065 mutex_lock(&wl->mutex);
1062 1066
1063 if (wl->state == WL1271_STATE_OFF) { 1067 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
1064 ret = -EFAULT; 1068 ret = -EFAULT;
1065 goto skip_read; 1069 goto skip_read;
1066 } 1070 }
1067 1071
1068 ret = wl1271_ps_elp_wakeup(wl); 1072 /*
1069 if (ret < 0) 1073 * Don't fail if elp_wakeup returns an error, so the device's memory
1070 goto skip_read; 1074 * could be read even if the FW crashed
1075 */
1076 wl1271_ps_elp_wakeup(wl);
1071 1077
1072 /* store current partition and switch partition */ 1078 /* store current partition and switch partition */
1073 memcpy(&old_part, &wl->curr_part, sizeof(old_part)); 1079 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1145,14 +1151,16 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
1145 1151
1146 mutex_lock(&wl->mutex); 1152 mutex_lock(&wl->mutex);
1147 1153
1148 if (wl->state == WL1271_STATE_OFF) { 1154 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
1149 ret = -EFAULT; 1155 ret = -EFAULT;
1150 goto skip_write; 1156 goto skip_write;
1151 } 1157 }
1152 1158
1153 ret = wl1271_ps_elp_wakeup(wl); 1159 /*
1154 if (ret < 0) 1160 * Don't fail if elp_wakeup returns an error, so the device's memory
1155 goto skip_write; 1161 * could be read even if the FW crashed
1162 */
1163 wl1271_ps_elp_wakeup(wl);
1156 1164
1157 /* store current partition and switch partition */ 1165 /* store current partition and switch partition */
1158 memcpy(&old_part, &wl->curr_part, sizeof(old_part)); 1166 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index a3c867786df8..32d157f62f31 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -141,7 +141,7 @@ int wl1271_init_templates_config(struct wl1271 *wl)
141 if (ret < 0) 141 if (ret < 0)
142 return ret; 142 return ret;
143 143
144 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 144 for (i = 0; i < WLCORE_MAX_KLV_TEMPLATES; i++) {
145 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, 145 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
146 CMD_TEMPL_KLV, NULL, 146 CMD_TEMPL_KLV, NULL,
147 sizeof(struct ieee80211_qos_hdr), 147 sizeof(struct ieee80211_qos_hdr),
@@ -371,15 +371,7 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
371 struct ieee80211_vif *vif) 371 struct ieee80211_vif *vif)
372{ 372{
373 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 373 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
374 int ret, i; 374 int ret;
375
376 /* disable all keep-alive templates */
377 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
378 ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
379 ACX_KEEP_ALIVE_TPL_INVALID);
380 if (ret < 0)
381 return ret;
382 }
383 375
384 /* disable the keep-alive feature */ 376 /* disable the keep-alive feature */
385 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); 377 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 259149f36fae..f48530fec14f 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -64,7 +64,7 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
64 return -EIO; 64 return -EIO;
65 65
66 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed); 66 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
67 if (ret && wl->state != WL1271_STATE_OFF) 67 if (ret && wl->state != WLCORE_STATE_OFF)
68 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags); 68 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
69 69
70 return ret; 70 return ret;
@@ -80,7 +80,7 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
80 return -EIO; 80 return -EIO;
81 81
82 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed); 82 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
83 if (ret && wl->state != WL1271_STATE_OFF) 83 if (ret && wl->state != WLCORE_STATE_OFF)
84 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags); 84 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
85 85
86 return ret; 86 return ret;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 72548609f711..25530c8760cb 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -248,7 +248,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
248 248
249 mutex_lock(&wl->mutex); 249 mutex_lock(&wl->mutex);
250 250
251 if (unlikely(wl->state == WL1271_STATE_OFF)) 251 if (unlikely(wl->state != WLCORE_STATE_ON))
252 goto out; 252 goto out;
253 253
254 /* Tx went out in the meantime - everything is ok */ 254 /* Tx went out in the meantime - everything is ok */
@@ -512,7 +512,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
512 512
513 wl1271_debug(DEBUG_IRQ, "IRQ work"); 513 wl1271_debug(DEBUG_IRQ, "IRQ work");
514 514
515 if (unlikely(wl->state == WL1271_STATE_OFF)) 515 if (unlikely(wl->state != WLCORE_STATE_ON))
516 goto out; 516 goto out;
517 517
518 ret = wl1271_ps_elp_wakeup(wl); 518 ret = wl1271_ps_elp_wakeup(wl);
@@ -696,7 +696,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
696 * we can't call wl12xx_get_vif_count() here because 696 * we can't call wl12xx_get_vif_count() here because
697 * wl->mutex is taken, so use the cached last_vif_count value 697 * wl->mutex is taken, so use the cached last_vif_count value
698 */ 698 */
699 if (wl->last_vif_count > 1) { 699 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
700 fw_type = WL12XX_FW_TYPE_MULTI; 700 fw_type = WL12XX_FW_TYPE_MULTI;
701 fw_name = wl->mr_fw_name; 701 fw_name = wl->mr_fw_name;
702 } else { 702 } else {
@@ -744,38 +744,14 @@ out:
744 return ret; 744 return ret;
745} 745}
746 746
747static void wl1271_fetch_nvs(struct wl1271 *wl)
748{
749 const struct firmware *fw;
750 int ret;
751
752 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
753
754 if (ret < 0) {
755 wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
756 WL12XX_NVS_NAME, ret);
757 return;
758 }
759
760 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
761
762 if (!wl->nvs) {
763 wl1271_error("could not allocate memory for the nvs file");
764 goto out;
765 }
766
767 wl->nvs_len = fw->size;
768
769out:
770 release_firmware(fw);
771}
772
773void wl12xx_queue_recovery_work(struct wl1271 *wl) 747void wl12xx_queue_recovery_work(struct wl1271 *wl)
774{ 748{
775 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); 749 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
776 750
777 /* Avoid a recursive recovery */ 751 /* Avoid a recursive recovery */
778 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { 752 if (wl->state == WLCORE_STATE_ON) {
753 wl->state = WLCORE_STATE_RESTARTING;
754 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
779 wlcore_disable_interrupts_nosync(wl); 755 wlcore_disable_interrupts_nosync(wl);
780 ieee80211_queue_work(wl->hw, &wl->recovery_work); 756 ieee80211_queue_work(wl->hw, &wl->recovery_work);
781 } 757 }
@@ -913,7 +889,7 @@ static void wl1271_recovery_work(struct work_struct *work)
913 889
914 mutex_lock(&wl->mutex); 890 mutex_lock(&wl->mutex);
915 891
916 if (wl->state != WL1271_STATE_ON || wl->plt) 892 if (wl->state == WLCORE_STATE_OFF || wl->plt)
917 goto out_unlock; 893 goto out_unlock;
918 894
919 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { 895 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
@@ -1081,7 +1057,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1081 1057
1082 wl1271_notice("power up"); 1058 wl1271_notice("power up");
1083 1059
1084 if (wl->state != WL1271_STATE_OFF) { 1060 if (wl->state != WLCORE_STATE_OFF) {
1085 wl1271_error("cannot go into PLT state because not " 1061 wl1271_error("cannot go into PLT state because not "
1086 "in off state: %d", wl->state); 1062 "in off state: %d", wl->state);
1087 ret = -EBUSY; 1063 ret = -EBUSY;
@@ -1102,7 +1078,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1102 if (ret < 0) 1078 if (ret < 0)
1103 goto power_off; 1079 goto power_off;
1104 1080
1105 wl->state = WL1271_STATE_ON; 1081 wl->state = WLCORE_STATE_ON;
1106 wl1271_notice("firmware booted in PLT mode %s (%s)", 1082 wl1271_notice("firmware booted in PLT mode %s (%s)",
1107 PLT_MODE[plt_mode], 1083 PLT_MODE[plt_mode],
1108 wl->chip.fw_ver_str); 1084 wl->chip.fw_ver_str);
@@ -1171,7 +1147,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1171 wl1271_power_off(wl); 1147 wl1271_power_off(wl);
1172 wl->flags = 0; 1148 wl->flags = 0;
1173 wl->sleep_auth = WL1271_PSM_ILLEGAL; 1149 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1174 wl->state = WL1271_STATE_OFF; 1150 wl->state = WLCORE_STATE_OFF;
1175 wl->plt = false; 1151 wl->plt = false;
1176 wl->plt_mode = PLT_OFF; 1152 wl->plt_mode = PLT_OFF;
1177 wl->rx_counter = 0; 1153 wl->rx_counter = 0;
@@ -1181,7 +1157,9 @@ out:
1181 return ret; 1157 return ret;
1182} 1158}
1183 1159
1184static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1160static void wl1271_op_tx(struct ieee80211_hw *hw,
1161 struct ieee80211_tx_control *control,
1162 struct sk_buff *skb)
1185{ 1163{
1186 struct wl1271 *wl = hw->priv; 1164 struct wl1271 *wl = hw->priv;
1187 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1165 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1175,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1197 mapping = skb_get_queue_mapping(skb); 1175 mapping = skb_get_queue_mapping(skb);
1198 q = wl1271_tx_get_queue(mapping); 1176 q = wl1271_tx_get_queue(mapping);
1199 1177
1200 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); 1178 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1201 1179
1202 spin_lock_irqsave(&wl->wl_lock, flags); 1180 spin_lock_irqsave(&wl->wl_lock, flags);
1203 1181
@@ -1600,12 +1578,6 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1600 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1578 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1601 goto out; 1579 goto out;
1602 1580
1603 if ((wl->conf.conn.suspend_wake_up_event ==
1604 wl->conf.conn.wake_up_event) &&
1605 (wl->conf.conn.suspend_listen_interval ==
1606 wl->conf.conn.listen_interval))
1607 goto out;
1608
1609 ret = wl1271_ps_elp_wakeup(wl); 1581 ret = wl1271_ps_elp_wakeup(wl);
1610 if (ret < 0) 1582 if (ret < 0)
1611 goto out; 1583 goto out;
@@ -1614,6 +1586,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1614 if (ret < 0) 1586 if (ret < 0)
1615 goto out_sleep; 1587 goto out_sleep;
1616 1588
1589 if ((wl->conf.conn.suspend_wake_up_event ==
1590 wl->conf.conn.wake_up_event) &&
1591 (wl->conf.conn.suspend_listen_interval ==
1592 wl->conf.conn.listen_interval))
1593 goto out_sleep;
1594
1617 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1595 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1618 wl->conf.conn.suspend_wake_up_event, 1596 wl->conf.conn.suspend_wake_up_event,
1619 wl->conf.conn.suspend_listen_interval); 1597 wl->conf.conn.suspend_listen_interval);
@@ -1669,11 +1647,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1669 if ((!is_ap) && (!is_sta)) 1647 if ((!is_ap) && (!is_sta))
1670 return; 1648 return;
1671 1649
1672 if (is_sta && 1650 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1673 ((wl->conf.conn.suspend_wake_up_event ==
1674 wl->conf.conn.wake_up_event) &&
1675 (wl->conf.conn.suspend_listen_interval ==
1676 wl->conf.conn.listen_interval)))
1677 return; 1651 return;
1678 1652
1679 ret = wl1271_ps_elp_wakeup(wl); 1653 ret = wl1271_ps_elp_wakeup(wl);
@@ -1683,6 +1657,12 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1683 if (is_sta) { 1657 if (is_sta) {
1684 wl1271_configure_wowlan(wl, NULL); 1658 wl1271_configure_wowlan(wl, NULL);
1685 1659
1660 if ((wl->conf.conn.suspend_wake_up_event ==
1661 wl->conf.conn.wake_up_event) &&
1662 (wl->conf.conn.suspend_listen_interval ==
1663 wl->conf.conn.listen_interval))
1664 goto out_sleep;
1665
1686 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1666 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1687 wl->conf.conn.wake_up_event, 1667 wl->conf.conn.wake_up_event,
1688 wl->conf.conn.listen_interval); 1668 wl->conf.conn.listen_interval);
@@ -1695,6 +1675,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1695 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); 1675 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1696 } 1676 }
1697 1677
1678out_sleep:
1698 wl1271_ps_elp_sleep(wl); 1679 wl1271_ps_elp_sleep(wl);
1699} 1680}
1700 1681
@@ -1831,7 +1812,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1831{ 1812{
1832 int i; 1813 int i;
1833 1814
1834 if (wl->state == WL1271_STATE_OFF) { 1815 if (wl->state == WLCORE_STATE_OFF) {
1835 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, 1816 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1836 &wl->flags)) 1817 &wl->flags))
1837 wlcore_enable_interrupts(wl); 1818 wlcore_enable_interrupts(wl);
@@ -1843,7 +1824,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1843 * this must be before the cancel_work calls below, so that the work 1824 * this must be before the cancel_work calls below, so that the work
1844 * functions don't perform further work. 1825 * functions don't perform further work.
1845 */ 1826 */
1846 wl->state = WL1271_STATE_OFF; 1827 wl->state = WLCORE_STATE_OFF;
1847 1828
1848 /* 1829 /*
1849 * Use the nosync variant to disable interrupts, so the mutex could be 1830 * Use the nosync variant to disable interrupts, so the mutex could be
@@ -1854,6 +1835,8 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1854 mutex_unlock(&wl->mutex); 1835 mutex_unlock(&wl->mutex);
1855 1836
1856 wlcore_synchronize_interrupts(wl); 1837 wlcore_synchronize_interrupts(wl);
1838 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1839 cancel_work_sync(&wl->recovery_work);
1857 wl1271_flush_deferred_work(wl); 1840 wl1271_flush_deferred_work(wl);
1858 cancel_delayed_work_sync(&wl->scan_complete_work); 1841 cancel_delayed_work_sync(&wl->scan_complete_work);
1859 cancel_work_sync(&wl->netstack_work); 1842 cancel_work_sync(&wl->netstack_work);
@@ -1956,6 +1939,27 @@ static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1956 *idx = WL12XX_MAX_RATE_POLICIES; 1939 *idx = WL12XX_MAX_RATE_POLICIES;
1957} 1940}
1958 1941
1942static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
1943{
1944 u8 policy = find_first_zero_bit(wl->klv_templates_map,
1945 WLCORE_MAX_KLV_TEMPLATES);
1946 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
1947 return -EBUSY;
1948
1949 __set_bit(policy, wl->klv_templates_map);
1950 *idx = policy;
1951 return 0;
1952}
1953
1954static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
1955{
1956 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
1957 return;
1958
1959 __clear_bit(*idx, wl->klv_templates_map);
1960 *idx = WLCORE_MAX_KLV_TEMPLATES;
1961}
1962
1959static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) 1963static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1960{ 1964{
1961 switch (wlvif->bss_type) { 1965 switch (wlvif->bss_type) {
@@ -2020,6 +2024,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2020 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2024 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2021 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2025 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2022 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2026 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2027 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2023 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 2028 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2024 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; 2029 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2025 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; 2030 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -2096,7 +2101,7 @@ irq_disable:
2096 /* Unlocking the mutex in the middle of handling is 2101 /* Unlocking the mutex in the middle of handling is
2097 inherently unsafe. In this case we deem it safe to do, 2102 inherently unsafe. In this case we deem it safe to do,
2098 because we need to let any possibly pending IRQ out of 2103 because we need to let any possibly pending IRQ out of
2099 the system (and while we are WL1271_STATE_OFF the IRQ 2104 the system (and while we are WLCORE_STATE_OFF the IRQ
2100 work function will not do anything.) Also, any other 2105 work function will not do anything.) Also, any other
2101 possible concurrent operations will fail due to the 2106 possible concurrent operations will fail due to the
2102 current state, hence the wl1271 struct should be safe. */ 2107 current state, hence the wl1271 struct should be safe. */
@@ -2131,7 +2136,7 @@ power_off:
2131 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", 2136 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2132 wl->enable_11a ? "" : "not "); 2137 wl->enable_11a ? "" : "not ");
2133 2138
2134 wl->state = WL1271_STATE_ON; 2139 wl->state = WLCORE_STATE_ON;
2135out: 2140out:
2136 return booted; 2141 return booted;
2137} 2142}
@@ -2165,7 +2170,11 @@ static bool wl12xx_need_fw_change(struct wl1271 *wl,
2165 wl->last_vif_count = vif_count; 2170 wl->last_vif_count = vif_count;
2166 2171
2167 /* no need for fw change if the device is OFF */ 2172 /* no need for fw change if the device is OFF */
2168 if (wl->state == WL1271_STATE_OFF) 2173 if (wl->state == WLCORE_STATE_OFF)
2174 return false;
2175
2176 /* no need for fw change if a single fw is used */
2177 if (!wl->mr_fw_name)
2169 return false; 2178 return false;
2170 2179
2171 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL) 2180 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
@@ -2247,7 +2256,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2247 * TODO: after the nvs issue will be solved, move this block 2256 * TODO: after the nvs issue will be solved, move this block
2248 * to start(), and make sure here the driver is ON. 2257 * to start(), and make sure here the driver is ON.
2249 */ 2258 */
2250 if (wl->state == WL1271_STATE_OFF) { 2259 if (wl->state == WLCORE_STATE_OFF) {
2251 /* 2260 /*
2252 * we still need this in order to configure the fw 2261 * we still need this in order to configure the fw
2253 * while uploading the nvs 2262 * while uploading the nvs
@@ -2261,21 +2270,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2261 } 2270 }
2262 } 2271 }
2263 2272
2264 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2265 wlvif->bss_type == BSS_TYPE_IBSS) {
2266 /*
2267 * The device role is a special role used for
2268 * rx and tx frames prior to association (as
2269 * the STA role can get packets only from
2270 * its associated bssid)
2271 */
2272 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2273 WL1271_ROLE_DEVICE,
2274 &wlvif->dev_role_id);
2275 if (ret < 0)
2276 goto out;
2277 }
2278
2279 ret = wl12xx_cmd_role_enable(wl, vif->addr, 2273 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2280 role_type, &wlvif->role_id); 2274 role_type, &wlvif->role_id);
2281 if (ret < 0) 2275 if (ret < 0)
@@ -2314,7 +2308,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2314 return; 2308 return;
2315 2309
2316 /* because of hardware recovery, we may get here twice */ 2310 /* because of hardware recovery, we may get here twice */
2317 if (wl->state != WL1271_STATE_ON) 2311 if (wl->state == WLCORE_STATE_OFF)
2318 return; 2312 return;
2319 2313
2320 wl1271_info("down"); 2314 wl1271_info("down");
@@ -2344,10 +2338,6 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2344 wlvif->bss_type == BSS_TYPE_IBSS) { 2338 wlvif->bss_type == BSS_TYPE_IBSS) {
2345 if (wl12xx_dev_role_started(wlvif)) 2339 if (wl12xx_dev_role_started(wlvif))
2346 wl12xx_stop_dev(wl, wlvif); 2340 wl12xx_stop_dev(wl, wlvif);
2347
2348 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2349 if (ret < 0)
2350 goto deinit;
2351 } 2341 }
2352 2342
2353 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); 2343 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
@@ -2366,6 +2356,7 @@ deinit:
2366 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2356 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2367 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2357 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2368 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2358 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2359 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2369 } else { 2360 } else {
2370 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 2361 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2371 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; 2362 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
@@ -2430,12 +2421,11 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2430 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2421 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2431 struct wl12xx_vif *iter; 2422 struct wl12xx_vif *iter;
2432 struct vif_counter_data vif_count; 2423 struct vif_counter_data vif_count;
2433 bool cancel_recovery = true;
2434 2424
2435 wl12xx_get_vif_count(hw, vif, &vif_count); 2425 wl12xx_get_vif_count(hw, vif, &vif_count);
2436 mutex_lock(&wl->mutex); 2426 mutex_lock(&wl->mutex);
2437 2427
2438 if (wl->state == WL1271_STATE_OFF || 2428 if (wl->state == WLCORE_STATE_OFF ||
2439 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 2429 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2440 goto out; 2430 goto out;
2441 2431
@@ -2455,12 +2445,9 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2455 wl12xx_force_active_psm(wl); 2445 wl12xx_force_active_psm(wl);
2456 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 2446 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2457 wl12xx_queue_recovery_work(wl); 2447 wl12xx_queue_recovery_work(wl);
2458 cancel_recovery = false;
2459 } 2448 }
2460out: 2449out:
2461 mutex_unlock(&wl->mutex); 2450 mutex_unlock(&wl->mutex);
2462 if (cancel_recovery)
2463 cancel_work_sync(&wl->recovery_work);
2464} 2451}
2465 2452
2466static int wl12xx_op_change_interface(struct ieee80211_hw *hw, 2453static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
@@ -2534,7 +2521,7 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2534 goto out; 2521 goto out;
2535 2522
2536 ret = wl1271_acx_keep_alive_config(wl, wlvif, 2523 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2537 CMD_TEMPL_KLV_IDX_NULL_DATA, 2524 wlvif->sta.klv_template_id,
2538 ACX_KEEP_ALIVE_TPL_VALID); 2525 ACX_KEEP_ALIVE_TPL_VALID);
2539 if (ret < 0) 2526 if (ret < 0)
2540 goto out; 2527 goto out;
@@ -2554,6 +2541,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2554 ieee80211_chswitch_done(vif, false); 2541 ieee80211_chswitch_done(vif, false);
2555 } 2542 }
2556 2543
2544 /* invalidate keep-alive template */
2545 wl1271_acx_keep_alive_config(wl, wlvif,
2546 wlvif->sta.klv_template_id,
2547 ACX_KEEP_ALIVE_TPL_INVALID);
2548
2557 /* to stop listening to a channel, we disconnect */ 2549 /* to stop listening to a channel, we disconnect */
2558 ret = wl12xx_cmd_role_stop_sta(wl, wlvif); 2550 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2559 if (ret < 0) 2551 if (ret < 0)
@@ -2594,11 +2586,6 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2594 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 2586 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2595 if (ret < 0) 2587 if (ret < 0)
2596 goto out; 2588 goto out;
2597 ret = wl1271_acx_keep_alive_config(
2598 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2599 ACX_KEEP_ALIVE_TPL_INVALID);
2600 if (ret < 0)
2601 goto out;
2602 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); 2589 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2603 } else { 2590 } else {
2604 /* The current firmware only supports sched_scan in idle */ 2591 /* The current firmware only supports sched_scan in idle */
@@ -2770,7 +2757,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2770 if (changed & IEEE80211_CONF_CHANGE_POWER) 2757 if (changed & IEEE80211_CONF_CHANGE_POWER)
2771 wl->power_level = conf->power_level; 2758 wl->power_level = conf->power_level;
2772 2759
2773 if (unlikely(wl->state == WL1271_STATE_OFF)) 2760 if (unlikely(wl->state != WLCORE_STATE_ON))
2774 goto out; 2761 goto out;
2775 2762
2776 ret = wl1271_ps_elp_wakeup(wl); 2763 ret = wl1271_ps_elp_wakeup(wl);
@@ -2804,10 +2791,6 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2804{ 2791{
2805 struct wl1271_filter_params *fp; 2792 struct wl1271_filter_params *fp;
2806 struct netdev_hw_addr *ha; 2793 struct netdev_hw_addr *ha;
2807 struct wl1271 *wl = hw->priv;
2808
2809 if (unlikely(wl->state == WL1271_STATE_OFF))
2810 return 0;
2811 2794
2812 fp = kzalloc(sizeof(*fp), GFP_ATOMIC); 2795 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2813 if (!fp) { 2796 if (!fp) {
@@ -2856,7 +2839,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2856 *total &= WL1271_SUPPORTED_FILTERS; 2839 *total &= WL1271_SUPPORTED_FILTERS;
2857 changed &= WL1271_SUPPORTED_FILTERS; 2840 changed &= WL1271_SUPPORTED_FILTERS;
2858 2841
2859 if (unlikely(wl->state == WL1271_STATE_OFF)) 2842 if (unlikely(wl->state != WLCORE_STATE_ON))
2860 goto out; 2843 goto out;
2861 2844
2862 ret = wl1271_ps_elp_wakeup(wl); 2845 ret = wl1271_ps_elp_wakeup(wl);
@@ -3080,8 +3063,45 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3080 struct ieee80211_key_conf *key_conf) 3063 struct ieee80211_key_conf *key_conf)
3081{ 3064{
3082 struct wl1271 *wl = hw->priv; 3065 struct wl1271 *wl = hw->priv;
3066 int ret;
3067 bool might_change_spare =
3068 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3069 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3083 3070
3084 return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); 3071 if (might_change_spare) {
3072 /*
3073 * stop the queues and flush to ensure the next packets are
3074 * in sync with FW spare block accounting
3075 */
3076 mutex_lock(&wl->mutex);
3077 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3078 mutex_unlock(&wl->mutex);
3079
3080 wl1271_tx_flush(wl);
3081 }
3082
3083 mutex_lock(&wl->mutex);
3084
3085 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3086 ret = -EAGAIN;
3087 goto out_wake_queues;
3088 }
3089
3090 ret = wl1271_ps_elp_wakeup(wl);
3091 if (ret < 0)
3092 goto out_wake_queues;
3093
3094 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3095
3096 wl1271_ps_elp_sleep(wl);
3097
3098out_wake_queues:
3099 if (might_change_spare)
3100 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3101
3102 mutex_unlock(&wl->mutex);
3103
3104 return ret;
3085} 3105}
3086 3106
3087int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, 3107int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
@@ -3103,17 +3123,6 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3103 key_conf->keylen, key_conf->flags); 3123 key_conf->keylen, key_conf->flags);
3104 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 3124 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3105 3125
3106 mutex_lock(&wl->mutex);
3107
3108 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3109 ret = -EAGAIN;
3110 goto out_unlock;
3111 }
3112
3113 ret = wl1271_ps_elp_wakeup(wl);
3114 if (ret < 0)
3115 goto out_unlock;
3116
3117 switch (key_conf->cipher) { 3126 switch (key_conf->cipher) {
3118 case WLAN_CIPHER_SUITE_WEP40: 3127 case WLAN_CIPHER_SUITE_WEP40:
3119 case WLAN_CIPHER_SUITE_WEP104: 3128 case WLAN_CIPHER_SUITE_WEP104:
@@ -3143,8 +3152,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3143 default: 3152 default:
3144 wl1271_error("Unknown key algo 0x%x", key_conf->cipher); 3153 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3145 3154
3146 ret = -EOPNOTSUPP; 3155 return -EOPNOTSUPP;
3147 goto out_sleep;
3148 } 3156 }
3149 3157
3150 switch (cmd) { 3158 switch (cmd) {
@@ -3155,7 +3163,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3155 tx_seq_32, tx_seq_16, sta); 3163 tx_seq_32, tx_seq_16, sta);
3156 if (ret < 0) { 3164 if (ret < 0) {
3157 wl1271_error("Could not add or replace key"); 3165 wl1271_error("Could not add or replace key");
3158 goto out_sleep; 3166 return ret;
3159 } 3167 }
3160 3168
3161 /* 3169 /*
@@ -3169,7 +3177,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3169 ret = wl1271_cmd_build_arp_rsp(wl, wlvif); 3177 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3170 if (ret < 0) { 3178 if (ret < 0) {
3171 wl1271_warning("build arp rsp failed: %d", ret); 3179 wl1271_warning("build arp rsp failed: %d", ret);
3172 goto out_sleep; 3180 return ret;
3173 } 3181 }
3174 } 3182 }
3175 break; 3183 break;
@@ -3181,22 +3189,15 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3181 0, 0, sta); 3189 0, 0, sta);
3182 if (ret < 0) { 3190 if (ret < 0) {
3183 wl1271_error("Could not remove key"); 3191 wl1271_error("Could not remove key");
3184 goto out_sleep; 3192 return ret;
3185 } 3193 }
3186 break; 3194 break;
3187 3195
3188 default: 3196 default:
3189 wl1271_error("Unsupported key cmd 0x%x", cmd); 3197 wl1271_error("Unsupported key cmd 0x%x", cmd);
3190 ret = -EOPNOTSUPP; 3198 return -EOPNOTSUPP;
3191 break;
3192 } 3199 }
3193 3200
3194out_sleep:
3195 wl1271_ps_elp_sleep(wl);
3196
3197out_unlock:
3198 mutex_unlock(&wl->mutex);
3199
3200 return ret; 3201 return ret;
3201} 3202}
3202EXPORT_SYMBOL_GPL(wlcore_set_key); 3203EXPORT_SYMBOL_GPL(wlcore_set_key);
@@ -3219,7 +3220,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3219 3220
3220 mutex_lock(&wl->mutex); 3221 mutex_lock(&wl->mutex);
3221 3222
3222 if (wl->state == WL1271_STATE_OFF) { 3223 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3223 /* 3224 /*
3224 * We cannot return -EBUSY here because cfg80211 will expect 3225 * We cannot return -EBUSY here because cfg80211 will expect
3225 * a call to ieee80211_scan_completed if we do - in this case 3226 * a call to ieee80211_scan_completed if we do - in this case
@@ -3259,7 +3260,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3259 3260
3260 mutex_lock(&wl->mutex); 3261 mutex_lock(&wl->mutex);
3261 3262
3262 if (wl->state == WL1271_STATE_OFF) 3263 if (unlikely(wl->state != WLCORE_STATE_ON))
3263 goto out; 3264 goto out;
3264 3265
3265 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 3266 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -3308,7 +3309,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3308 3309
3309 mutex_lock(&wl->mutex); 3310 mutex_lock(&wl->mutex);
3310 3311
3311 if (wl->state == WL1271_STATE_OFF) { 3312 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3312 ret = -EAGAIN; 3313 ret = -EAGAIN;
3313 goto out; 3314 goto out;
3314 } 3315 }
@@ -3345,7 +3346,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3345 3346
3346 mutex_lock(&wl->mutex); 3347 mutex_lock(&wl->mutex);
3347 3348
3348 if (wl->state == WL1271_STATE_OFF) 3349 if (unlikely(wl->state != WLCORE_STATE_ON))
3349 goto out; 3350 goto out;
3350 3351
3351 ret = wl1271_ps_elp_wakeup(wl); 3352 ret = wl1271_ps_elp_wakeup(wl);
@@ -3366,7 +3367,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3366 3367
3367 mutex_lock(&wl->mutex); 3368 mutex_lock(&wl->mutex);
3368 3369
3369 if (unlikely(wl->state == WL1271_STATE_OFF)) { 3370 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3370 ret = -EAGAIN; 3371 ret = -EAGAIN;
3371 goto out; 3372 goto out;
3372 } 3373 }
@@ -3395,7 +3396,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3395 3396
3396 mutex_lock(&wl->mutex); 3397 mutex_lock(&wl->mutex);
3397 3398
3398 if (unlikely(wl->state == WL1271_STATE_OFF)) { 3399 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3399 ret = -EAGAIN; 3400 ret = -EAGAIN;
3400 goto out; 3401 goto out;
3401 } 3402 }
@@ -4171,7 +4172,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4171 4172
4172 mutex_lock(&wl->mutex); 4173 mutex_lock(&wl->mutex);
4173 4174
4174 if (unlikely(wl->state == WL1271_STATE_OFF)) 4175 if (unlikely(wl->state != WLCORE_STATE_ON))
4175 goto out; 4176 goto out;
4176 4177
4177 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) 4178 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
@@ -4255,7 +4256,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4255 4256
4256 mutex_lock(&wl->mutex); 4257 mutex_lock(&wl->mutex);
4257 4258
4258 if (unlikely(wl->state == WL1271_STATE_OFF)) 4259 if (unlikely(wl->state != WLCORE_STATE_ON))
4259 goto out; 4260 goto out;
4260 4261
4261 ret = wl1271_ps_elp_wakeup(wl); 4262 ret = wl1271_ps_elp_wakeup(wl);
@@ -4454,7 +4455,7 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4454 4455
4455 mutex_lock(&wl->mutex); 4456 mutex_lock(&wl->mutex);
4456 4457
4457 if (unlikely(wl->state == WL1271_STATE_OFF)) { 4458 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4458 ret = -EBUSY; 4459 ret = -EBUSY;
4459 goto out; 4460 goto out;
4460 } 4461 }
@@ -4493,7 +4494,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4493 4494
4494 mutex_lock(&wl->mutex); 4495 mutex_lock(&wl->mutex);
4495 4496
4496 if (unlikely(wl->state == WL1271_STATE_OFF)) { 4497 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4497 ret = -EAGAIN; 4498 ret = -EAGAIN;
4498 goto out; 4499 goto out;
4499 } 4500 }
@@ -4611,7 +4612,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4611 mask->control[i].legacy, 4612 mask->control[i].legacy,
4612 i); 4613 i);
4613 4614
4614 if (unlikely(wl->state == WL1271_STATE_OFF)) 4615 if (unlikely(wl->state != WLCORE_STATE_ON))
4615 goto out; 4616 goto out;
4616 4617
4617 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 4618 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
@@ -4647,12 +4648,14 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4647 4648
4648 mutex_lock(&wl->mutex); 4649 mutex_lock(&wl->mutex);
4649 4650
4650 if (unlikely(wl->state == WL1271_STATE_OFF)) { 4651 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4651 wl12xx_for_each_wlvif_sta(wl, wlvif) { 4652 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4652 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 4653 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4653 ieee80211_chswitch_done(vif, false); 4654 ieee80211_chswitch_done(vif, false);
4654 } 4655 }
4655 goto out; 4656 goto out;
4657 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4658 goto out;
4656 } 4659 }
4657 4660
4658 ret = wl1271_ps_elp_wakeup(wl); 4661 ret = wl1271_ps_elp_wakeup(wl);
@@ -4687,7 +4690,7 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4687 4690
4688 mutex_lock(&wl->mutex); 4691 mutex_lock(&wl->mutex);
4689 4692
4690 if (unlikely(wl->state == WL1271_STATE_OFF)) 4693 if (unlikely(wl->state != WLCORE_STATE_ON))
4691 goto out; 4694 goto out;
4692 4695
4693 /* packets are considered pending if in the TX queue or the FW */ 4696 /* packets are considered pending if in the TX queue or the FW */
@@ -4936,7 +4939,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4936 4939
4937 wl->sg_enabled = res; 4940 wl->sg_enabled = res;
4938 4941
4939 if (wl->state == WL1271_STATE_OFF) 4942 if (unlikely(wl->state != WLCORE_STATE_ON))
4940 goto out; 4943 goto out;
4941 4944
4942 ret = wl1271_ps_elp_wakeup(wl); 4945 ret = wl1271_ps_elp_wakeup(wl);
@@ -5054,7 +5057,7 @@ static void wl1271_connection_loss_work(struct work_struct *work)
5054 5057
5055 mutex_lock(&wl->mutex); 5058 mutex_lock(&wl->mutex);
5056 5059
5057 if (unlikely(wl->state == WL1271_STATE_OFF)) 5060 if (unlikely(wl->state != WLCORE_STATE_ON))
5058 goto out; 5061 goto out;
5059 5062
5060 /* Call mac80211 connection loss */ 5063 /* Call mac80211 connection loss */
@@ -5068,18 +5071,17 @@ out:
5068 mutex_unlock(&wl->mutex); 5071 mutex_unlock(&wl->mutex);
5069} 5072}
5070 5073
5071static void wl12xx_derive_mac_addresses(struct wl1271 *wl, 5074static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5072 u32 oui, u32 nic, int n)
5073{ 5075{
5074 int i; 5076 int i;
5075 5077
5076 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d", 5078 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5077 oui, nic, n); 5079 oui, nic);
5078 5080
5079 if (nic + n - 1 > 0xffffff) 5081 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5080 wl1271_warning("NIC part of the MAC address wraps around!"); 5082 wl1271_warning("NIC part of the MAC address wraps around!");
5081 5083
5082 for (i = 0; i < n; i++) { 5084 for (i = 0; i < wl->num_mac_addr; i++) {
5083 wl->addresses[i].addr[0] = (u8)(oui >> 16); 5085 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5084 wl->addresses[i].addr[1] = (u8)(oui >> 8); 5086 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5085 wl->addresses[i].addr[2] = (u8) oui; 5087 wl->addresses[i].addr[2] = (u8) oui;
@@ -5089,7 +5091,22 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5089 nic++; 5091 nic++;
5090 } 5092 }
5091 5093
5092 wl->hw->wiphy->n_addresses = n; 5094 /* we may be one address short at the most */
5095 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5096
5097 /*
5098 * turn on the LAA bit in the first address and use it as
5099 * the last address.
5100 */
5101 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5102 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5103 memcpy(&wl->addresses[idx], &wl->addresses[0],
5104 sizeof(wl->addresses[0]));
5105 /* LAA bit */
5106 wl->addresses[idx].addr[2] |= BIT(1);
5107 }
5108
5109 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5093 wl->hw->wiphy->addresses = wl->addresses; 5110 wl->hw->wiphy->addresses = wl->addresses;
5094} 5111}
5095 5112
@@ -5128,8 +5145,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
5128 if (wl->mac80211_registered) 5145 if (wl->mac80211_registered)
5129 return 0; 5146 return 0;
5130 5147
5131 wl1271_fetch_nvs(wl); 5148 if (wl->nvs_len >= 12) {
5132 if (wl->nvs != NULL) {
5133 /* NOTE: The wl->nvs->nvs element must be first, in 5149 /* NOTE: The wl->nvs->nvs element must be first, in
5134 * order to simplify the casting, we assume it is at 5150 * order to simplify the casting, we assume it is at
5135 * the beginning of the wl->nvs structure. 5151 * the beginning of the wl->nvs structure.
@@ -5149,7 +5165,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
5149 nic_addr = wl->fuse_nic_addr + 1; 5165 nic_addr = wl->fuse_nic_addr + 1;
5150 } 5166 }
5151 5167
5152 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2); 5168 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5153 5169
5154 ret = ieee80211_register_hw(wl->hw); 5170 ret = ieee80211_register_hw(wl->hw);
5155 if (ret < 0) { 5171 if (ret < 0) {
@@ -5179,7 +5195,7 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
5179 5195
5180static const struct ieee80211_iface_limit wlcore_iface_limits[] = { 5196static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5181 { 5197 {
5182 .max = 2, 5198 .max = 3,
5183 .types = BIT(NL80211_IFTYPE_STATION), 5199 .types = BIT(NL80211_IFTYPE_STATION),
5184 }, 5200 },
5185 { 5201 {
@@ -5194,7 +5210,7 @@ static const struct ieee80211_iface_combination
5194wlcore_iface_combinations[] = { 5210wlcore_iface_combinations[] = {
5195 { 5211 {
5196 .num_different_channels = 1, 5212 .num_different_channels = 1,
5197 .max_interfaces = 2, 5213 .max_interfaces = 3,
5198 .limits = wlcore_iface_limits, 5214 .limits = wlcore_iface_limits,
5199 .n_limits = ARRAY_SIZE(wlcore_iface_limits), 5215 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5200 }, 5216 },
@@ -5310,7 +5326,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5310 5326
5311#define WL1271_DEFAULT_CHANNEL 0 5327#define WL1271_DEFAULT_CHANNEL 0
5312 5328
5313struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size) 5329struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
5314{ 5330{
5315 struct ieee80211_hw *hw; 5331 struct ieee80211_hw *hw;
5316 struct wl1271 *wl; 5332 struct wl1271 *wl;
@@ -5390,17 +5406,19 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5390 5406
5391 spin_lock_init(&wl->wl_lock); 5407 spin_lock_init(&wl->wl_lock);
5392 5408
5393 wl->state = WL1271_STATE_OFF; 5409 wl->state = WLCORE_STATE_OFF;
5394 wl->fw_type = WL12XX_FW_TYPE_NONE; 5410 wl->fw_type = WL12XX_FW_TYPE_NONE;
5395 mutex_init(&wl->mutex); 5411 mutex_init(&wl->mutex);
5396 mutex_init(&wl->flush_mutex); 5412 mutex_init(&wl->flush_mutex);
5413 init_completion(&wl->nvs_loading_complete);
5397 5414
5398 order = get_order(WL1271_AGGR_BUFFER_SIZE); 5415 order = get_order(aggr_buf_size);
5399 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 5416 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5400 if (!wl->aggr_buf) { 5417 if (!wl->aggr_buf) {
5401 ret = -ENOMEM; 5418 ret = -ENOMEM;
5402 goto err_wq; 5419 goto err_wq;
5403 } 5420 }
5421 wl->aggr_buf_size = aggr_buf_size;
5404 5422
5405 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl); 5423 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5406 if (!wl->dummy_packet) { 5424 if (!wl->dummy_packet) {
@@ -5463,8 +5481,7 @@ int wlcore_free_hw(struct wl1271 *wl)
5463 device_remove_file(wl->dev, &dev_attr_bt_coex_state); 5481 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5464 free_page((unsigned long)wl->fwlog); 5482 free_page((unsigned long)wl->fwlog);
5465 dev_kfree_skb(wl->dummy_packet); 5483 dev_kfree_skb(wl->dummy_packet);
5466 free_pages((unsigned long)wl->aggr_buf, 5484 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5467 get_order(WL1271_AGGR_BUFFER_SIZE));
5468 5485
5469 wl1271_debugfs_exit(wl); 5486 wl1271_debugfs_exit(wl);
5470 5487
@@ -5514,17 +5531,32 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5514 return IRQ_WAKE_THREAD; 5531 return IRQ_WAKE_THREAD;
5515} 5532}
5516 5533
5517int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) 5534static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5518{ 5535{
5536 struct wl1271 *wl = context;
5537 struct platform_device *pdev = wl->pdev;
5519 struct wl12xx_platform_data *pdata = pdev->dev.platform_data; 5538 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5520 unsigned long irqflags; 5539 unsigned long irqflags;
5521 int ret; 5540 int ret;
5522 5541
5523 if (!wl->ops || !wl->ptable) { 5542 if (fw) {
5524 ret = -EINVAL; 5543 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5525 goto out_free_hw; 5544 if (!wl->nvs) {
5545 wl1271_error("Could not allocate nvs data");
5546 goto out;
5547 }
5548 wl->nvs_len = fw->size;
5549 } else {
5550 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5551 WL12XX_NVS_NAME);
5552 wl->nvs = NULL;
5553 wl->nvs_len = 0;
5526 } 5554 }
5527 5555
5556 ret = wl->ops->setup(wl);
5557 if (ret < 0)
5558 goto out_free_nvs;
5559
5528 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS); 5560 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5529 5561
5530 /* adjust some runtime configuration parameters */ 5562 /* adjust some runtime configuration parameters */
@@ -5533,11 +5565,8 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5533 wl->irq = platform_get_irq(pdev, 0); 5565 wl->irq = platform_get_irq(pdev, 0);
5534 wl->platform_quirks = pdata->platform_quirks; 5566 wl->platform_quirks = pdata->platform_quirks;
5535 wl->set_power = pdata->set_power; 5567 wl->set_power = pdata->set_power;
5536 wl->dev = &pdev->dev;
5537 wl->if_ops = pdata->ops; 5568 wl->if_ops = pdata->ops;
5538 5569
5539 platform_set_drvdata(pdev, wl);
5540
5541 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 5570 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5542 irqflags = IRQF_TRIGGER_RISING; 5571 irqflags = IRQF_TRIGGER_RISING;
5543 else 5572 else
@@ -5548,7 +5577,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5548 pdev->name, wl); 5577 pdev->name, wl);
5549 if (ret < 0) { 5578 if (ret < 0) {
5550 wl1271_error("request_irq() failed: %d", ret); 5579 wl1271_error("request_irq() failed: %d", ret);
5551 goto out_free_hw; 5580 goto out_free_nvs;
5552 } 5581 }
5553 5582
5554#ifdef CONFIG_PM 5583#ifdef CONFIG_PM
@@ -5607,6 +5636,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5607 goto out_hw_pg_ver; 5636 goto out_hw_pg_ver;
5608 } 5637 }
5609 5638
5639 wl->initialized = true;
5610 goto out; 5640 goto out;
5611 5641
5612out_hw_pg_ver: 5642out_hw_pg_ver:
@@ -5621,10 +5651,33 @@ out_unreg:
5621out_irq: 5651out_irq:
5622 free_irq(wl->irq, wl); 5652 free_irq(wl->irq, wl);
5623 5653
5624out_free_hw: 5654out_free_nvs:
5625 wlcore_free_hw(wl); 5655 kfree(wl->nvs);
5626 5656
5627out: 5657out:
5658 release_firmware(fw);
5659 complete_all(&wl->nvs_loading_complete);
5660}
5661
5662int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5663{
5664 int ret;
5665
5666 if (!wl->ops || !wl->ptable)
5667 return -EINVAL;
5668
5669 wl->dev = &pdev->dev;
5670 wl->pdev = pdev;
5671 platform_set_drvdata(pdev, wl);
5672
5673 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
5674 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
5675 wl, wlcore_nvs_cb);
5676 if (ret < 0) {
5677 wl1271_error("request_firmware_nowait failed: %d", ret);
5678 complete_all(&wl->nvs_loading_complete);
5679 }
5680
5628 return ret; 5681 return ret;
5629} 5682}
5630EXPORT_SYMBOL_GPL(wlcore_probe); 5683EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -5633,6 +5686,10 @@ int __devexit wlcore_remove(struct platform_device *pdev)
5633{ 5686{
5634 struct wl1271 *wl = platform_get_drvdata(pdev); 5687 struct wl1271 *wl = platform_get_drvdata(pdev);
5635 5688
5689 wait_for_completion(&wl->nvs_loading_complete);
5690 if (!wl->initialized)
5691 return 0;
5692
5636 if (wl->irq_wake_enabled) { 5693 if (wl->irq_wake_enabled) {
5637 device_init_wakeup(wl->dev, 0); 5694 device_init_wakeup(wl->dev, 0);
5638 disable_irq_wake(wl->irq); 5695 disable_irq_wake(wl->irq);
@@ -5663,3 +5720,4 @@ MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5663MODULE_LICENSE("GPL"); 5720MODULE_LICENSE("GPL");
5664MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 5721MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5665MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 5722MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
5723MODULE_FIRMWARE(WL12XX_NVS_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 46d36fd30eba..4d1414a673fb 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -28,7 +28,7 @@
28 28
29#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
30 30
31#define ELP_ENTRY_DELAY 5 31#define ELP_ENTRY_DELAY 30
32 32
33void wl1271_elp_work(struct work_struct *work) 33void wl1271_elp_work(struct work_struct *work)
34{ 34{
@@ -44,7 +44,7 @@ void wl1271_elp_work(struct work_struct *work)
44 44
45 mutex_lock(&wl->mutex); 45 mutex_lock(&wl->mutex);
46 46
47 if (unlikely(wl->state == WL1271_STATE_OFF)) 47 if (unlikely(wl->state != WLCORE_STATE_ON))
48 goto out; 48 goto out;
49 49
50 /* our work might have been already cancelled */ 50 /* our work might have been already cancelled */
@@ -98,11 +98,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
98 return; 98 return;
99 } 99 }
100 100
101 if (wl->conf.conn.forced_ps) 101 timeout = ELP_ENTRY_DELAY;
102 timeout = ELP_ENTRY_DELAY;
103 else
104 timeout = wl->conf.conn.dynamic_ps_timeout;
105
106 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 102 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
107 msecs_to_jiffies(timeout)); 103 msecs_to_jiffies(timeout));
108} 104}
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index f55e2f9e7ac5..9ee0ec6fd1db 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -221,7 +221,7 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
221 pkt_len = wlcore_rx_get_buf_size(wl, des); 221 pkt_len = wlcore_rx_get_buf_size(wl, des);
222 align_pkt_len = wlcore_rx_get_align_buf_size(wl, 222 align_pkt_len = wlcore_rx_get_align_buf_size(wl,
223 pkt_len); 223 pkt_len);
224 if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE) 224 if (buf_size + align_pkt_len > wl->aggr_buf_size)
225 break; 225 break;
226 buf_size += align_pkt_len; 226 buf_size += align_pkt_len;
227 rx_counter++; 227 rx_counter++;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index dbeca1bfbb2c..d00501493dfe 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -46,7 +46,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
46 46
47 mutex_lock(&wl->mutex); 47 mutex_lock(&wl->mutex);
48 48
49 if (wl->state == WL1271_STATE_OFF) 49 if (unlikely(wl->state != WLCORE_STATE_ON))
50 goto out; 50 goto out;
51 51
52 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 52 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -184,11 +184,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
184 if (passive) 184 if (passive)
185 scan_options |= WL1271_SCAN_OPT_PASSIVE; 185 scan_options |= WL1271_SCAN_OPT_PASSIVE;
186 186
187 if (wlvif->bss_type == BSS_TYPE_AP_BSS || 187 cmd->params.role_id = wlvif->role_id;
188 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
189 cmd->params.role_id = wlvif->role_id;
190 else
191 cmd->params.role_id = wlvif->dev_role_id;
192 188
193 if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) { 189 if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
194 ret = -EINVAL; 190 ret = -EINVAL;
@@ -593,7 +589,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
593 goto out; 589 goto out;
594 } 590 }
595 591
596 cmd->role_id = wlvif->dev_role_id; 592 cmd->role_id = wlvif->role_id;
597 if (!n_match_ssids) { 593 if (!n_match_ssids) {
598 /* No filter, with ssids */ 594 /* No filter, with ssids */
599 type = SCAN_SSID_FILTER_DISABLED; 595 type = SCAN_SSID_FILTER_DISABLED;
@@ -683,7 +679,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
683 if (!cfg) 679 if (!cfg)
684 return -ENOMEM; 680 return -ENOMEM;
685 681
686 cfg->role_id = wlvif->dev_role_id; 682 cfg->role_id = wlvif->role_id;
687 cfg->rssi_threshold = c->rssi_threshold; 683 cfg->rssi_threshold = c->rssi_threshold;
688 cfg->snr_threshold = c->snr_threshold; 684 cfg->snr_threshold = c->snr_threshold;
689 cfg->n_probe_reqs = c->num_probe_reqs; 685 cfg->n_probe_reqs = c->num_probe_reqs;
@@ -718,7 +714,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
718 if (!force_passive && cfg->active[0]) { 714 if (!force_passive && cfg->active[0]) {
719 u8 band = IEEE80211_BAND_2GHZ; 715 u8 band = IEEE80211_BAND_2GHZ;
720 ret = wl12xx_cmd_build_probe_req(wl, wlvif, 716 ret = wl12xx_cmd_build_probe_req(wl, wlvif,
721 wlvif->dev_role_id, band, 717 wlvif->role_id, band,
722 req->ssids[0].ssid, 718 req->ssids[0].ssid,
723 req->ssids[0].ssid_len, 719 req->ssids[0].ssid_len,
724 ies->ie[band], 720 ies->ie[band],
@@ -732,7 +728,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
732 if (!force_passive && cfg->active[1]) { 728 if (!force_passive && cfg->active[1]) {
733 u8 band = IEEE80211_BAND_5GHZ; 729 u8 band = IEEE80211_BAND_5GHZ;
734 ret = wl12xx_cmd_build_probe_req(wl, wlvif, 730 ret = wl12xx_cmd_build_probe_req(wl, wlvif,
735 wlvif->dev_role_id, band, 731 wlvif->role_id, band,
736 req->ssids[0].ssid, 732 req->ssids[0].ssid,
737 req->ssids[0].ssid_len, 733 req->ssids[0].ssid_len,
738 ies->ie[band], 734 ies->ie[band],
@@ -774,7 +770,7 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
774 if (!start) 770 if (!start)
775 return -ENOMEM; 771 return -ENOMEM;
776 772
777 start->role_id = wlvif->dev_role_id; 773 start->role_id = wlvif->role_id;
778 start->tag = WL1271_SCAN_DEFAULT_TAG; 774 start->tag = WL1271_SCAN_DEFAULT_TAG;
779 775
780 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, 776 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -810,7 +806,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
810 return; 806 return;
811 } 807 }
812 808
813 stop->role_id = wlvif->dev_role_id; 809 stop->role_id = wlvif->role_id;
814 stop->tag = WL1271_SCAN_DEFAULT_TAG; 810 stop->tag = WL1271_SCAN_DEFAULT_TAG;
815 811
816 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop, 812 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 8da4ed243ebc..a519bc3adec1 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -66,7 +66,13 @@
66/* HW limitation: maximum possible chunk size is 4095 bytes */ 66/* HW limitation: maximum possible chunk size is 4095 bytes */
67#define WSPI_MAX_CHUNK_SIZE 4092 67#define WSPI_MAX_CHUNK_SIZE 4092
68 68
69#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) 69/*
70 * only support SPI for 12xx - this code should be reworked when 18xx
71 * support is introduced
72 */
73#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
74
75#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
70 76
71struct wl12xx_spi_glue { 77struct wl12xx_spi_glue {
72 struct device *dev; 78 struct device *dev;
@@ -271,7 +277,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
271 u32 chunk_len; 277 u32 chunk_len;
272 int i; 278 int i;
273 279
274 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE); 280 WARN_ON(len > SPI_AGGR_BUFFER_SIZE);
275 281
276 spi_message_init(&m); 282 spi_message_init(&m);
277 memset(t, 0, sizeof(t)); 283 memset(t, 0, sizeof(t));
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 49e5ee1525c9..f3442762d884 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -92,7 +92,7 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
92 92
93 mutex_lock(&wl->mutex); 93 mutex_lock(&wl->mutex);
94 94
95 if (wl->state == WL1271_STATE_OFF) { 95 if (unlikely(wl->state != WLCORE_STATE_ON)) {
96 ret = -EINVAL; 96 ret = -EINVAL;
97 goto out; 97 goto out;
98 } 98 }
@@ -164,7 +164,7 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
164 164
165 mutex_lock(&wl->mutex); 165 mutex_lock(&wl->mutex);
166 166
167 if (wl->state == WL1271_STATE_OFF) { 167 if (unlikely(wl->state != WLCORE_STATE_ON)) {
168 ret = -EINVAL; 168 ret = -EINVAL;
169 goto out; 169 goto out;
170 } 170 }
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0081f746482..a90d3cd09408 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
130} 130}
131EXPORT_SYMBOL(wl12xx_is_dummy_packet); 131EXPORT_SYMBOL(wl12xx_is_dummy_packet);
132 132
133u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, 133static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
134 struct sk_buff *skb) 134 struct sk_buff *skb, struct ieee80211_sta *sta)
135{ 135{
136 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); 136 if (sta) {
137
138 if (control->control.sta) {
139 struct wl1271_station *wl_sta; 137 struct wl1271_station *wl_sta;
140 138
141 wl_sta = (struct wl1271_station *) 139 wl_sta = (struct wl1271_station *)sta->drv_priv;
142 control->control.sta->drv_priv;
143 return wl_sta->hlid; 140 return wl_sta->hlid;
144 } else { 141 } else {
145 struct ieee80211_hdr *hdr; 142 struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
156} 153}
157 154
158u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 155u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
159 struct sk_buff *skb) 156 struct sk_buff *skb, struct ieee80211_sta *sta)
160{ 157{
161 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 158 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
162 159
@@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
164 return wl->system_hlid; 161 return wl->system_hlid;
165 162
166 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 163 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
167 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); 164 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
168 165
169 if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || 166 if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
170 test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && 167 test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -196,7 +193,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
196 int id, ret = -EBUSY, ac; 193 int id, ret = -EBUSY, ac;
197 u32 spare_blocks; 194 u32 spare_blocks;
198 195
199 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 196 if (buf_offset + total_len > wl->aggr_buf_size)
200 return -EAGAIN; 197 return -EAGAIN;
201 198
202 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem); 199 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
@@ -322,8 +319,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
322 if (hlid == wlvif->ap.global_hlid) 319 if (hlid == wlvif->ap.global_hlid)
323 rate_idx = wlvif->ap.mgmt_rate_idx; 320 rate_idx = wlvif->ap.mgmt_rate_idx;
324 else if (hlid == wlvif->ap.bcast_hlid || 321 else if (hlid == wlvif->ap.bcast_hlid ||
325 skb->protocol == cpu_to_be16(ETH_P_PAE)) 322 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
326 /* send AP bcast and EAPOLs using the min basic rate */ 323 !ieee80211_is_data(frame_control))
324 /*
325 * send non-data, bcast and EAPOLs using the
326 * min basic rate
327 */
327 rate_idx = wlvif->ap.bcast_rate_idx; 328 rate_idx = wlvif->ap.bcast_rate_idx;
328 else 329 else
329 rate_idx = wlvif->ap.ucast_rate_idx[ac]; 330 rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -344,13 +345,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
344 345
345/* caller must hold wl->mutex */ 346/* caller must hold wl->mutex */
346static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, 347static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
347 struct sk_buff *skb, u32 buf_offset) 348 struct sk_buff *skb, u32 buf_offset, u8 hlid)
348{ 349{
349 struct ieee80211_tx_info *info; 350 struct ieee80211_tx_info *info;
350 u32 extra = 0; 351 u32 extra = 0;
351 int ret = 0; 352 int ret = 0;
352 u32 total_len; 353 u32 total_len;
353 u8 hlid;
354 bool is_dummy; 354 bool is_dummy;
355 bool is_gem = false; 355 bool is_gem = false;
356 356
@@ -359,9 +359,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
359 return -EINVAL; 359 return -EINVAL;
360 } 360 }
361 361
362 if (hlid == WL12XX_INVALID_LINK_ID) {
363 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
364 return -EINVAL;
365 }
366
362 info = IEEE80211_SKB_CB(skb); 367 info = IEEE80211_SKB_CB(skb);
363 368
364 /* TODO: handle dummy packets on multi-vifs */
365 is_dummy = wl12xx_is_dummy_packet(wl, skb); 369 is_dummy = wl12xx_is_dummy_packet(wl, skb);
366 370
367 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && 371 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +390,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
386 390
387 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM); 391 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
388 } 392 }
389 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
390 if (hlid == WL12XX_INVALID_LINK_ID) {
391 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
392 return -EINVAL;
393 }
394 393
395 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid, 394 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
396 is_gem); 395 is_gem);
@@ -517,7 +516,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
517} 516}
518 517
519static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, 518static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
520 struct wl12xx_vif *wlvif) 519 struct wl12xx_vif *wlvif,
520 u8 *hlid)
521{ 521{
522 struct sk_buff *skb = NULL; 522 struct sk_buff *skb = NULL;
523 int i, h, start_hlid; 523 int i, h, start_hlid;
@@ -544,10 +544,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
544 if (!skb) 544 if (!skb)
545 wlvif->last_tx_hlid = 0; 545 wlvif->last_tx_hlid = 0;
546 546
547 *hlid = wlvif->last_tx_hlid;
547 return skb; 548 return skb;
548} 549}
549 550
550static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 551static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
551{ 552{
552 unsigned long flags; 553 unsigned long flags;
553 struct wl12xx_vif *wlvif = wl->last_wlvif; 554 struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +557,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
556 /* continue from last wlvif (round robin) */ 557 /* continue from last wlvif (round robin) */
557 if (wlvif) { 558 if (wlvif) {
558 wl12xx_for_each_wlvif_continue(wl, wlvif) { 559 wl12xx_for_each_wlvif_continue(wl, wlvif) {
559 skb = wl12xx_vif_skb_dequeue(wl, wlvif); 560 skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
560 if (skb) { 561 if (skb) {
561 wl->last_wlvif = wlvif; 562 wl->last_wlvif = wlvif;
562 break; 563 break;
@@ -565,13 +566,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
565 } 566 }
566 567
567 /* dequeue from the system HLID before the restarting wlvif list */ 568 /* dequeue from the system HLID before the restarting wlvif list */
568 if (!skb) 569 if (!skb) {
569 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); 570 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
571 *hlid = wl->system_hlid;
572 }
570 573
571 /* do a new pass over the wlvif list */ 574 /* do a new pass over the wlvif list */
572 if (!skb) { 575 if (!skb) {
573 wl12xx_for_each_wlvif(wl, wlvif) { 576 wl12xx_for_each_wlvif(wl, wlvif) {
574 skb = wl12xx_vif_skb_dequeue(wl, wlvif); 577 skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
575 if (skb) { 578 if (skb) {
576 wl->last_wlvif = wlvif; 579 wl->last_wlvif = wlvif;
577 break; 580 break;
@@ -591,6 +594,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
591 int q; 594 int q;
592 595
593 skb = wl->dummy_packet; 596 skb = wl->dummy_packet;
597 *hlid = wl->system_hlid;
594 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 598 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
595 spin_lock_irqsave(&wl->wl_lock, flags); 599 spin_lock_irqsave(&wl->wl_lock, flags);
596 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); 600 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +606,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
602} 606}
603 607
604static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, 608static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
605 struct sk_buff *skb) 609 struct sk_buff *skb, u8 hlid)
606{ 610{
607 unsigned long flags; 611 unsigned long flags;
608 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 612 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +614,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
610 if (wl12xx_is_dummy_packet(wl, skb)) { 614 if (wl12xx_is_dummy_packet(wl, skb)) {
611 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 615 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
612 } else { 616 } else {
613 u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
614 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 617 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
615 618
616 /* make sure we dequeue the same packet next time */ 619 /* make sure we dequeue the same packet next time */
@@ -686,26 +689,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
686 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 689 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
687 int ret = 0; 690 int ret = 0;
688 int bus_ret = 0; 691 int bus_ret = 0;
692 u8 hlid;
689 693
690 if (unlikely(wl->state == WL1271_STATE_OFF)) 694 if (unlikely(wl->state != WLCORE_STATE_ON))
691 return 0; 695 return 0;
692 696
693 while ((skb = wl1271_skb_dequeue(wl))) { 697 while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
694 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 698 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
695 bool has_data = false; 699 bool has_data = false;
696 700
697 wlvif = NULL; 701 wlvif = NULL;
698 if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) 702 if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
699 wlvif = wl12xx_vif_to_data(info->control.vif); 703 wlvif = wl12xx_vif_to_data(info->control.vif);
704 else
705 hlid = wl->system_hlid;
700 706
701 has_data = wlvif && wl1271_tx_is_data_present(skb); 707 has_data = wlvif && wl1271_tx_is_data_present(skb);
702 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset); 708 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
709 hlid);
703 if (ret == -EAGAIN) { 710 if (ret == -EAGAIN) {
704 /* 711 /*
705 * Aggregation buffer is full. 712 * Aggregation buffer is full.
706 * Flush buffer and try again. 713 * Flush buffer and try again.
707 */ 714 */
708 wl1271_skb_queue_head(wl, wlvif, skb); 715 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
709 716
710 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, 717 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
711 last_len); 718 last_len);
@@ -722,7 +729,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
722 * Firmware buffer is full. 729 * Firmware buffer is full.
723 * Queue back last skb, and stop aggregating. 730 * Queue back last skb, and stop aggregating.
724 */ 731 */
725 wl1271_skb_queue_head(wl, wlvif, skb); 732 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
726 /* No work left, avoid scheduling redundant tx work */ 733 /* No work left, avoid scheduling redundant tx work */
727 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 734 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
728 goto out_ack; 735 goto out_ack;
@@ -732,7 +739,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
732 * fw still expects dummy packet, 739 * fw still expects dummy packet,
733 * so re-enqueue it 740 * so re-enqueue it
734 */ 741 */
735 wl1271_skb_queue_head(wl, wlvif, skb); 742 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
736 else 743 else
737 ieee80211_free_txskb(wl->hw, skb); 744 ieee80211_free_txskb(wl->hw, skb);
738 goto out_ack; 745 goto out_ack;
@@ -1069,39 +1076,54 @@ void wl12xx_tx_reset(struct wl1271 *wl)
1069/* caller must *NOT* hold wl->mutex */ 1076/* caller must *NOT* hold wl->mutex */
1070void wl1271_tx_flush(struct wl1271 *wl) 1077void wl1271_tx_flush(struct wl1271 *wl)
1071{ 1078{
1072 unsigned long timeout; 1079 unsigned long timeout, start_time;
1073 int i; 1080 int i;
1074 timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); 1081 start_time = jiffies;
1082 timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
1075 1083
1076 /* only one flush should be in progress, for consistent queue state */ 1084 /* only one flush should be in progress, for consistent queue state */
1077 mutex_lock(&wl->flush_mutex); 1085 mutex_lock(&wl->flush_mutex);
1078 1086
1087 mutex_lock(&wl->mutex);
1088 if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
1089 mutex_unlock(&wl->mutex);
1090 goto out;
1091 }
1092
1079 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); 1093 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1080 1094
1081 while (!time_after(jiffies, timeout)) { 1095 while (!time_after(jiffies, timeout)) {
1082 mutex_lock(&wl->mutex); 1096 wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
1083 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
1084 wl->tx_frames_cnt, 1097 wl->tx_frames_cnt,
1085 wl1271_tx_total_queue_count(wl)); 1098 wl1271_tx_total_queue_count(wl));
1099
1100 /* force Tx and give the driver some time to flush data */
1101 mutex_unlock(&wl->mutex);
1102 if (wl1271_tx_total_queue_count(wl))
1103 wl1271_tx_work(&wl->tx_work);
1104 msleep(20);
1105 mutex_lock(&wl->mutex);
1106
1086 if ((wl->tx_frames_cnt == 0) && 1107 if ((wl->tx_frames_cnt == 0) &&
1087 (wl1271_tx_total_queue_count(wl) == 0)) { 1108 (wl1271_tx_total_queue_count(wl) == 0)) {
1088 mutex_unlock(&wl->mutex); 1109 wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
1089 goto out; 1110 jiffies_to_msecs(jiffies - start_time));
1111 goto out_wake;
1090 } 1112 }
1091 mutex_unlock(&wl->mutex);
1092 msleep(1);
1093 } 1113 }
1094 1114
1095 wl1271_warning("Unable to flush all TX buffers, timed out."); 1115 wl1271_warning("Unable to flush all TX buffers, "
1116 "timed out (timeout %d ms",
1117 WL1271_TX_FLUSH_TIMEOUT / 1000);
1096 1118
1097 /* forcibly flush all Tx buffers on our queues */ 1119 /* forcibly flush all Tx buffers on our queues */
1098 mutex_lock(&wl->mutex);
1099 for (i = 0; i < WL12XX_MAX_LINKS; i++) 1120 for (i = 0; i < WL12XX_MAX_LINKS; i++)
1100 wl1271_tx_reset_link_queues(wl, i); 1121 wl1271_tx_reset_link_queues(wl, i);
1101 mutex_unlock(&wl->mutex);
1102 1122
1103out: 1123out_wake:
1104 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); 1124 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1125 mutex_unlock(&wl->mutex);
1126out:
1105 mutex_unlock(&wl->flush_mutex); 1127 mutex_unlock(&wl->flush_mutex);
1106} 1128}
1107EXPORT_SYMBOL_GPL(wl1271_tx_flush); 1129EXPORT_SYMBOL_GPL(wl1271_tx_flush);
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 1e939b016155..349520d8b724 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
244 enum ieee80211_band rate_band); 244 enum ieee80211_band rate_band);
245u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); 245u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
246u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
247 struct sk_buff *skb);
248u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 246u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
249 struct sk_buff *skb); 247 struct sk_buff *skb, struct ieee80211_sta *sta);
250void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); 248void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
251void wl1271_handle_tx_low_watermark(struct wl1271 *wl); 249void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
252bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); 250bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0ce7a8ebbd46..68584aa0f2b0 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -31,12 +31,19 @@
31/* The maximum number of Tx descriptors in all chip families */ 31/* The maximum number of Tx descriptors in all chip families */
32#define WLCORE_MAX_TX_DESCRIPTORS 32 32#define WLCORE_MAX_TX_DESCRIPTORS 32
33 33
34/*
35 * We always allocate this number of mac addresses. If we don't
36 * have enough allocated addresses, the LAA bit is used
37 */
38#define WLCORE_NUM_MAC_ADDRESSES 3
39
34/* forward declaration */ 40/* forward declaration */
35struct wl1271_tx_hw_descr; 41struct wl1271_tx_hw_descr;
36enum wl_rx_buf_align; 42enum wl_rx_buf_align;
37struct wl1271_rx_descriptor; 43struct wl1271_rx_descriptor;
38 44
39struct wlcore_ops { 45struct wlcore_ops {
46 int (*setup)(struct wl1271 *wl);
40 int (*identify_chip)(struct wl1271 *wl); 47 int (*identify_chip)(struct wl1271 *wl);
41 int (*identify_fw)(struct wl1271 *wl); 48 int (*identify_fw)(struct wl1271 *wl);
42 int (*boot)(struct wl1271 *wl); 49 int (*boot)(struct wl1271 *wl);
@@ -139,10 +146,12 @@ struct wl1271_stats {
139}; 146};
140 147
141struct wl1271 { 148struct wl1271 {
149 bool initialized;
142 struct ieee80211_hw *hw; 150 struct ieee80211_hw *hw;
143 bool mac80211_registered; 151 bool mac80211_registered;
144 152
145 struct device *dev; 153 struct device *dev;
154 struct platform_device *pdev;
146 155
147 void *if_priv; 156 void *if_priv;
148 157
@@ -153,7 +162,7 @@ struct wl1271 {
153 162
154 spinlock_t wl_lock; 163 spinlock_t wl_lock;
155 164
156 enum wl1271_state state; 165 enum wlcore_state state;
157 enum wl12xx_fw_type fw_type; 166 enum wl12xx_fw_type fw_type;
158 bool plt; 167 bool plt;
159 enum plt_mode plt_mode; 168 enum plt_mode plt_mode;
@@ -181,7 +190,7 @@ struct wl1271 {
181 u32 fuse_nic_addr; 190 u32 fuse_nic_addr;
182 191
183 /* we have up to 2 MAC addresses */ 192 /* we have up to 2 MAC addresses */
184 struct mac_address addresses[2]; 193 struct mac_address addresses[WLCORE_NUM_MAC_ADDRESSES];
185 int channel; 194 int channel;
186 u8 system_hlid; 195 u8 system_hlid;
187 196
@@ -190,6 +199,8 @@ struct wl1271 {
190 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; 199 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
191 unsigned long rate_policies_map[ 200 unsigned long rate_policies_map[
192 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)]; 201 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
202 unsigned long klv_templates_map[
203 BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
193 204
194 struct list_head wlvif_list; 205 struct list_head wlvif_list;
195 206
@@ -237,6 +248,7 @@ struct wl1271 {
237 248
238 /* Intermediate buffer, used for packet aggregation */ 249 /* Intermediate buffer, used for packet aggregation */
239 u8 *aggr_buf; 250 u8 *aggr_buf;
251 u32 aggr_buf_size;
240 252
241 /* Reusable dummy packet template */ 253 /* Reusable dummy packet template */
242 struct sk_buff *dummy_packet; 254 struct sk_buff *dummy_packet;
@@ -393,13 +405,18 @@ struct wl1271 {
393 /* sleep auth value currently configured to FW */ 405 /* sleep auth value currently configured to FW */
394 int sleep_auth; 406 int sleep_auth;
395 407
408 /* the number of allocated MAC addresses in this chip */
409 int num_mac_addr;
410
396 /* the minimum FW version required for the driver to work */ 411 /* the minimum FW version required for the driver to work */
397 unsigned int min_fw_ver[NUM_FW_VER]; 412 unsigned int min_fw_ver[NUM_FW_VER];
413
414 struct completion nvs_loading_complete;
398}; 415};
399 416
400int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); 417int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
401int __devexit wlcore_remove(struct platform_device *pdev); 418int __devexit wlcore_remove(struct platform_device *pdev);
402struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size); 419struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
403int wlcore_free_hw(struct wl1271 *wl); 420int wlcore_free_hw(struct wl1271 *wl);
404int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, 421int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
405 struct ieee80211_vif *vif, 422 struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index c0505635bb00..6678d4b18611 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -66,6 +66,7 @@
66#define WLCORE_NUM_BANDS 2 66#define WLCORE_NUM_BANDS 2
67 67
68#define WL12XX_MAX_RATE_POLICIES 16 68#define WL12XX_MAX_RATE_POLICIES 16
69#define WLCORE_MAX_KLV_TEMPLATES 4
69 70
70/* Defined by FW as 0. Will not be freed or allocated. */ 71/* Defined by FW as 0. Will not be freed or allocated. */
71#define WL12XX_SYSTEM_HLID 0 72#define WL12XX_SYSTEM_HLID 0
@@ -83,11 +84,10 @@
83#define WL1271_AP_BSS_INDEX 0 84#define WL1271_AP_BSS_INDEX 0
84#define WL1271_AP_DEF_BEACON_EXP 20 85#define WL1271_AP_DEF_BEACON_EXP 20
85 86
86#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE) 87enum wlcore_state {
87 88 WLCORE_STATE_OFF,
88enum wl1271_state { 89 WLCORE_STATE_RESTARTING,
89 WL1271_STATE_OFF, 90 WLCORE_STATE_ON,
90 WL1271_STATE_ON,
91}; 91};
92 92
93enum wl12xx_fw_type { 93enum wl12xx_fw_type {
@@ -124,6 +124,7 @@ struct wl1271_chip {
124 u32 id; 124 u32 id;
125 char fw_ver_str[ETHTOOL_BUSINFO_LEN]; 125 char fw_ver_str[ETHTOOL_BUSINFO_LEN];
126 unsigned int fw_ver[NUM_FW_VER]; 126 unsigned int fw_ver[NUM_FW_VER];
127 char phy_fw_ver_str[ETHTOOL_BUSINFO_LEN];
127}; 128};
128 129
129#define NUM_TX_QUEUES 4 130#define NUM_TX_QUEUES 4
@@ -337,6 +338,8 @@ struct wl12xx_vif {
337 u8 ap_rate_idx; 338 u8 ap_rate_idx;
338 u8 p2p_rate_idx; 339 u8 p2p_rate_idx;
339 340
341 u8 klv_template_id;
342
340 bool qos; 343 bool qos;
341 } sta; 344 } sta;
342 struct { 345 struct {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 00f6e69c1dcd..730186d0449b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1520,13 +1520,12 @@ static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info,
1520 union iwreq_data *wrqu, char *extra) 1520 union iwreq_data *wrqu, char *extra)
1521{ 1521{
1522 struct wl3501_card *this = netdev_priv(dev); 1522 struct wl3501_card *this = netdev_priv(dev);
1523 static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 };
1524 int rc = -EINVAL; 1523 int rc = -EINVAL;
1525 1524
1526 /* FIXME: we support other ARPHRDs...*/ 1525 /* FIXME: we support other ARPHRDs...*/
1527 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 1526 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
1528 goto out; 1527 goto out;
1529 if (!memcmp(bcast, wrqu->ap_addr.sa_data, ETH_ALEN)) { 1528 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data)) {
1530 /* FIXME: rescan? */ 1529 /* FIXME: rescan? */
1531 } else 1530 } else
1532 memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); 1531 memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c9e2660e1263..114364b5d466 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
937 * control block of the skbuff will be initialized. If necessary the incoming 937 * control block of the skbuff will be initialized. If necessary the incoming
938 * mac80211 queues will be stopped. 938 * mac80211 queues will be stopped.
939 */ 939 */
940static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 940static void zd_op_tx(struct ieee80211_hw *hw,
941 struct ieee80211_tx_control *control,
942 struct sk_buff *skb)
941{ 943{
942 struct zd_mac *mac = zd_hw_mac(hw); 944 struct zd_mac *mac = zd_hw_mac(hw);
943 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 945 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
1176 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif); 1178 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
1177 if (!skb) 1179 if (!skb)
1178 break; 1180 break;
1179 zd_op_tx(mac->hw, skb); 1181 zd_op_tx(mac->hw, NULL, skb);
1180 } 1182 }
1181 1183
1182 /* 1184 /*
@@ -1399,7 +1401,8 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
1399 1401
1400 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 1402 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1401 IEEE80211_HW_SIGNAL_UNSPEC | 1403 IEEE80211_HW_SIGNAL_UNSPEC |
1402 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1404 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1405 IEEE80211_HW_MFP_CAPABLE;
1403 1406
1404 hw->wiphy->interface_modes = 1407 hw->wiphy->interface_modes =
1405 BIT(NL80211_IFTYPE_MESH_POINT) | 1408 BIT(NL80211_IFTYPE_MESH_POINT) |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 650f79a1f2bd..c934fe8583f5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1712,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev,
1712 break; 1712 break;
1713 1713
1714 case XenbusStateConnected: 1714 case XenbusStateConnected:
1715 netif_notify_peers(netdev); 1715 netdev_notify_peers(netdev);
1716 break; 1716 break;
1717 1717
1718 case XenbusStateClosing: 1718 case XenbusStateClosing:
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 3b20b73ee649..ec857676c39f 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -5,21 +5,9 @@
5menu "Near Field Communication (NFC) devices" 5menu "Near Field Communication (NFC) devices"
6 depends on NFC 6 depends on NFC
7 7
8config PN544_NFC
9 tristate "PN544 NFC driver"
10 depends on I2C
11 select CRC_CCITT
12 default n
13 ---help---
14 Say yes if you want PN544 Near Field Communication driver.
15 This is for i2c connected version. If unsure, say N here.
16
17 To compile this driver as a module, choose m here. The module will
18 be called pn544.
19
20config PN544_HCI_NFC 8config PN544_HCI_NFC
21 tristate "HCI PN544 NFC driver" 9 tristate "HCI PN544 NFC driver"
22 depends on I2C && NFC_SHDLC 10 depends on I2C && NFC_HCI && NFC_SHDLC
23 select CRC_CCITT 11 select CRC_CCITT
24 default n 12 default n
25 ---help--- 13 ---help---
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 473e44cef612..bf05831fdf09 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -2,7 +2,6 @@
2# Makefile for nfc devices 2# Makefile for nfc devices
3# 3#
4 4
5obj-$(CONFIG_PN544_NFC) += pn544.o
6obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o 5obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o
7obj-$(CONFIG_NFC_PN533) += pn533.o 6obj-$(CONFIG_NFC_PN533) += pn533.o
8obj-$(CONFIG_NFC_WILINK) += nfcwilink.o 7obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index e7fd4938f9bc..50b1ee41afc6 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -352,8 +352,6 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
352 struct nfcwilink *drv = priv_data; 352 struct nfcwilink *drv = priv_data;
353 int rc; 353 int rc;
354 354
355 nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
356
357 if (!skb) 355 if (!skb)
358 return -EFAULT; 356 return -EFAULT;
359 357
@@ -362,6 +360,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
362 return -EFAULT; 360 return -EFAULT;
363 } 361 }
364 362
363 nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
364
365 /* strip the ST header 365 /* strip the ST header
366 (apart for the chnl byte, which is not received in the hdr) */ 366 (apart for the chnl byte, which is not received in the hdr) */
367 skb_pull(skb, (NFCWILINK_HDR_LEN-1)); 367 skb_pull(skb, (NFCWILINK_HDR_LEN-1));
@@ -604,21 +604,7 @@ static struct platform_driver nfcwilink_driver = {
604 }, 604 },
605}; 605};
606 606
607/* ------- Module Init/Exit interfaces ------ */ 607module_platform_driver(nfcwilink_driver);
608static int __init nfcwilink_init(void)
609{
610 printk(KERN_INFO "NFC Driver for TI WiLink");
611
612 return platform_driver_register(&nfcwilink_driver);
613}
614
615static void __exit nfcwilink_exit(void)
616{
617 platform_driver_unregister(&nfcwilink_driver);
618}
619
620module_init(nfcwilink_init);
621module_exit(nfcwilink_exit);
622 608
623/* ------ Module Info ------ */ 609/* ------ Module Info ------ */
624 610
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index d606f52fec84..97c440a8cd61 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -356,6 +356,7 @@ struct pn533 {
356 356
357 struct workqueue_struct *wq; 357 struct workqueue_struct *wq;
358 struct work_struct cmd_work; 358 struct work_struct cmd_work;
359 struct work_struct cmd_complete_work;
359 struct work_struct poll_work; 360 struct work_struct poll_work;
360 struct work_struct mi_work; 361 struct work_struct mi_work;
361 struct work_struct tg_work; 362 struct work_struct tg_work;
@@ -383,6 +384,19 @@ struct pn533 {
383 u8 tgt_mode; 384 u8 tgt_mode;
384 385
385 u32 device_type; 386 u32 device_type;
387
388 struct list_head cmd_queue;
389 u8 cmd_pending;
390};
391
392struct pn533_cmd {
393 struct list_head queue;
394 struct pn533_frame *out_frame;
395 struct pn533_frame *in_frame;
396 int in_frame_len;
397 pn533_cmd_complete_t cmd_complete;
398 void *arg;
399 gfp_t flags;
386}; 400};
387 401
388struct pn533_frame { 402struct pn533_frame {
@@ -487,7 +501,7 @@ static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
487 501
488static void pn533_wq_cmd_complete(struct work_struct *work) 502static void pn533_wq_cmd_complete(struct work_struct *work)
489{ 503{
490 struct pn533 *dev = container_of(work, struct pn533, cmd_work); 504 struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
491 struct pn533_frame *in_frame; 505 struct pn533_frame *in_frame;
492 int rc; 506 int rc;
493 507
@@ -502,7 +516,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
502 PN533_FRAME_CMD_PARAMS_LEN(in_frame)); 516 PN533_FRAME_CMD_PARAMS_LEN(in_frame));
503 517
504 if (rc != -EINPROGRESS) 518 if (rc != -EINPROGRESS)
505 mutex_unlock(&dev->cmd_lock); 519 queue_work(dev->wq, &dev->cmd_work);
506} 520}
507 521
508static void pn533_recv_response(struct urb *urb) 522static void pn533_recv_response(struct urb *urb)
@@ -550,7 +564,7 @@ static void pn533_recv_response(struct urb *urb)
550 dev->wq_in_frame = in_frame; 564 dev->wq_in_frame = in_frame;
551 565
552sched_wq: 566sched_wq:
553 queue_work(dev->wq, &dev->cmd_work); 567 queue_work(dev->wq, &dev->cmd_complete_work);
554} 568}
555 569
556static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags) 570static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
@@ -606,7 +620,7 @@ static void pn533_recv_ack(struct urb *urb)
606 620
607sched_wq: 621sched_wq:
608 dev->wq_in_frame = NULL; 622 dev->wq_in_frame = NULL;
609 queue_work(dev->wq, &dev->cmd_work); 623 queue_work(dev->wq, &dev->cmd_complete_work);
610} 624}
611 625
612static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags) 626static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
@@ -669,6 +683,31 @@ error:
669 return rc; 683 return rc;
670} 684}
671 685
686static void pn533_wq_cmd(struct work_struct *work)
687{
688 struct pn533 *dev = container_of(work, struct pn533, cmd_work);
689 struct pn533_cmd *cmd;
690
691 mutex_lock(&dev->cmd_lock);
692
693 if (list_empty(&dev->cmd_queue)) {
694 dev->cmd_pending = 0;
695 mutex_unlock(&dev->cmd_lock);
696 return;
697 }
698
699 cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
700
701 mutex_unlock(&dev->cmd_lock);
702
703 __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
704 cmd->in_frame_len, cmd->cmd_complete,
705 cmd->arg, cmd->flags);
706
707 list_del(&cmd->queue);
708 kfree(cmd);
709}
710
672static int pn533_send_cmd_frame_async(struct pn533 *dev, 711static int pn533_send_cmd_frame_async(struct pn533 *dev,
673 struct pn533_frame *out_frame, 712 struct pn533_frame *out_frame,
674 struct pn533_frame *in_frame, 713 struct pn533_frame *in_frame,
@@ -676,21 +715,44 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
676 pn533_cmd_complete_t cmd_complete, 715 pn533_cmd_complete_t cmd_complete,
677 void *arg, gfp_t flags) 716 void *arg, gfp_t flags)
678{ 717{
679 int rc; 718 struct pn533_cmd *cmd;
719 int rc = 0;
680 720
681 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 721 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
682 722
683 if (!mutex_trylock(&dev->cmd_lock)) 723 mutex_lock(&dev->cmd_lock);
684 return -EBUSY;
685 724
686 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame, 725 if (!dev->cmd_pending) {
687 in_frame_len, cmd_complete, arg, flags); 726 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
688 if (rc) 727 in_frame_len, cmd_complete,
689 goto error; 728 arg, flags);
729 if (!rc)
730 dev->cmd_pending = 1;
690 731
691 return 0; 732 goto unlock;
692error: 733 }
734
735 nfc_dev_dbg(&dev->interface->dev, "%s Queueing command", __func__);
736
737 cmd = kzalloc(sizeof(struct pn533_cmd), flags);
738 if (!cmd) {
739 rc = -ENOMEM;
740 goto unlock;
741 }
742
743 INIT_LIST_HEAD(&cmd->queue);
744 cmd->out_frame = out_frame;
745 cmd->in_frame = in_frame;
746 cmd->in_frame_len = in_frame_len;
747 cmd->cmd_complete = cmd_complete;
748 cmd->arg = arg;
749 cmd->flags = flags;
750
751 list_add_tail(&cmd->queue, &dev->cmd_queue);
752
753unlock:
693 mutex_unlock(&dev->cmd_lock); 754 mutex_unlock(&dev->cmd_lock);
755
694 return rc; 756 return rc;
695} 757}
696 758
@@ -1305,8 +1367,6 @@ static void pn533_listen_mode_timer(unsigned long data)
1305 1367
1306 dev->cancel_listen = 1; 1368 dev->cancel_listen = 1;
1307 1369
1308 mutex_unlock(&dev->cmd_lock);
1309
1310 pn533_poll_next_mod(dev); 1370 pn533_poll_next_mod(dev);
1311 1371
1312 queue_work(dev->wq, &dev->poll_work); 1372 queue_work(dev->wq, &dev->poll_work);
@@ -2131,7 +2191,7 @@ error_cmd:
2131 2191
2132 kfree(arg); 2192 kfree(arg);
2133 2193
2134 mutex_unlock(&dev->cmd_lock); 2194 queue_work(dev->wq, &dev->cmd_work);
2135} 2195}
2136 2196
2137static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 2197static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -2330,13 +2390,12 @@ static int pn533_probe(struct usb_interface *interface,
2330 NULL, 0, 2390 NULL, 0,
2331 pn533_send_complete, dev); 2391 pn533_send_complete, dev);
2332 2392
2333 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete); 2393 INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
2394 INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
2334 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); 2395 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
2335 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data); 2396 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
2336 INIT_WORK(&dev->poll_work, pn533_wq_poll); 2397 INIT_WORK(&dev->poll_work, pn533_wq_poll);
2337 dev->wq = alloc_workqueue("pn533", 2398 dev->wq = alloc_ordered_workqueue("pn533", 0);
2338 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
2339 1);
2340 if (dev->wq == NULL) 2399 if (dev->wq == NULL)
2341 goto error; 2400 goto error;
2342 2401
@@ -2346,6 +2405,8 @@ static int pn533_probe(struct usb_interface *interface,
2346 2405
2347 skb_queue_head_init(&dev->resp_q); 2406 skb_queue_head_init(&dev->resp_q);
2348 2407
2408 INIT_LIST_HEAD(&dev->cmd_queue);
2409
2349 usb_set_intfdata(interface, dev); 2410 usb_set_intfdata(interface, dev);
2350 2411
2351 pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION); 2412 pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION);
@@ -2417,6 +2478,7 @@ error:
2417static void pn533_disconnect(struct usb_interface *interface) 2478static void pn533_disconnect(struct usb_interface *interface)
2418{ 2479{
2419 struct pn533 *dev; 2480 struct pn533 *dev;
2481 struct pn533_cmd *cmd, *n;
2420 2482
2421 dev = usb_get_intfdata(interface); 2483 dev = usb_get_intfdata(interface);
2422 usb_set_intfdata(interface, NULL); 2484 usb_set_intfdata(interface, NULL);
@@ -2433,6 +2495,11 @@ static void pn533_disconnect(struct usb_interface *interface)
2433 2495
2434 del_timer(&dev->listen_timer); 2496 del_timer(&dev->listen_timer);
2435 2497
2498 list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) {
2499 list_del(&cmd->queue);
2500 kfree(cmd);
2501 }
2502
2436 kfree(dev->in_frame); 2503 kfree(dev->in_frame);
2437 usb_free_urb(dev->in_urb); 2504 usb_free_urb(dev->in_urb);
2438 kfree(dev->out_frame); 2505 kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
deleted file mode 100644
index 724f65d8f9e4..000000000000
--- a/drivers/nfc/pn544.c
+++ /dev/null
@@ -1,893 +0,0 @@
1/*
2 * Driver for the PN544 NFC chip.
3 *
4 * Copyright (C) Nokia Corporation
5 *
6 * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
7 * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/completion.h>
24#include <linux/crc-ccitt.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/miscdevice.h>
29#include <linux/module.h>
30#include <linux/mutex.h>
31#include <linux/nfc/pn544.h>
32#include <linux/poll.h>
33#include <linux/regulator/consumer.h>
34#include <linux/serial_core.h> /* for TCGETS */
35#include <linux/slab.h>
36
37#define DRIVER_CARD "PN544 NFC"
38#define DRIVER_DESC "NFC driver for PN544"
39
40static struct i2c_device_id pn544_id_table[] = {
41 { PN544_DRIVER_NAME, 0 },
42 { }
43};
44MODULE_DEVICE_TABLE(i2c, pn544_id_table);
45
46#define HCI_MODE 0
47#define FW_MODE 1
48
49enum pn544_state {
50 PN544_ST_COLD,
51 PN544_ST_FW_READY,
52 PN544_ST_READY,
53};
54
55enum pn544_irq {
56 PN544_NONE,
57 PN544_INT,
58};
59
60struct pn544_info {
61 struct miscdevice miscdev;
62 struct i2c_client *i2c_dev;
63 struct regulator_bulk_data regs[3];
64
65 enum pn544_state state;
66 wait_queue_head_t read_wait;
67 loff_t read_offset;
68 enum pn544_irq read_irq;
69 struct mutex read_mutex; /* Serialize read_irq access */
70 struct mutex mutex; /* Serialize info struct access */
71 u8 *buf;
72 size_t buflen;
73};
74
75static const char reg_vdd_io[] = "Vdd_IO";
76static const char reg_vbat[] = "VBat";
77static const char reg_vsim[] = "VSim";
78
79/* sysfs interface */
80static ssize_t pn544_test(struct device *dev,
81 struct device_attribute *attr, char *buf)
82{
83 struct pn544_info *info = dev_get_drvdata(dev);
84 struct i2c_client *client = info->i2c_dev;
85 struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
86
87 return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
88}
89
90static int pn544_enable(struct pn544_info *info, int mode)
91{
92 struct pn544_nfc_platform_data *pdata;
93 struct i2c_client *client = info->i2c_dev;
94
95 int r;
96
97 r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
98 if (r < 0)
99 return r;
100
101 pdata = client->dev.platform_data;
102 info->read_irq = PN544_NONE;
103 if (pdata->enable)
104 pdata->enable(mode);
105
106 if (mode) {
107 info->state = PN544_ST_FW_READY;
108 dev_dbg(&client->dev, "now in FW-mode\n");
109 } else {
110 info->state = PN544_ST_READY;
111 dev_dbg(&client->dev, "now in HCI-mode\n");
112 }
113
114 usleep_range(10000, 15000);
115
116 return 0;
117}
118
119static void pn544_disable(struct pn544_info *info)
120{
121 struct pn544_nfc_platform_data *pdata;
122 struct i2c_client *client = info->i2c_dev;
123
124 pdata = client->dev.platform_data;
125 if (pdata->disable)
126 pdata->disable();
127
128 info->state = PN544_ST_COLD;
129
130 dev_dbg(&client->dev, "Now in OFF-mode\n");
131
132 msleep(PN544_RESETVEN_TIME);
133
134 info->read_irq = PN544_NONE;
135 regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
136}
137
138static int check_crc(u8 *buf, int buflen)
139{
140 u8 len;
141 u16 crc;
142
143 len = buf[0] + 1;
144 if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
145 pr_err(PN544_DRIVER_NAME
146 ": CRC; corrupt packet len %u (%d)\n", len, buflen);
147 print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
148 16, 2, buf, buflen, false);
149 return -EPERM;
150 }
151 crc = crc_ccitt(0xffff, buf, len - 2);
152 crc = ~crc;
153
154 if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
155 pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
156 crc, buf[len-1], buf[len-2]);
157
158 print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
159 16, 2, buf, buflen, false);
160 return -EPERM;
161 }
162 return 0;
163}
164
165static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
166{
167 int r;
168
169 if (len < 4 || len != (buf[0] + 1)) {
170 dev_err(&client->dev, "%s: Illegal message length: %d\n",
171 __func__, len);
172 return -EINVAL;
173 }
174
175 if (check_crc(buf, len))
176 return -EINVAL;
177
178 usleep_range(3000, 6000);
179
180 r = i2c_master_send(client, buf, len);
181 dev_dbg(&client->dev, "send: %d\n", r);
182
183 if (r == -EREMOTEIO) { /* Retry, chip was in standby */
184 usleep_range(6000, 10000);
185 r = i2c_master_send(client, buf, len);
186 dev_dbg(&client->dev, "send2: %d\n", r);
187 }
188
189 if (r != len)
190 return -EREMOTEIO;
191
192 return r;
193}
194
195static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
196{
197 int r;
198 u8 len;
199
200 /*
201 * You could read a packet in one go, but then you'd need to read
202 * max size and rest would be 0xff fill, so we do split reads.
203 */
204 r = i2c_master_recv(client, &len, 1);
205 dev_dbg(&client->dev, "recv1: %d\n", r);
206
207 if (r != 1)
208 return -EREMOTEIO;
209
210 if (len < PN544_LLC_HCI_OVERHEAD)
211 len = PN544_LLC_HCI_OVERHEAD;
212 else if (len > (PN544_MSG_MAX_SIZE - 1))
213 len = PN544_MSG_MAX_SIZE - 1;
214
215 if (1 + len > buflen) /* len+(data+crc16) */
216 return -EMSGSIZE;
217
218 buf[0] = len;
219
220 r = i2c_master_recv(client, buf + 1, len);
221 dev_dbg(&client->dev, "recv2: %d\n", r);
222
223 if (r != len)
224 return -EREMOTEIO;
225
226 usleep_range(3000, 6000);
227
228 return r + 1;
229}
230
231static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
232{
233 int r;
234
235 dev_dbg(&client->dev, "%s\n", __func__);
236
237 if (len < PN544_FW_HEADER_SIZE ||
238 (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
239 return -EINVAL;
240
241 r = i2c_master_send(client, buf, len);
242 dev_dbg(&client->dev, "fw send: %d\n", r);
243
244 if (r == -EREMOTEIO) { /* Retry, chip was in standby */
245 usleep_range(6000, 10000);
246 r = i2c_master_send(client, buf, len);
247 dev_dbg(&client->dev, "fw send2: %d\n", r);
248 }
249
250 if (r != len)
251 return -EREMOTEIO;
252
253 return r;
254}
255
256static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
257{
258 int r, len;
259
260 if (buflen < PN544_FW_HEADER_SIZE)
261 return -EINVAL;
262
263 r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
264 dev_dbg(&client->dev, "FW recv1: %d\n", r);
265
266 if (r < 0)
267 return r;
268
269 if (r < PN544_FW_HEADER_SIZE)
270 return -EINVAL;
271
272 len = (buf[1] << 8) + buf[2];
273 if (len == 0) /* just header, no additional data */
274 return r;
275
276 if (len > buflen - PN544_FW_HEADER_SIZE)
277 return -EMSGSIZE;
278
279 r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
280 dev_dbg(&client->dev, "fw recv2: %d\n", r);
281
282 if (r != len)
283 return -EINVAL;
284
285 return r + PN544_FW_HEADER_SIZE;
286}
287
288static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
289{
290 struct pn544_info *info = dev_id;
291 struct i2c_client *client = info->i2c_dev;
292
293 BUG_ON(!info);
294 BUG_ON(irq != info->i2c_dev->irq);
295
296 dev_dbg(&client->dev, "IRQ\n");
297
298 mutex_lock(&info->read_mutex);
299 info->read_irq = PN544_INT;
300 mutex_unlock(&info->read_mutex);
301
302 wake_up_interruptible(&info->read_wait);
303
304 return IRQ_HANDLED;
305}
306
307static enum pn544_irq pn544_irq_state(struct pn544_info *info)
308{
309 enum pn544_irq irq;
310
311 mutex_lock(&info->read_mutex);
312 irq = info->read_irq;
313 mutex_unlock(&info->read_mutex);
314 /*
315 * XXX: should we check GPIO-line status directly?
316 * return pdata->irq_status() ? PN544_INT : PN544_NONE;
317 */
318
319 return irq;
320}
321
322static ssize_t pn544_read(struct file *file, char __user *buf,
323 size_t count, loff_t *offset)
324{
325 struct pn544_info *info = container_of(file->private_data,
326 struct pn544_info, miscdev);
327 struct i2c_client *client = info->i2c_dev;
328 enum pn544_irq irq;
329 size_t len;
330 int r = 0;
331
332 dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
333 info, count);
334
335 mutex_lock(&info->mutex);
336
337 if (info->state == PN544_ST_COLD) {
338 r = -ENODEV;
339 goto out;
340 }
341
342 irq = pn544_irq_state(info);
343 if (irq == PN544_NONE) {
344 if (file->f_flags & O_NONBLOCK) {
345 r = -EAGAIN;
346 goto out;
347 }
348
349 if (wait_event_interruptible(info->read_wait,
350 (info->read_irq == PN544_INT))) {
351 r = -ERESTARTSYS;
352 goto out;
353 }
354 }
355
356 if (info->state == PN544_ST_FW_READY) {
357 len = min(count, info->buflen);
358
359 mutex_lock(&info->read_mutex);
360 r = pn544_fw_read(info->i2c_dev, info->buf, len);
361 info->read_irq = PN544_NONE;
362 mutex_unlock(&info->read_mutex);
363
364 if (r < 0) {
365 dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
366 goto out;
367 }
368
369 print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
370 16, 2, info->buf, r, false);
371
372 *offset += r;
373 if (copy_to_user(buf, info->buf, r)) {
374 r = -EFAULT;
375 goto out;
376 }
377 } else {
378 len = min(count, info->buflen);
379
380 mutex_lock(&info->read_mutex);
381 r = pn544_i2c_read(info->i2c_dev, info->buf, len);
382 info->read_irq = PN544_NONE;
383 mutex_unlock(&info->read_mutex);
384
385 if (r < 0) {
386 dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
387 goto out;
388 }
389 print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
390 16, 2, info->buf, r, false);
391
392 *offset += r;
393 if (copy_to_user(buf, info->buf, r)) {
394 r = -EFAULT;
395 goto out;
396 }
397 }
398
399out:
400 mutex_unlock(&info->mutex);
401
402 return r;
403}
404
405static unsigned int pn544_poll(struct file *file, poll_table *wait)
406{
407 struct pn544_info *info = container_of(file->private_data,
408 struct pn544_info, miscdev);
409 struct i2c_client *client = info->i2c_dev;
410 int r = 0;
411
412 dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
413
414 mutex_lock(&info->mutex);
415
416 if (info->state == PN544_ST_COLD) {
417 r = -ENODEV;
418 goto out;
419 }
420
421 poll_wait(file, &info->read_wait, wait);
422
423 if (pn544_irq_state(info) == PN544_INT) {
424 r = POLLIN | POLLRDNORM;
425 goto out;
426 }
427out:
428 mutex_unlock(&info->mutex);
429
430 return r;
431}
432
433static ssize_t pn544_write(struct file *file, const char __user *buf,
434 size_t count, loff_t *ppos)
435{
436 struct pn544_info *info = container_of(file->private_data,
437 struct pn544_info, miscdev);
438 struct i2c_client *client = info->i2c_dev;
439 ssize_t len;
440 int r;
441
442 dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
443 info, count);
444
445 mutex_lock(&info->mutex);
446
447 if (info->state == PN544_ST_COLD) {
448 r = -ENODEV;
449 goto out;
450 }
451
452 /*
453 * XXX: should we detect rset-writes and clean possible
454 * read_irq state
455 */
456 if (info->state == PN544_ST_FW_READY) {
457 size_t fw_len;
458
459 if (count < PN544_FW_HEADER_SIZE) {
460 r = -EINVAL;
461 goto out;
462 }
463
464 len = min(count, info->buflen);
465 if (copy_from_user(info->buf, buf, len)) {
466 r = -EFAULT;
467 goto out;
468 }
469
470 print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
471 16, 2, info->buf, len, false);
472
473 fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
474 info->buf[2];
475
476 if (len > fw_len) /* 1 msg at a time */
477 len = fw_len;
478
479 r = pn544_fw_write(info->i2c_dev, info->buf, len);
480 } else {
481 if (count < PN544_LLC_MIN_SIZE) {
482 r = -EINVAL;
483 goto out;
484 }
485
486 len = min(count, info->buflen);
487 if (copy_from_user(info->buf, buf, len)) {
488 r = -EFAULT;
489 goto out;
490 }
491
492 print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
493 16, 2, info->buf, len, false);
494
495 if (len > (info->buf[0] + 1)) /* 1 msg at a time */
496 len = info->buf[0] + 1;
497
498 r = pn544_i2c_write(info->i2c_dev, info->buf, len);
499 }
500out:
501 mutex_unlock(&info->mutex);
502
503 return r;
504
505}
506
507static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
508{
509 struct pn544_info *info = container_of(file->private_data,
510 struct pn544_info, miscdev);
511 struct i2c_client *client = info->i2c_dev;
512 struct pn544_nfc_platform_data *pdata;
513 unsigned int val;
514 int r = 0;
515
516 dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
517
518 mutex_lock(&info->mutex);
519
520 if (info->state == PN544_ST_COLD) {
521 r = -ENODEV;
522 goto out;
523 }
524
525 pdata = info->i2c_dev->dev.platform_data;
526 switch (cmd) {
527 case PN544_GET_FW_MODE:
528 dev_dbg(&client->dev, "%s: PN544_GET_FW_MODE\n", __func__);
529
530 val = (info->state == PN544_ST_FW_READY);
531 if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
532 r = -EFAULT;
533 goto out;
534 }
535
536 break;
537
538 case PN544_SET_FW_MODE:
539 dev_dbg(&client->dev, "%s: PN544_SET_FW_MODE\n", __func__);
540
541 if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
542 r = -EFAULT;
543 goto out;
544 }
545
546 if (val) {
547 if (info->state == PN544_ST_FW_READY)
548 break;
549
550 pn544_disable(info);
551 r = pn544_enable(info, FW_MODE);
552 if (r < 0)
553 goto out;
554 } else {
555 if (info->state == PN544_ST_READY)
556 break;
557 pn544_disable(info);
558 r = pn544_enable(info, HCI_MODE);
559 if (r < 0)
560 goto out;
561 }
562 file->f_pos = info->read_offset;
563 break;
564
565 case TCGETS:
566 dev_dbg(&client->dev, "%s: TCGETS\n", __func__);
567
568 r = -ENOIOCTLCMD;
569 break;
570
571 default:
572 dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
573 r = -ENOIOCTLCMD;
574 break;
575 }
576
577out:
578 mutex_unlock(&info->mutex);
579
580 return r;
581}
582
583static int pn544_open(struct inode *inode, struct file *file)
584{
585 struct pn544_info *info = container_of(file->private_data,
586 struct pn544_info, miscdev);
587 struct i2c_client *client = info->i2c_dev;
588 int r = 0;
589
590 dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
591 info, info->i2c_dev);
592
593 mutex_lock(&info->mutex);
594
595 /*
596 * Only 1 at a time.
597 * XXX: maybe user (counter) would work better
598 */
599 if (info->state != PN544_ST_COLD) {
600 r = -EBUSY;
601 goto out;
602 }
603
604 file->f_pos = info->read_offset;
605 r = pn544_enable(info, HCI_MODE);
606
607out:
608 mutex_unlock(&info->mutex);
609 return r;
610}
611
612static int pn544_close(struct inode *inode, struct file *file)
613{
614 struct pn544_info *info = container_of(file->private_data,
615 struct pn544_info, miscdev);
616 struct i2c_client *client = info->i2c_dev;
617
618 dev_dbg(&client->dev, "%s: info: %p, client %p\n",
619 __func__, info, info->i2c_dev);
620
621 mutex_lock(&info->mutex);
622 pn544_disable(info);
623 mutex_unlock(&info->mutex);
624
625 return 0;
626}
627
628static const struct file_operations pn544_fops = {
629 .owner = THIS_MODULE,
630 .llseek = no_llseek,
631 .read = pn544_read,
632 .write = pn544_write,
633 .poll = pn544_poll,
634 .open = pn544_open,
635 .release = pn544_close,
636 .unlocked_ioctl = pn544_ioctl,
637};
638
639#ifdef CONFIG_PM
640static int pn544_suspend(struct device *dev)
641{
642 struct i2c_client *client = to_i2c_client(dev);
643 struct pn544_info *info;
644 int r = 0;
645
646 dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
647
648 info = i2c_get_clientdata(client);
649 dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
650 info, client);
651
652 mutex_lock(&info->mutex);
653
654 switch (info->state) {
655 case PN544_ST_FW_READY:
656 /* Do not suspend while upgrading FW, please! */
657 r = -EPERM;
658 break;
659
660 case PN544_ST_READY:
661 /*
662 * CHECK: Device should be in standby-mode. No way to check?
663 * Allowing low power mode for the regulator is potentially
664 * dangerous if pn544 does not go to suspension.
665 */
666 break;
667
668 case PN544_ST_COLD:
669 break;
670 };
671
672 mutex_unlock(&info->mutex);
673 return r;
674}
675
676static int pn544_resume(struct device *dev)
677{
678 struct i2c_client *client = to_i2c_client(dev);
679 struct pn544_info *info = i2c_get_clientdata(client);
680 int r = 0;
681
682 dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
683 info, client);
684
685 mutex_lock(&info->mutex);
686
687 switch (info->state) {
688 case PN544_ST_READY:
689 /*
690 * CHECK: If regulator low power mode is allowed in
691 * pn544_suspend, we should go back to normal mode
692 * here.
693 */
694 break;
695
696 case PN544_ST_COLD:
697 break;
698
699 case PN544_ST_FW_READY:
700 break;
701 };
702
703 mutex_unlock(&info->mutex);
704
705 return r;
706}
707
708static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
709#endif
710
711static struct device_attribute pn544_attr =
712 __ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
713
714static int __devinit pn544_probe(struct i2c_client *client,
715 const struct i2c_device_id *id)
716{
717 struct pn544_info *info;
718 struct pn544_nfc_platform_data *pdata;
719 int r = 0;
720
721 dev_dbg(&client->dev, "%s\n", __func__);
722 dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
723
724 /* private data allocation */
725 info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
726 if (!info) {
727 dev_err(&client->dev,
728 "Cannot allocate memory for pn544_info.\n");
729 r = -ENOMEM;
730 goto err_info_alloc;
731 }
732
733 info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
734 info->buf = kzalloc(info->buflen, GFP_KERNEL);
735 if (!info->buf) {
736 dev_err(&client->dev,
737 "Cannot allocate memory for pn544_info->buf.\n");
738 r = -ENOMEM;
739 goto err_buf_alloc;
740 }
741
742 info->regs[0].supply = reg_vdd_io;
743 info->regs[1].supply = reg_vbat;
744 info->regs[2].supply = reg_vsim;
745 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
746 info->regs);
747 if (r < 0)
748 goto err_kmalloc;
749
750 info->i2c_dev = client;
751 info->state = PN544_ST_COLD;
752 info->read_irq = PN544_NONE;
753 mutex_init(&info->read_mutex);
754 mutex_init(&info->mutex);
755 init_waitqueue_head(&info->read_wait);
756 i2c_set_clientdata(client, info);
757 pdata = client->dev.platform_data;
758 if (!pdata) {
759 dev_err(&client->dev, "No platform data\n");
760 r = -EINVAL;
761 goto err_reg;
762 }
763
764 if (!pdata->request_resources) {
765 dev_err(&client->dev, "request_resources() missing\n");
766 r = -EINVAL;
767 goto err_reg;
768 }
769
770 r = pdata->request_resources(client);
771 if (r) {
772 dev_err(&client->dev, "Cannot get platform resources\n");
773 goto err_reg;
774 }
775
776 r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
777 IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
778 info);
779 if (r < 0) {
780 dev_err(&client->dev, "Unable to register IRQ handler\n");
781 goto err_res;
782 }
783
784 /* If we don't have the test we don't need the sysfs file */
785 if (pdata->test) {
786 r = device_create_file(&client->dev, &pn544_attr);
787 if (r) {
788 dev_err(&client->dev,
789 "sysfs registration failed, error %d\n", r);
790 goto err_irq;
791 }
792 }
793
794 info->miscdev.minor = MISC_DYNAMIC_MINOR;
795 info->miscdev.name = PN544_DRIVER_NAME;
796 info->miscdev.fops = &pn544_fops;
797 info->miscdev.parent = &client->dev;
798 r = misc_register(&info->miscdev);
799 if (r < 0) {
800 dev_err(&client->dev, "Device registration failed\n");
801 goto err_sysfs;
802 }
803
804 dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
805 __func__, info, pdata, client);
806
807 return 0;
808
809err_sysfs:
810 if (pdata->test)
811 device_remove_file(&client->dev, &pn544_attr);
812err_irq:
813 free_irq(client->irq, info);
814err_res:
815 if (pdata->free_resources)
816 pdata->free_resources();
817err_reg:
818 regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
819err_kmalloc:
820 kfree(info->buf);
821err_buf_alloc:
822 kfree(info);
823err_info_alloc:
824 return r;
825}
826
827static __devexit int pn544_remove(struct i2c_client *client)
828{
829 struct pn544_info *info = i2c_get_clientdata(client);
830 struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
831
832 dev_dbg(&client->dev, "%s\n", __func__);
833
834 misc_deregister(&info->miscdev);
835 if (pdata->test)
836 device_remove_file(&client->dev, &pn544_attr);
837
838 if (info->state != PN544_ST_COLD) {
839 if (pdata->disable)
840 pdata->disable();
841
842 info->read_irq = PN544_NONE;
843 }
844
845 free_irq(client->irq, info);
846 if (pdata->free_resources)
847 pdata->free_resources();
848
849 regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
850 kfree(info->buf);
851 kfree(info);
852
853 return 0;
854}
855
856static struct i2c_driver pn544_driver = {
857 .driver = {
858 .name = PN544_DRIVER_NAME,
859#ifdef CONFIG_PM
860 .pm = &pn544_pm_ops,
861#endif
862 },
863 .probe = pn544_probe,
864 .id_table = pn544_id_table,
865 .remove = __devexit_p(pn544_remove),
866};
867
868static int __init pn544_init(void)
869{
870 int r;
871
872 pr_debug(DRIVER_DESC ": %s\n", __func__);
873
874 r = i2c_add_driver(&pn544_driver);
875 if (r) {
876 pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
877 return r;
878 }
879
880 return 0;
881}
882
883static void __exit pn544_exit(void)
884{
885 i2c_del_driver(&pn544_driver);
886 pr_info(DRIVER_DESC ", Exiting.\n");
887}
888
889module_init(pn544_init);
890module_exit(pn544_exit);
891
892MODULE_LICENSE("GPL");
893MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index aa71807189ba..c9c8570273ab 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -29,7 +29,7 @@
29 29
30#include <linux/nfc.h> 30#include <linux/nfc.h>
31#include <net/nfc/hci.h> 31#include <net/nfc/hci.h>
32#include <net/nfc/shdlc.h> 32#include <net/nfc/llc.h>
33 33
34#include <linux/nfc/pn544.h> 34#include <linux/nfc/pn544.h>
35 35
@@ -128,10 +128,12 @@ static struct nfc_hci_gate pn544_gates[] = {
128 128
129/* Largest headroom needed for outgoing custom commands */ 129/* Largest headroom needed for outgoing custom commands */
130#define PN544_CMDS_HEADROOM 2 130#define PN544_CMDS_HEADROOM 2
131#define PN544_FRAME_HEADROOM 1
132#define PN544_FRAME_TAILROOM 2
131 133
132struct pn544_hci_info { 134struct pn544_hci_info {
133 struct i2c_client *i2c_dev; 135 struct i2c_client *i2c_dev;
134 struct nfc_shdlc *shdlc; 136 struct nfc_hci_dev *hdev;
135 137
136 enum pn544_state state; 138 enum pn544_state state;
137 139
@@ -146,6 +148,9 @@ struct pn544_hci_info {
146 * < 0 if hardware error occured (e.g. i2c err) 148 * < 0 if hardware error occured (e.g. i2c err)
147 * and prevents normal operation. 149 * and prevents normal operation.
148 */ 150 */
151 int async_cb_type;
152 data_exchange_cb_t async_cb;
153 void *async_cb_context;
149}; 154};
150 155
151static void pn544_hci_platform_init(struct pn544_hci_info *info) 156static void pn544_hci_platform_init(struct pn544_hci_info *info)
@@ -230,8 +235,12 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
230 r = i2c_master_send(client, buf, len); 235 r = i2c_master_send(client, buf, len);
231 } 236 }
232 237
233 if (r >= 0 && r != len) 238 if (r >= 0) {
234 r = -EREMOTEIO; 239 if (r != len)
240 return -EREMOTEIO;
241 else
242 return 0;
243 }
235 244
236 return r; 245 return r;
237} 246}
@@ -341,13 +350,16 @@ flush:
341static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id) 350static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
342{ 351{
343 struct pn544_hci_info *info = dev_id; 352 struct pn544_hci_info *info = dev_id;
344 struct i2c_client *client = info->i2c_dev; 353 struct i2c_client *client;
345 struct sk_buff *skb = NULL; 354 struct sk_buff *skb = NULL;
346 int r; 355 int r;
347 356
348 BUG_ON(!info); 357 if (!info || irq != info->i2c_dev->irq) {
349 BUG_ON(irq != info->i2c_dev->irq); 358 WARN_ON_ONCE(1);
359 return IRQ_NONE;
360 }
350 361
362 client = info->i2c_dev;
351 dev_dbg(&client->dev, "IRQ\n"); 363 dev_dbg(&client->dev, "IRQ\n");
352 364
353 if (info->hard_fault != 0) 365 if (info->hard_fault != 0)
@@ -357,21 +369,21 @@ static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
357 if (r == -EREMOTEIO) { 369 if (r == -EREMOTEIO) {
358 info->hard_fault = r; 370 info->hard_fault = r;
359 371
360 nfc_shdlc_recv_frame(info->shdlc, NULL); 372 nfc_hci_recv_frame(info->hdev, NULL);
361 373
362 return IRQ_HANDLED; 374 return IRQ_HANDLED;
363 } else if ((r == -ENOMEM) || (r == -EBADMSG)) { 375 } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
364 return IRQ_HANDLED; 376 return IRQ_HANDLED;
365 } 377 }
366 378
367 nfc_shdlc_recv_frame(info->shdlc, skb); 379 nfc_hci_recv_frame(info->hdev, skb);
368 380
369 return IRQ_HANDLED; 381 return IRQ_HANDLED;
370} 382}
371 383
372static int pn544_hci_open(struct nfc_shdlc *shdlc) 384static int pn544_hci_open(struct nfc_hci_dev *hdev)
373{ 385{
374 struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc); 386 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
375 int r = 0; 387 int r = 0;
376 388
377 mutex_lock(&info->info_lock); 389 mutex_lock(&info->info_lock);
@@ -391,9 +403,9 @@ out:
391 return r; 403 return r;
392} 404}
393 405
394static void pn544_hci_close(struct nfc_shdlc *shdlc) 406static void pn544_hci_close(struct nfc_hci_dev *hdev)
395{ 407{
396 struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc); 408 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
397 409
398 mutex_lock(&info->info_lock); 410 mutex_lock(&info->info_lock);
399 411
@@ -408,9 +420,8 @@ out:
408 mutex_unlock(&info->info_lock); 420 mutex_unlock(&info->info_lock);
409} 421}
410 422
411static int pn544_hci_ready(struct nfc_shdlc *shdlc) 423static int pn544_hci_ready(struct nfc_hci_dev *hdev)
412{ 424{
413 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
414 struct sk_buff *skb; 425 struct sk_buff *skb;
415 static struct hw_config { 426 static struct hw_config {
416 u8 adr[2]; 427 u8 adr[2];
@@ -576,21 +587,45 @@ static int pn544_hci_ready(struct nfc_shdlc *shdlc)
576 return 0; 587 return 0;
577} 588}
578 589
579static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb) 590static void pn544_hci_add_len_crc(struct sk_buff *skb)
580{ 591{
581 struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc); 592 u16 crc;
593 int len;
594
595 len = skb->len + 2;
596 *skb_push(skb, 1) = len;
597
598 crc = crc_ccitt(0xffff, skb->data, skb->len);
599 crc = ~crc;
600 *skb_put(skb, 1) = crc & 0xff;
601 *skb_put(skb, 1) = crc >> 8;
602}
603
604static void pn544_hci_remove_len_crc(struct sk_buff *skb)
605{
606 skb_pull(skb, PN544_FRAME_HEADROOM);
607 skb_trim(skb, PN544_FRAME_TAILROOM);
608}
609
610static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
611{
612 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
582 struct i2c_client *client = info->i2c_dev; 613 struct i2c_client *client = info->i2c_dev;
614 int r;
583 615
584 if (info->hard_fault != 0) 616 if (info->hard_fault != 0)
585 return info->hard_fault; 617 return info->hard_fault;
586 618
587 return pn544_hci_i2c_write(client, skb->data, skb->len); 619 pn544_hci_add_len_crc(skb);
620 r = pn544_hci_i2c_write(client, skb->data, skb->len);
621 pn544_hci_remove_len_crc(skb);
622
623 return r;
588} 624}
589 625
590static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, 626static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
591 u32 im_protocols, u32 tm_protocols) 627 u32 im_protocols, u32 tm_protocols)
592{ 628{
593 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
594 u8 phases = 0; 629 u8 phases = 0;
595 int r; 630 int r;
596 u8 duration[2]; 631 u8 duration[2];
@@ -641,7 +676,7 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
641 return r; 676 return r;
642} 677}
643 678
644static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate, 679static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
645 struct nfc_target *target) 680 struct nfc_target *target)
646{ 681{
647 switch (gate) { 682 switch (gate) {
@@ -659,11 +694,10 @@ static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
659 return 0; 694 return 0;
660} 695}
661 696
662static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc, 697static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
663 u8 gate, 698 u8 gate,
664 struct nfc_target *target) 699 struct nfc_target *target)
665{ 700{
666 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
667 struct sk_buff *uid_skb; 701 struct sk_buff *uid_skb;
668 int r = 0; 702 int r = 0;
669 703
@@ -704,6 +738,26 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
704 return r; 738 return r;
705} 739}
706 740
741#define PN544_CB_TYPE_READER_F 1
742
743static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
744 int err)
745{
746 struct pn544_hci_info *info = context;
747
748 switch (info->async_cb_type) {
749 case PN544_CB_TYPE_READER_F:
750 if (err == 0)
751 skb_pull(skb, 1);
752 info->async_cb(info->async_cb_context, skb, err);
753 break;
754 default:
755 if (err == 0)
756 kfree_skb(skb);
757 break;
758 }
759}
760
707#define MIFARE_CMD_AUTH_KEY_A 0x60 761#define MIFARE_CMD_AUTH_KEY_A 0x60
708#define MIFARE_CMD_AUTH_KEY_B 0x61 762#define MIFARE_CMD_AUTH_KEY_B 0x61
709#define MIFARE_CMD_HEADER 2 763#define MIFARE_CMD_HEADER 2
@@ -715,13 +769,12 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
715 * <= 0: driver handled the data exchange 769 * <= 0: driver handled the data exchange
716 * 1: driver doesn't especially handle, please do standard processing 770 * 1: driver doesn't especially handle, please do standard processing
717 */ 771 */
718static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc, 772static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
719 struct nfc_target *target, 773 struct nfc_target *target,
720 struct sk_buff *skb, 774 struct sk_buff *skb, data_exchange_cb_t cb,
721 struct sk_buff **res_skb) 775 void *cb_context)
722{ 776{
723 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc); 777 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
724 int r;
725 778
726 pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__, 779 pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
727 target->hci_reader_gate); 780 target->hci_reader_gate);
@@ -746,41 +799,43 @@ static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
746 memcpy(data, uid, MIFARE_UID_LEN); 799 memcpy(data, uid, MIFARE_UID_LEN);
747 } 800 }
748 801
749 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, 802 return nfc_hci_send_cmd_async(hdev,
750 PN544_MIFARE_CMD, 803 target->hci_reader_gate,
751 skb->data, skb->len, res_skb); 804 PN544_MIFARE_CMD,
805 skb->data, skb->len,
806 cb, cb_context);
752 } else 807 } else
753 return 1; 808 return 1;
754 case PN544_RF_READER_F_GATE: 809 case PN544_RF_READER_F_GATE:
755 *skb_push(skb, 1) = 0; 810 *skb_push(skb, 1) = 0;
756 *skb_push(skb, 1) = 0; 811 *skb_push(skb, 1) = 0;
757 812
758 r = nfc_hci_send_cmd(hdev, target->hci_reader_gate, 813 info->async_cb_type = PN544_CB_TYPE_READER_F;
759 PN544_FELICA_RAW, 814 info->async_cb = cb;
760 skb->data, skb->len, res_skb); 815 info->async_cb_context = cb_context;
761 if (r == 0) 816
762 skb_pull(*res_skb, 1); 817 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
763 return r; 818 PN544_FELICA_RAW, skb->data,
819 skb->len,
820 pn544_hci_data_exchange_cb, info);
764 case PN544_RF_READER_JEWEL_GATE: 821 case PN544_RF_READER_JEWEL_GATE:
765 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, 822 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
766 PN544_JEWEL_RAW_CMD, 823 PN544_JEWEL_RAW_CMD, skb->data,
767 skb->data, skb->len, res_skb); 824 skb->len, cb, cb_context);
768 default: 825 default:
769 return 1; 826 return 1;
770 } 827 }
771} 828}
772 829
773static int pn544_hci_check_presence(struct nfc_shdlc *shdlc, 830static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
774 struct nfc_target *target) 831 struct nfc_target *target)
775{ 832{
776 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
777
778 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, 833 return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
779 PN544_RF_READER_CMD_PRESENCE_CHECK, 834 PN544_RF_READER_CMD_PRESENCE_CHECK,
780 NULL, 0, NULL); 835 NULL, 0, NULL);
781} 836}
782 837
783static struct nfc_shdlc_ops pn544_shdlc_ops = { 838static struct nfc_hci_ops pn544_hci_ops = {
784 .open = pn544_hci_open, 839 .open = pn544_hci_open,
785 .close = pn544_hci_close, 840 .close = pn544_hci_close,
786 .hci_ready = pn544_hci_ready, 841 .hci_ready = pn544_hci_ready,
@@ -848,8 +903,8 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
848 pn544_hci_platform_init(info); 903 pn544_hci_platform_init(info);
849 904
850 r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn, 905 r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
851 IRQF_TRIGGER_RISING, PN544_HCI_DRIVER_NAME, 906 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
852 info); 907 PN544_HCI_DRIVER_NAME, info);
853 if (r < 0) { 908 if (r < 0) {
854 dev_err(&client->dev, "Unable to register IRQ handler\n"); 909 dev_err(&client->dev, "Unable to register IRQ handler\n");
855 goto err_rti; 910 goto err_rti;
@@ -872,22 +927,30 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
872 NFC_PROTO_ISO14443_B_MASK | 927 NFC_PROTO_ISO14443_B_MASK |
873 NFC_PROTO_NFC_DEP_MASK; 928 NFC_PROTO_NFC_DEP_MASK;
874 929
875 info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops, 930 info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
876 &init_data, protocols, 931 protocols, LLC_SHDLC_NAME,
877 PN544_CMDS_HEADROOM, 0, 932 PN544_FRAME_HEADROOM +
878 PN544_HCI_LLC_MAX_PAYLOAD, 933 PN544_CMDS_HEADROOM,
879 dev_name(&client->dev)); 934 PN544_FRAME_TAILROOM,
880 if (!info->shdlc) { 935 PN544_HCI_LLC_MAX_PAYLOAD);
881 dev_err(&client->dev, "Cannot allocate nfc shdlc.\n"); 936 if (!info->hdev) {
937 dev_err(&client->dev, "Cannot allocate nfc hdev.\n");
882 r = -ENOMEM; 938 r = -ENOMEM;
883 goto err_allocshdlc; 939 goto err_alloc_hdev;
884 } 940 }
885 941
886 nfc_shdlc_set_clientdata(info->shdlc, info); 942 nfc_hci_set_clientdata(info->hdev, info);
943
944 r = nfc_hci_register_device(info->hdev);
945 if (r)
946 goto err_regdev;
887 947
888 return 0; 948 return 0;
889 949
890err_allocshdlc: 950err_regdev:
951 nfc_hci_free_device(info->hdev);
952
953err_alloc_hdev:
891 free_irq(client->irq, info); 954 free_irq(client->irq, info);
892 955
893err_rti: 956err_rti:
@@ -908,7 +971,7 @@ static __devexit int pn544_hci_remove(struct i2c_client *client)
908 971
909 dev_dbg(&client->dev, "%s\n", __func__); 972 dev_dbg(&client->dev, "%s\n", __func__);
910 973
911 nfc_shdlc_free(info->shdlc); 974 nfc_hci_free_device(info->hdev);
912 975
913 if (info->state != PN544_ST_COLD) { 976 if (info->state != PN544_ST_COLD) {
914 if (pdata->disable) 977 if (pdata->disable)
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 1e528b539a07..79f4bce061bd 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -143,10 +143,12 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
143 kt = timespec_to_ktime(ts); 143 kt = timespec_to_ktime(ts);
144 delta = ktime_to_ns(kt); 144 delta = ktime_to_ns(kt);
145 err = ops->adjtime(ops, delta); 145 err = ops->adjtime(ops, delta);
146
147 } else if (tx->modes & ADJ_FREQUENCY) { 146 } else if (tx->modes & ADJ_FREQUENCY) {
148
149 err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq)); 147 err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
148 ptp->dialed_frequency = tx->freq;
149 } else if (tx->modes == 0) {
150 tx->freq = ptp->dialed_frequency;
151 err = 0;
150 } 152 }
151 153
152 return err; 154 return err;
@@ -180,7 +182,8 @@ static void delete_ptp_clock(struct posix_clock *pc)
180 182
181/* public interface */ 183/* public interface */
182 184
183struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info) 185struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
186 struct device *parent)
184{ 187{
185 struct ptp_clock *ptp; 188 struct ptp_clock *ptp;
186 int err = 0, index, major = MAJOR(ptp_devt); 189 int err = 0, index, major = MAJOR(ptp_devt);
@@ -213,7 +216,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
213 init_waitqueue_head(&ptp->tsev_wq); 216 init_waitqueue_head(&ptp->tsev_wq);
214 217
215 /* Create a new device in our class. */ 218 /* Create a new device in our class. */
216 ptp->dev = device_create(ptp_class, NULL, ptp->devid, ptp, 219 ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp,
217 "ptp%d", ptp->index); 220 "ptp%d", ptp->index);
218 if (IS_ERR(ptp->dev)) 221 if (IS_ERR(ptp->dev))
219 goto no_device; 222 goto no_device;
@@ -300,6 +303,11 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
300 pps_get_ts(&evt); 303 pps_get_ts(&evt);
301 pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL); 304 pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
302 break; 305 break;
306
307 case PTP_CLOCK_PPSUSR:
308 pps_event(ptp->pps_source, &event->pps_times,
309 PTP_PPS_EVENT, NULL);
310 break;
303 } 311 }
304} 312}
305EXPORT_SYMBOL(ptp_clock_event); 313EXPORT_SYMBOL(ptp_clock_event);
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index e03c40692b00..d49b85164fd2 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -298,7 +298,7 @@ static int __init ptp_ixp_init(void)
298 298
299 ixp_clock.caps = ptp_ixp_caps; 299 ixp_clock.caps = ptp_ixp_caps;
300 300
301 ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps); 301 ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps, NULL);
302 302
303 if (IS_ERR(ixp_clock.ptp_clock)) 303 if (IS_ERR(ixp_clock.ptp_clock))
304 return PTR_ERR(ixp_clock.ptp_clock); 304 return PTR_ERR(ixp_clock.ptp_clock);
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 3a9c17eced10..e624e4dd2abb 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -627,7 +627,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
627 } 627 }
628 628
629 chip->caps = ptp_pch_caps; 629 chip->caps = ptp_pch_caps;
630 chip->ptp_clock = ptp_clock_register(&chip->caps); 630 chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
631 631
632 if (IS_ERR(chip->ptp_clock)) 632 if (IS_ERR(chip->ptp_clock))
633 return PTR_ERR(chip->ptp_clock); 633 return PTR_ERR(chip->ptp_clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 4d5b5082c3b1..69d32070cc65 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -45,6 +45,7 @@ struct ptp_clock {
45 dev_t devid; 45 dev_t devid;
46 int index; /* index into clocks.map */ 46 int index; /* index into clocks.map */
47 struct pps_device *pps_source; 47 struct pps_device *pps_source;
48 long dialed_frequency; /* remembers the frequency adjustment */
48 struct timestamp_event_queue tsevq; /* simple fifo for time stamps */ 49 struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
49 struct mutex tsevq_mux; /* one process at a time reading the fifo */ 50 struct mutex tsevq_mux; /* one process at a time reading the fifo */
50 wait_queue_head_t tsev_wq; 51 wait_queue_head_t tsev_wq;
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index d4ade9e92fbb..fb92524d24ef 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1523,7 +1523,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1523 goto done; 1523 goto done;
1524 default: 1524 default:
1525 break; 1525 break;
1526 }; 1526 }
1527 1527
1528 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) 1528 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1529 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); 1529 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 5227e5734a9d..98ea9cc6f1aa 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1454,7 +1454,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1454 ch_fsm_len, GFP_KERNEL); 1454 ch_fsm_len, GFP_KERNEL);
1455 } 1455 }
1456 if (ch->fsm == NULL) 1456 if (ch->fsm == NULL)
1457 goto free_return; 1457 goto nomem_return;
1458 1458
1459 fsm_newstate(ch->fsm, CTC_STATE_IDLE); 1459 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
1460 1460
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a3adf4b1c60d..2ca0f1dd7a00 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -282,7 +282,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
282 282
283 LCS_DBF_TEXT(3, setup, "iwritccw"); 283 LCS_DBF_TEXT(3, setup, "iwritccw");
284 /* Setup write ccws. */ 284 /* Setup write ccws. */
285 memset(card->write.ccws, 0, sizeof(struct ccw1) * LCS_NUM_BUFFS + 1); 285 memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
286 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { 286 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
287 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; 287 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
288 card->write.ccws[cnt].count = 0; 288 card->write.ccws[cnt].count = 0;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cf6da7fafe54..3e25d3150456 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -489,7 +489,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
489 atomic_set(&reply->refcnt, 1); 489 atomic_set(&reply->refcnt, 1);
490 atomic_set(&reply->received, 0); 490 atomic_set(&reply->received, 0);
491 reply->card = card; 491 reply->card = card;
492 }; 492 }
493 return reply; 493 return reply;
494} 494}
495 495
@@ -1257,7 +1257,30 @@ static void qeth_clean_channel(struct qeth_channel *channel)
1257 kfree(channel->iob[cnt].data); 1257 kfree(channel->iob[cnt].data);
1258} 1258}
1259 1259
1260static void qeth_get_channel_path_desc(struct qeth_card *card) 1260static void qeth_set_single_write_queues(struct qeth_card *card)
1261{
1262 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1263 (card->qdio.no_out_queues == 4))
1264 qeth_free_qdio_buffers(card);
1265
1266 card->qdio.no_out_queues = 1;
1267 if (card->qdio.default_out_queue != 0)
1268 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1269
1270 card->qdio.default_out_queue = 0;
1271}
1272
1273static void qeth_set_multiple_write_queues(struct qeth_card *card)
1274{
1275 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1276 (card->qdio.no_out_queues == 1)) {
1277 qeth_free_qdio_buffers(card);
1278 card->qdio.default_out_queue = 2;
1279 }
1280 card->qdio.no_out_queues = 4;
1281}
1282
1283static void qeth_update_from_chp_desc(struct qeth_card *card)
1261{ 1284{
1262 struct ccw_device *ccwdev; 1285 struct ccw_device *ccwdev;
1263 struct channelPath_dsc { 1286 struct channelPath_dsc {
@@ -1274,38 +1297,23 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
1274 QETH_DBF_TEXT(SETUP, 2, "chp_desc"); 1297 QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1275 1298
1276 ccwdev = card->data.ccwdev; 1299 ccwdev = card->data.ccwdev;
1277 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); 1300 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1278 if (chp_dsc != NULL) { 1301 if (!chp_dsc)
1279 if (card->info.type != QETH_CARD_TYPE_IQD) { 1302 goto out;
1280 /* CHPP field bit 6 == 1 -> single queue */ 1303
1281 if ((chp_dsc->chpp & 0x02) == 0x02) { 1304 card->info.func_level = 0x4100 + chp_dsc->desc;
1282 if ((atomic_read(&card->qdio.state) != 1305 if (card->info.type == QETH_CARD_TYPE_IQD)
1283 QETH_QDIO_UNINITIALIZED) && 1306 goto out;
1284 (card->qdio.no_out_queues == 4)) 1307
1285 /* change from 4 to 1 outbound queues */ 1308 /* CHPP field bit 6 == 1 -> single queue */
1286 qeth_free_qdio_buffers(card); 1309 if ((chp_dsc->chpp & 0x02) == 0x02)
1287 card->qdio.no_out_queues = 1; 1310 qeth_set_single_write_queues(card);
1288 if (card->qdio.default_out_queue != 0) 1311 else
1289 dev_info(&card->gdev->dev, 1312 qeth_set_multiple_write_queues(card);
1290 "Priority Queueing not supported\n"); 1313out:
1291 card->qdio.default_out_queue = 0; 1314 kfree(chp_dsc);
1292 } else {
1293 if ((atomic_read(&card->qdio.state) !=
1294 QETH_QDIO_UNINITIALIZED) &&
1295 (card->qdio.no_out_queues == 1)) {
1296 /* change from 1 to 4 outbound queues */
1297 qeth_free_qdio_buffers(card);
1298 card->qdio.default_out_queue = 2;
1299 }
1300 card->qdio.no_out_queues = 4;
1301 }
1302 }
1303 card->info.func_level = 0x4100 + chp_dsc->desc;
1304 kfree(chp_dsc);
1305 }
1306 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); 1315 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1307 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); 1316 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1308 return;
1309} 1317}
1310 1318
1311static void qeth_init_qdio_info(struct qeth_card *card) 1319static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1473,7 +1481,7 @@ static int qeth_determine_card_type(struct qeth_card *card)
1473 card->qdio.no_in_queues = 1; 1481 card->qdio.no_in_queues = 1;
1474 card->info.is_multicast_different = 1482 card->info.is_multicast_different =
1475 known_devices[i][QETH_MULTICAST_IND]; 1483 known_devices[i][QETH_MULTICAST_IND];
1476 qeth_get_channel_path_desc(card); 1484 qeth_update_from_chp_desc(card);
1477 return 0; 1485 return 0;
1478 } 1486 }
1479 i++; 1487 i++;
@@ -2029,7 +2037,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2029 if (time_after(jiffies, timeout)) 2037 if (time_after(jiffies, timeout))
2030 goto time_err; 2038 goto time_err;
2031 cpu_relax(); 2039 cpu_relax();
2032 }; 2040 }
2033 } 2041 }
2034 2042
2035 if (reply->rc == -EIO) 2043 if (reply->rc == -EIO)
@@ -4735,7 +4743,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
4735 4743
4736 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 4744 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
4737 atomic_set(&card->force_alloc_skb, 0); 4745 atomic_set(&card->force_alloc_skb, 0);
4738 qeth_get_channel_path_desc(card); 4746 qeth_update_from_chp_desc(card);
4739retry: 4747retry:
4740 if (retries) 4748 if (retries)
4741 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 4749 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c5f03fa70fba..4cd310cb5bdf 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -794,6 +794,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
794 rc = -EEXIST; 794 rc = -EEXIST;
795 spin_unlock_irqrestore(&card->ip_lock, flags); 795 spin_unlock_irqrestore(&card->ip_lock, flags);
796 if (rc) { 796 if (rc) {
797 kfree(ipaddr);
797 return rc; 798 return rc;
798 } 799 }
799 if (!qeth_l3_add_ip(card, ipaddr)) 800 if (!qeth_l3_add_ip(card, ipaddr))
@@ -858,6 +859,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
858 rc = -EEXIST; 859 rc = -EEXIST;
859 spin_unlock_irqrestore(&card->ip_lock, flags); 860 spin_unlock_irqrestore(&card->ip_lock, flags);
860 if (rc) { 861 if (rc) {
862 kfree(ipaddr);
861 return rc; 863 return rc;
862 } 864 }
863 if (!qeth_l3_add_ip(card, ipaddr)) 865 if (!qeth_l3_add_ip(card, ipaddr))
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 8818dd681c19..65123a21b97e 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -33,40 +33,6 @@
33struct sock *scsi_nl_sock = NULL; 33struct sock *scsi_nl_sock = NULL;
34EXPORT_SYMBOL_GPL(scsi_nl_sock); 34EXPORT_SYMBOL_GPL(scsi_nl_sock);
35 35
36static DEFINE_SPINLOCK(scsi_nl_lock);
37static struct list_head scsi_nl_drivers;
38
39static u32 scsi_nl_state;
40#define STATE_EHANDLER_BSY 0x00000001
41
42struct scsi_nl_transport {
43 int (*msg_handler)(struct sk_buff *);
44 void (*event_handler)(struct notifier_block *, unsigned long, void *);
45 unsigned int refcnt;
46 int flags;
47};
48
49/* flags values (bit flags) */
50#define HANDLER_DELETING 0x1
51
52static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
53 { {NULL, }, };
54
55
56struct scsi_nl_drvr {
57 struct list_head next;
58 int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
59 u32 len, u32 pid);
60 void (*devt_handler)(struct notifier_block *nb,
61 unsigned long event, void *notify_ptr);
62 struct scsi_host_template *hostt;
63 u64 vendor_id;
64 unsigned int refcnt;
65 int flags;
66};
67
68
69
70/** 36/**
71 * scsi_nl_rcv_msg - Receive message handler. 37 * scsi_nl_rcv_msg - Receive message handler.
72 * @skb: socket receive buffer 38 * @skb: socket receive buffer
@@ -81,7 +47,6 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
81{ 47{
82 struct nlmsghdr *nlh; 48 struct nlmsghdr *nlh;
83 struct scsi_nl_hdr *hdr; 49 struct scsi_nl_hdr *hdr;
84 unsigned long flags;
85 u32 rlen; 50 u32 rlen;
86 int err, tport; 51 int err, tport;
87 52
@@ -126,22 +91,24 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
126 /* 91 /*
127 * Deliver message to the appropriate transport 92 * Deliver message to the appropriate transport
128 */ 93 */
129 spin_lock_irqsave(&scsi_nl_lock, flags);
130
131 tport = hdr->transport; 94 tport = hdr->transport;
132 if ((tport < SCSI_NL_MAX_TRANSPORTS) && 95 if (tport == SCSI_NL_TRANSPORT) {
133 !(transports[tport].flags & HANDLER_DELETING) && 96 switch (hdr->msgtype) {
134 (transports[tport].msg_handler)) { 97 case SCSI_NL_SHOST_VENDOR:
135 transports[tport].refcnt++; 98 /* Locate the driver that corresponds to the message */
136 spin_unlock_irqrestore(&scsi_nl_lock, flags); 99 err = -ESRCH;
137 err = transports[tport].msg_handler(skb); 100 break;
138 spin_lock_irqsave(&scsi_nl_lock, flags); 101 default:
139 transports[tport].refcnt--; 102 err = -EBADR;
140 } else 103 break;
104 }
105 if (err)
106 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
107 __func__, hdr->msgtype, err);
108 }
109 else
141 err = -ENOENT; 110 err = -ENOENT;
142 111
143 spin_unlock_irqrestore(&scsi_nl_lock, flags);
144
145next_msg: 112next_msg:
146 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) 113 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
147 netlink_ack(skb, nlh, err); 114 netlink_ack(skb, nlh, err);
@@ -150,333 +117,6 @@ next_msg:
150 } 117 }
151} 118}
152 119
153
154/**
155 * scsi_nl_rcv_event - Event handler for a netlink socket.
156 * @this: event notifier block
157 * @event: event type
158 * @ptr: event payload
159 *
160 **/
161static int
162scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
163{
164 struct netlink_notify *n = ptr;
165 struct scsi_nl_drvr *driver;
166 unsigned long flags;
167 int tport;
168
169 if (n->protocol != NETLINK_SCSITRANSPORT)
170 return NOTIFY_DONE;
171
172 spin_lock_irqsave(&scsi_nl_lock, flags);
173 scsi_nl_state |= STATE_EHANDLER_BSY;
174
175 /*
176 * Pass event on to any transports that may be listening
177 */
178 for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
179 if (!(transports[tport].flags & HANDLER_DELETING) &&
180 (transports[tport].event_handler)) {
181 spin_unlock_irqrestore(&scsi_nl_lock, flags);
182 transports[tport].event_handler(this, event, ptr);
183 spin_lock_irqsave(&scsi_nl_lock, flags);
184 }
185 }
186
187 /*
188 * Pass event on to any drivers that may be listening
189 */
190 list_for_each_entry(driver, &scsi_nl_drivers, next) {
191 if (!(driver->flags & HANDLER_DELETING) &&
192 (driver->devt_handler)) {
193 spin_unlock_irqrestore(&scsi_nl_lock, flags);
194 driver->devt_handler(this, event, ptr);
195 spin_lock_irqsave(&scsi_nl_lock, flags);
196 }
197 }
198
199 scsi_nl_state &= ~STATE_EHANDLER_BSY;
200 spin_unlock_irqrestore(&scsi_nl_lock, flags);
201
202 return NOTIFY_DONE;
203}
204
205static struct notifier_block scsi_netlink_notifier = {
206 .notifier_call = scsi_nl_rcv_event,
207};
208
209
210/*
211 * GENERIC SCSI transport receive and event handlers
212 */
213
214/**
215 * scsi_generic_msg_handler - receive message handler for GENERIC transport messages
216 * @skb: socket receive buffer
217 **/
218static int
219scsi_generic_msg_handler(struct sk_buff *skb)
220{
221 struct nlmsghdr *nlh = nlmsg_hdr(skb);
222 struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
223 struct scsi_nl_drvr *driver;
224 struct Scsi_Host *shost;
225 unsigned long flags;
226 int err = 0, match, pid;
227
228 pid = NETLINK_CREDS(skb)->pid;
229
230 switch (snlh->msgtype) {
231 case SCSI_NL_SHOST_VENDOR:
232 {
233 struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
234
235 /* Locate the driver that corresponds to the message */
236 spin_lock_irqsave(&scsi_nl_lock, flags);
237 match = 0;
238 list_for_each_entry(driver, &scsi_nl_drivers, next) {
239 if (driver->vendor_id == msg->vendor_id) {
240 match = 1;
241 break;
242 }
243 }
244
245 if ((!match) || (!driver->dmsg_handler)) {
246 spin_unlock_irqrestore(&scsi_nl_lock, flags);
247 err = -ESRCH;
248 goto rcv_exit;
249 }
250
251 if (driver->flags & HANDLER_DELETING) {
252 spin_unlock_irqrestore(&scsi_nl_lock, flags);
253 err = -ESHUTDOWN;
254 goto rcv_exit;
255 }
256
257 driver->refcnt++;
258 spin_unlock_irqrestore(&scsi_nl_lock, flags);
259
260
261 /* if successful, scsi_host_lookup takes a shost reference */
262 shost = scsi_host_lookup(msg->host_no);
263 if (!shost) {
264 err = -ENODEV;
265 goto driver_exit;
266 }
267
268 /* is this host owned by the vendor ? */
269 if (shost->hostt != driver->hostt) {
270 err = -EINVAL;
271 goto vendormsg_put;
272 }
273
274 /* pass message on to the driver */
275 err = driver->dmsg_handler(shost, (void *)&msg[1],
276 msg->vmsg_datalen, pid);
277
278vendormsg_put:
279 /* release reference by scsi_host_lookup */
280 scsi_host_put(shost);
281
282driver_exit:
283 /* release our own reference on the registration object */
284 spin_lock_irqsave(&scsi_nl_lock, flags);
285 driver->refcnt--;
286 spin_unlock_irqrestore(&scsi_nl_lock, flags);
287 break;
288 }
289
290 default:
291 err = -EBADR;
292 break;
293 }
294
295rcv_exit:
296 if (err)
297 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
298 __func__, snlh->msgtype, err);
299 return err;
300}
301
302
303/**
304 * scsi_nl_add_transport -
305 * Registers message and event handlers for a transport. Enables
306 * receipt of netlink messages and events to a transport.
307 *
308 * @tport: transport registering handlers
309 * @msg_handler: receive message handler callback
310 * @event_handler: receive event handler callback
311 **/
312int
313scsi_nl_add_transport(u8 tport,
314 int (*msg_handler)(struct sk_buff *),
315 void (*event_handler)(struct notifier_block *, unsigned long, void *))
316{
317 unsigned long flags;
318 int err = 0;
319
320 if (tport >= SCSI_NL_MAX_TRANSPORTS)
321 return -EINVAL;
322
323 spin_lock_irqsave(&scsi_nl_lock, flags);
324
325 if (scsi_nl_state & STATE_EHANDLER_BSY) {
326 spin_unlock_irqrestore(&scsi_nl_lock, flags);
327 msleep(1);
328 spin_lock_irqsave(&scsi_nl_lock, flags);
329 }
330
331 if (transports[tport].msg_handler || transports[tport].event_handler) {
332 err = -EALREADY;
333 goto register_out;
334 }
335
336 transports[tport].msg_handler = msg_handler;
337 transports[tport].event_handler = event_handler;
338 transports[tport].flags = 0;
339 transports[tport].refcnt = 0;
340
341register_out:
342 spin_unlock_irqrestore(&scsi_nl_lock, flags);
343
344 return err;
345}
346EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
347
348
349/**
350 * scsi_nl_remove_transport -
351 * Disable transport receiption of messages and events
352 *
353 * @tport: transport deregistering handlers
354 *
355 **/
356void
357scsi_nl_remove_transport(u8 tport)
358{
359 unsigned long flags;
360
361 spin_lock_irqsave(&scsi_nl_lock, flags);
362 if (scsi_nl_state & STATE_EHANDLER_BSY) {
363 spin_unlock_irqrestore(&scsi_nl_lock, flags);
364 msleep(1);
365 spin_lock_irqsave(&scsi_nl_lock, flags);
366 }
367
368 if (tport < SCSI_NL_MAX_TRANSPORTS) {
369 transports[tport].flags |= HANDLER_DELETING;
370
371 while (transports[tport].refcnt != 0) {
372 spin_unlock_irqrestore(&scsi_nl_lock, flags);
373 schedule_timeout_uninterruptible(HZ/4);
374 spin_lock_irqsave(&scsi_nl_lock, flags);
375 }
376 transports[tport].msg_handler = NULL;
377 transports[tport].event_handler = NULL;
378 transports[tport].flags = 0;
379 }
380
381 spin_unlock_irqrestore(&scsi_nl_lock, flags);
382
383 return;
384}
385EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
386
387
388/**
389 * scsi_nl_add_driver -
390 * A driver is registering its interfaces for SCSI netlink messages
391 *
392 * @vendor_id: A unique identification value for the driver.
393 * @hostt: address of the driver's host template. Used
394 * to verify an shost is bound to the driver
395 * @nlmsg_handler: receive message handler callback
396 * @nlevt_handler: receive event handler callback
397 *
398 * Returns:
399 * 0 on Success
400 * error result otherwise
401 **/
402int
403scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
404 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
405 u32 len, u32 pid),
406 void (*nlevt_handler)(struct notifier_block *nb,
407 unsigned long event, void *notify_ptr))
408{
409 struct scsi_nl_drvr *driver;
410 unsigned long flags;
411
412 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
413 if (unlikely(!driver)) {
414 printk(KERN_ERR "%s: allocation failure\n", __func__);
415 return -ENOMEM;
416 }
417
418 driver->dmsg_handler = nlmsg_handler;
419 driver->devt_handler = nlevt_handler;
420 driver->hostt = hostt;
421 driver->vendor_id = vendor_id;
422
423 spin_lock_irqsave(&scsi_nl_lock, flags);
424 if (scsi_nl_state & STATE_EHANDLER_BSY) {
425 spin_unlock_irqrestore(&scsi_nl_lock, flags);
426 msleep(1);
427 spin_lock_irqsave(&scsi_nl_lock, flags);
428 }
429 list_add_tail(&driver->next, &scsi_nl_drivers);
430 spin_unlock_irqrestore(&scsi_nl_lock, flags);
431
432 return 0;
433}
434EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
435
436
437/**
438 * scsi_nl_remove_driver -
439 * An driver is unregistering with the SCSI netlink messages
440 *
441 * @vendor_id: The unique identification value for the driver.
442 **/
443void
444scsi_nl_remove_driver(u64 vendor_id)
445{
446 struct scsi_nl_drvr *driver;
447 unsigned long flags;
448
449 spin_lock_irqsave(&scsi_nl_lock, flags);
450 if (scsi_nl_state & STATE_EHANDLER_BSY) {
451 spin_unlock_irqrestore(&scsi_nl_lock, flags);
452 msleep(1);
453 spin_lock_irqsave(&scsi_nl_lock, flags);
454 }
455
456 list_for_each_entry(driver, &scsi_nl_drivers, next) {
457 if (driver->vendor_id == vendor_id) {
458 driver->flags |= HANDLER_DELETING;
459 while (driver->refcnt != 0) {
460 spin_unlock_irqrestore(&scsi_nl_lock, flags);
461 schedule_timeout_uninterruptible(HZ/4);
462 spin_lock_irqsave(&scsi_nl_lock, flags);
463 }
464 list_del(&driver->next);
465 kfree(driver);
466 spin_unlock_irqrestore(&scsi_nl_lock, flags);
467 return;
468 }
469 }
470
471 spin_unlock_irqrestore(&scsi_nl_lock, flags);
472
473 printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
474 __func__, (unsigned long long)vendor_id);
475 return;
476}
477EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
478
479
480/** 120/**
481 * scsi_netlink_init - Called by SCSI subsystem to initialize 121 * scsi_netlink_init - Called by SCSI subsystem to initialize
482 * the SCSI transport netlink interface 122 * the SCSI transport netlink interface
@@ -485,36 +125,19 @@ EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
485void 125void
486scsi_netlink_init(void) 126scsi_netlink_init(void)
487{ 127{
488 int error;
489 struct netlink_kernel_cfg cfg = { 128 struct netlink_kernel_cfg cfg = {
490 .input = scsi_nl_rcv_msg, 129 .input = scsi_nl_rcv_msg,
491 .groups = SCSI_NL_GRP_CNT, 130 .groups = SCSI_NL_GRP_CNT,
492 }; 131 };
493 132
494 INIT_LIST_HEAD(&scsi_nl_drivers);
495
496 error = netlink_register_notifier(&scsi_netlink_notifier);
497 if (error) {
498 printk(KERN_ERR "%s: register of event handler failed - %d\n",
499 __func__, error);
500 return;
501 }
502
503 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, 133 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
504 THIS_MODULE, &cfg); 134 &cfg);
505 if (!scsi_nl_sock) { 135 if (!scsi_nl_sock) {
506 printk(KERN_ERR "%s: register of receive handler failed\n", 136 printk(KERN_ERR "%s: register of receive handler failed\n",
507 __func__); 137 __func__);
508 netlink_unregister_notifier(&scsi_netlink_notifier);
509 return; 138 return;
510 } 139 }
511 140
512 /* Register the entry points for the generic SCSI transport */
513 error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
514 scsi_generic_msg_handler, NULL);
515 if (error)
516 printk(KERN_ERR "%s: register of GENERIC transport handler"
517 " failed - %d\n", __func__, error);
518 return; 141 return;
519} 142}
520 143
@@ -526,158 +149,10 @@ scsi_netlink_init(void)
526void 149void
527scsi_netlink_exit(void) 150scsi_netlink_exit(void)
528{ 151{
529 scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
530
531 if (scsi_nl_sock) { 152 if (scsi_nl_sock) {
532 netlink_kernel_release(scsi_nl_sock); 153 netlink_kernel_release(scsi_nl_sock);
533 netlink_unregister_notifier(&scsi_netlink_notifier);
534 } 154 }
535 155
536 return; 156 return;
537} 157}
538 158
539
540/*
541 * Exported Interfaces
542 */
543
544/**
545 * scsi_nl_send_transport_msg -
546 * Generic function to send a single message from a SCSI transport to
547 * a single process
548 *
549 * @pid: receiving pid
550 * @hdr: message payload
551 *
552 **/
553void
554scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
555{
556 struct sk_buff *skb;
557 struct nlmsghdr *nlh;
558 const char *fn;
559 char *datab;
560 u32 len, skblen;
561 int err;
562
563 if (!scsi_nl_sock) {
564 err = -ENOENT;
565 fn = "netlink socket";
566 goto msg_fail;
567 }
568
569 len = NLMSG_SPACE(hdr->msglen);
570 skblen = NLMSG_SPACE(len);
571
572 skb = alloc_skb(skblen, GFP_KERNEL);
573 if (!skb) {
574 err = -ENOBUFS;
575 fn = "alloc_skb";
576 goto msg_fail;
577 }
578
579 nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
580 if (!nlh) {
581 err = -ENOBUFS;
582 fn = "nlmsg_put";
583 goto msg_fail_skb;
584 }
585 datab = NLMSG_DATA(nlh);
586 memcpy(datab, hdr, hdr->msglen);
587
588 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
589 if (err < 0) {
590 fn = "nlmsg_unicast";
591 /* nlmsg_unicast already kfree_skb'd */
592 goto msg_fail;
593 }
594
595 return;
596
597msg_fail_skb:
598 kfree_skb(skb);
599msg_fail:
600 printk(KERN_WARNING
601 "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
602 "msglen %d: %s : err %d\n",
603 __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
604 fn, err);
605 return;
606}
607EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
608
609
610/**
611 * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
612 * to a specific process id.
613 *
614 * @pid: process id of the receiver
615 * @host_no: host # sending the message
616 * @vendor_id: unique identifier for the driver's vendor
617 * @data_len: amount, in bytes, of vendor unique payload data
618 * @data_buf: pointer to vendor unique data buffer
619 *
620 * Returns:
621 * 0 on successful return
622 * otherwise, failing error code
623 *
624 * Notes:
625 * This routine assumes no locks are held on entry.
626 */
627int
628scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
629 char *data_buf, u32 data_len)
630{
631 struct sk_buff *skb;
632 struct nlmsghdr *nlh;
633 struct scsi_nl_host_vendor_msg *msg;
634 u32 len, skblen;
635 int err;
636
637 if (!scsi_nl_sock) {
638 err = -ENOENT;
639 goto send_vendor_fail;
640 }
641
642 len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
643 skblen = NLMSG_SPACE(len);
644
645 skb = alloc_skb(skblen, GFP_KERNEL);
646 if (!skb) {
647 err = -ENOBUFS;
648 goto send_vendor_fail;
649 }
650
651 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
652 skblen - sizeof(*nlh), 0);
653 if (!nlh) {
654 err = -ENOBUFS;
655 goto send_vendor_fail_skb;
656 }
657 msg = NLMSG_DATA(nlh);
658
659 INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
660 SCSI_NL_SHOST_VENDOR, len);
661 msg->vendor_id = vendor_id;
662 msg->host_no = host_no;
663 msg->vmsg_datalen = data_len; /* bytes */
664 memcpy(&msg[1], data_buf, data_len);
665
666 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
667 if (err)
668 /* nlmsg_multicast already kfree_skb'd */
669 goto send_vendor_fail;
670
671 return 0;
672
673send_vendor_fail_skb:
674 kfree_skb(skb);
675send_vendor_fail:
676 printk(KERN_WARNING
677 "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
678 __func__, host_no, err);
679 return err;
680}
681EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
682
683
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fa1dfaa83e32..31969f2e13ce 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2119,7 +2119,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
2119 switch (nlh->nlmsg_type) { 2119 switch (nlh->nlmsg_type) {
2120 case ISCSI_UEVENT_CREATE_SESSION: 2120 case ISCSI_UEVENT_CREATE_SESSION:
2121 err = iscsi_if_create_session(priv, ep, ev, 2121 err = iscsi_if_create_session(priv, ep, ev,
2122 NETLINK_CB(skb).pid, 2122 NETLINK_CB(skb).portid,
2123 ev->u.c_session.initial_cmdsn, 2123 ev->u.c_session.initial_cmdsn,
2124 ev->u.c_session.cmds_max, 2124 ev->u.c_session.cmds_max,
2125 ev->u.c_session.queue_depth); 2125 ev->u.c_session.queue_depth);
@@ -2132,7 +2132,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
2132 } 2132 }
2133 2133
2134 err = iscsi_if_create_session(priv, ep, ev, 2134 err = iscsi_if_create_session(priv, ep, ev,
2135 NETLINK_CB(skb).pid, 2135 NETLINK_CB(skb).portid,
2136 ev->u.c_bound_session.initial_cmdsn, 2136 ev->u.c_bound_session.initial_cmdsn,
2137 ev->u.c_bound_session.cmds_max, 2137 ev->u.c_bound_session.cmds_max,
2138 ev->u.c_bound_session.queue_depth); 2138 ev->u.c_bound_session.queue_depth);
@@ -2969,8 +2969,7 @@ static __init int iscsi_transport_init(void)
2969 if (err) 2969 if (err)
2970 goto unregister_conn_class; 2970 goto unregister_conn_class;
2971 2971
2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
2973 THIS_MODULE, &cfg);
2974 if (!nls) { 2973 if (!nls) {
2975 err = -ENOBUFS; 2974 err = -ENOBUFS;
2976 goto unregister_session_class; 2975 goto unregister_session_class;
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 7e2ddc042f5b..c6250867a95d 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -190,16 +190,30 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
190{ 190{
191 struct ssb_bus *bus = mcore->dev->bus; 191 struct ssb_bus *bus = mcore->dev->bus;
192 192
193 mcore->flash_buswidth = 2; 193 /* When there is no chipcommon on the bus there is 4MB flash */
194 if (bus->chipco.dev) { 194 if (!bus->chipco.dev) {
195 mcore->flash_window = 0x1c000000; 195 mcore->flash_buswidth = 2;
196 mcore->flash_window_size = 0x02000000; 196 mcore->flash_window = SSB_FLASH1;
197 mcore->flash_window_size = SSB_FLASH1_SZ;
198 return;
199 }
200
201 /* There is ChipCommon, so use it to read info about flash */
202 switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
203 case SSB_CHIPCO_FLASHT_STSER:
204 case SSB_CHIPCO_FLASHT_ATSER:
205 pr_err("Serial flash not supported\n");
206 break;
207 case SSB_CHIPCO_FLASHT_PARA:
208 pr_debug("Found parallel flash\n");
209 mcore->flash_window = SSB_FLASH2;
210 mcore->flash_window_size = SSB_FLASH2_SZ;
197 if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG) 211 if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
198 & SSB_CHIPCO_CFG_DS16) == 0) 212 & SSB_CHIPCO_CFG_DS16) == 0)
199 mcore->flash_buswidth = 1; 213 mcore->flash_buswidth = 1;
200 } else { 214 else
201 mcore->flash_window = 0x1fc00000; 215 mcore->flash_buswidth = 2;
202 mcore->flash_window_size = 0x00400000; 216 break;
203 } 217 }
204} 218}
205 219
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 3abb31df8f28..20d0aec52e72 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -95,7 +95,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
95 init_MUTEX(&netlink_mutex); 95 init_MUTEX(&netlink_mutex);
96#endif 96#endif
97 97
98 sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg); 98 sock = netlink_kernel_create(&init_net, unit, &cfg);
99 99
100 if (sock) 100 if (sock)
101 rcv_cb = cb; 101 rcv_cb = cb;
@@ -135,7 +135,7 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
135 } 135 }
136 memcpy(nlmsg_data(nlh), msg, len); 136 memcpy(nlmsg_data(nlh), msg, len);
137 137
138 NETLINK_CB(skb).pid = 0; 138 NETLINK_CB(skb).portid = 0;
139 NETLINK_CB(skb).dst_group = 0; 139 NETLINK_CB(skb).dst_group = 0;
140 140
141 ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC); 141 ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 0ca857ac473e..48aa1361903e 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -119,7 +119,9 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
119 *total_flags = new_flags; 119 *total_flags = new_flags;
120} 120}
121 121
122static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 122static void wbsoft_tx(struct ieee80211_hw *dev,
123 struct ieee80211_tx_control *control,
124 struct sk_buff *skb)
123{ 125{
124 struct wbsoft_priv *priv = dev->priv; 126 struct wbsoft_priv *priv = dev->priv;
125 127