author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 17:54:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 17:54:29 -0400
commit    cc998ff8811530be521f6b316f37ab7676a07938 (patch)
tree      a054b3bf4b2ef406bf756a6cfc9be2f9115f17ae /drivers
parent    57d730924d5cc2c3e280af16a9306587c3a511db (diff)
parent    0d40f75bdab241868c0eb6f97aef9f8b3a66f7b3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:
 "Noteworthy changes this time around:

  1) Multicast rejoin support for team driver, from Jiri Pirko.

  2) Centralize and simplify TCP RTT measurement handling in order to
     reduce the impact of bad RTO seeding from SYN/ACKs.  Also, when both
     timestamps and local RTT measurements are available prefer the latter
     because there are broken middleware devices which scramble the
     timestamp.  From Yuchung Cheng.

  3) Add TCP_NOTSENT_LOWAT socket option to limit the amount of kernel
     memory consumed to queue up unsent user data.  From Eric Dumazet.

  4) Add a "physical port ID" abstraction for network devices, from
     Jiri Pirko.

  5) Add a "suppress" operation to influence fib_rules lookups, from
     Stefan Tomanek.

  6) Add a networking development FAQ, from Paul Gortmaker.

  7) Extend the information provided by tcp_probe and add ipv6 support,
     from Daniel Borkmann.

  8) Use RCU locking more extensively in openvswitch data paths, from
     Pravin B Shelar.

  9) Add SCTP support to openvswitch, from Joe Stringer.

 10) Add EF10 chip support to SFC driver, from Ben Hutchings.

 11) Add new SYNPROXY netfilter target, from Patrick McHardy.

 12) Compute a rate approximation for sending in TCP sockets, and use
     this to more intelligently coalesce TSO frames.  Furthermore, add a
     new packet scheduler which takes advantage of this estimate when
     available.  From Eric Dumazet.

 13) Allow AF_PACKET fanouts with random selection, from Daniel Borkmann.

 14) Add ipv6 support to vxlan driver, from Cong Wang"

Resolved conflicts as per discussion.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1218 commits)
  openvswitch: Fix alignment of struct sw_flow_key.
  netfilter: Fix build errors with xt_socket.c
  tcp: Add missing braces to do_tcp_setsockopt
  caif: Add missing braces to multiline if in cfctrl_linkup_request
  bnx2x: Add missing braces in bnx2x:bnx2x_link_initialize
  vxlan: Fix kernel panic on device delete.
  net: mvneta: implement ->ndo_do_ioctl() to support PHY ioctls
  net: mvneta: properly disable HW PHY polling and ensure adjust_link() works
  icplus: Use netif_running to determine device state
  ethernet/arc/arc_emac: Fix huge delays in large file copies
  tuntap: orphan frags before trying to set tx timestamp
  tuntap: purge socket error queue on detach
  qlcnic: use standard NAPI weights
  ipv6:introduce function to find route for redirect
  bnx2x: VF RSS support - VF side
  bnx2x: VF RSS support - PF side
  vxlan: Notify drivers for listening UDP port changes
  net: usbnet: update addr_assign_type if appropriate
  driver/net: enic: update enic maintainers and driver
  driver/net: enic: Exposing symbols for Cisco's low latency driver
  ...
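A note on item 3: the new option caps how much written-but-unsent data the kernel will buffer per socket. A minimal userspace sketch, assuming the headers export TCP_NOTSENT_LOWAT (the raw option value in this kernel series is 25, hence the fallback define):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef TCP_NOTSENT_LOWAT
    #define TCP_NOTSENT_LOWAT 25    /* option value in this kernel series */
    #endif

    int main(void)
    {
        unsigned int lowat = 128 * 1024;    /* cap unsent queued data at 128 KiB */
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        /* The socket stays "writable" for poll()/epoll only while the
         * amount of queued-but-unsent data is below this threshold. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat)) < 0)
            perror("setsockopt(TCP_NOTSENT_LOWAT)");
        return 0;
    }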
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/he.c | 11
-rw-r--r--  drivers/atm/nicstar.c | 26
-rw-r--r--  drivers/bcma/Kconfig | 10
-rw-r--r--  drivers/bcma/driver_pci.c | 65
-rw-r--r--  drivers/bcma/driver_pci_host.c | 6
-rw-r--r--  drivers/bcma/main.c | 2
-rw-r--r--  drivers/bcma/scan.c | 28
-rw-r--r--  drivers/bluetooth/btmrvl_debugfs.c | 6
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 4
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 12
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 52
-rw-r--r--  drivers/net/bonding/bond_alb.c | 144
-rw-r--r--  drivers/net/bonding/bond_alb.h | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 961
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 12
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 90
-rw-r--r--  drivers/net/bonding/bonding.h | 96
-rw-r--r--  drivers/net/caif/caif_serial.c | 4
-rw-r--r--  drivers/net/can/at91_can.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 2
-rw-r--r--  drivers/net/can/flexcan.c | 83
-rw-r--r--  drivers/net/can/mcp251x.c | 98
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 23
-rw-r--r--  drivers/net/can/mscan/mscan.c | 25
-rw-r--r--  drivers/net/can/mscan/mscan.h | 3
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 6
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 6
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 12
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 6
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 317
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 39
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 106
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 93
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 309
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 396
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 39
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 221
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 41
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 189
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h | 69
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_defs.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 12
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 146
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 12
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 10
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h | 4
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 53
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 195
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 55
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.c | 48
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.h | 30
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 257
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 329
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.h | 9
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 10
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 176
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h | 14
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 8
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 76
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 503
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 97
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 777
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h | 4
-rw-r--r--  drivers/net/ethernet/ethoc.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 7
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 245
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 21
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 168
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 16
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 74
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.h | 4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 12
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 19
-rw-r--r--  drivers/net/ethernet/icplus/ipg.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 15
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 107
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 140
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 130
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 198
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 42
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 155
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 80
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 31
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 132
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 148
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 34
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 321
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 157
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 133
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 180
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 542
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 105
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 90
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 177
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 104
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 13
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 12
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c | 4
-rw-r--r--  drivers/net/ethernet/moxa/Kconfig | 30
-rw-r--r--  drivers/net/ethernet/moxa/Makefile | 5
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 559
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.h | 330
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 213
-rw-r--r--  drivers/net/ethernet/netx-eth.c | 2
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 15
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 15
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 67
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 98
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 1
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 20
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 304
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 743
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 50
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 292
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 40
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 237
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1179
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 223
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 179
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 454
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 165
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 19
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 9
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 71
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 10
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 7
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 3043
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h | 415
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 500
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 129
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 10
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 399
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 1171
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/falcon_xmac.c | 362
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 2942
-rw-r--r--  drivers/net/ethernet/sfc/farch_regs.h (renamed from drivers/net/ethernet/sfc/regs.h) | 272
-rw-r--r--  drivers/net/ethernet/sfc/filter.c | 1274
-rw-r--r--  drivers/net/ethernet/sfc/filter.h | 238
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 50
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 1262
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 313
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mac.c | 130
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mon.c | 274
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 5540
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c (renamed from drivers/net/ethernet/sfc/mcdi_phy.c) | 345
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/mtd.c | 634
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 408
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 1902
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 539
-rw-r--r--  drivers/net/ethernet/sfc/phy.h | 19
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 95
-rw-r--r--  drivers/net/ethernet/sfc/qt202x_phy.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 176
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 15
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 711
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 102
-rw-r--r--  drivers/net/ethernet/sfc/spi.h | 99
-rw-r--r--  drivers/net/ethernet/sfc/tenxpress.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 35
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/vfdi.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h | 22
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 5
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 3
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 28
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 9
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 8
-rw-r--r--  drivers/net/ethernet/sun/sunbmac.c | 4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 12
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 263
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 42
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 1
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 7
-rw-r--r--  drivers/net/ethernet/tile/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 1116
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 241
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 10
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 2
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 20
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 12
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 14
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
-rw-r--r--  drivers/net/fddi/defxx.c | 6
-rw-r--r--  drivers/net/irda/ali-ircc.c | 8
-rw-r--r--  drivers/net/irda/nsc-ircc.c | 8
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 2
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 8
-rw-r--r--  drivers/net/irda/via-ircc.c | 8
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 8
-rw-r--r--  drivers/net/macvlan.c | 7
-rw-r--r--  drivers/net/macvtap.c | 105
-rw-r--r--  drivers/net/netconsole.c | 13
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 2
-rw-r--r--  drivers/net/phy/mdio-mux-gpio.c | 2
-rw-r--r--  drivers/net/phy/mdio-mux-mmioreg.c | 2
-rw-r--r--  drivers/net/phy/mdio-octeon.c | 4
-rw-r--r--  drivers/net/phy/mdio-sun4i.c | 18
-rw-r--r--  drivers/net/phy/micrel.c | 105
-rw-r--r--  drivers/net/ppp/pptp.c | 12
-rw-r--r--  drivers/net/team/team.c | 203
-rw-r--r--  drivers/net/tun.c | 223
-rw-r--r--  drivers/net/usb/Kconfig | 8
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/asix.h | 2
-rw-r--r--  drivers/net/usb/asix_devices.c | 5
-rw-r--r--  drivers/net/usb/ax88172a.c | 8
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 24
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 845
-rw-r--r--  drivers/net/usb/sr9700.c | 560
-rw-r--r--  drivers/net/usb/sr9700.h | 173
-rw-r--r--  drivers/net/usb/usbnet.c | 58
-rw-r--r--  drivers/net/virtio_net.c | 44
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 211
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 10
-rw-r--r--  drivers/net/vxlan.c | 1308
-rw-r--r--  drivers/net/wan/sbni.c | 2
-rw-r--r--  drivers/net/wireless/airo.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 321
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 58
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 87
-rw-r--r--  drivers/net/wireless/ath/ath10k/hif.h | 49
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 61
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 27
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 43
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 488
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 356
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 15
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 127
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 24
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 59
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/debug.c | 24
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/pcu.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/qcu.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath6kl/main.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath6kl/testmode.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath6kl/testmode.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.h | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/antenna.c | 672
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_hw.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c | 67
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 31
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 39
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 190
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 68
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.c | 67
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 310
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 33
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_4k.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw-ops.h | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 115
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 68
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 45
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 157
-rw-r--r--  drivers/net/wireless/ath/ath9k/phy.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 494
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 533
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/Makefile | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/trace.h | 22
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 160
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.h | 20
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 27
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 22
-rw-r--r--  drivers/net/wireless/b43/dma.c | 6
-rw-r--r--  drivers/net/wireless/b43/main.c | 14
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 16
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 31
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 279
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 481
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fweh.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h | 21
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 228
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/p2p.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 65
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/aiutils.c | 21
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/aiutils.h | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/ampdu.c | 6
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/dma.c | 15
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 16
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c | 399
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c | 405
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h | 1
-rw-r--r--  drivers/net/wireless/cw1200/bh.c | 4
-rw-r--r--  drivers/net/wireless/cw1200/main.c | 2
-rw-r--r--  drivers/net/wireless/cw1200/wsm.h | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c | 6
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlegacy/3945.c | 31
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 18
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 30
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/agn.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/debugfs.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 172
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 62
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rxon.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/scan.c | 105
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.c | 67
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-op-mode.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/Makefile | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/bt-coex.c | 162
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/constants.h | 80
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 217
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 271
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 49
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 147
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 255
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 82
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 112
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 58
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/power.c | 383
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/power_legacy.c | 319
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/quota.c | 27
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 653
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 80
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 158
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 103
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tt.c | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 30
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 45
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 144
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 60
-rw-r--r--  drivers/net/wireless/libertas/mesh.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 15
-rw-r--r--  drivers/net/wireless/mwifiex/11n.c | 16
-rw-r--r--  drivers/net/wireless/mwifiex/11n_aggr.c | 7
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 177
-rw-r--r--  drivers/net/wireless/mwifiex/cfp.c | 42
-rw-r--r--  drivers/net/wireless/mwifiex/decl.h | 12
-rw-r--r--  drivers/net/wireless/mwifiex/fw.h | 72
-rw-r--r--  drivers/net/wireless/mwifiex/ie.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 15
-rw-r--r--  drivers/net/wireless/mwifiex/ioctl.h | 43
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 1
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 110
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 11
-rw-r--r--  drivers/net/wireless/mwifiex/pcie.c | 39
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 63
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 219
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmd.c | 77
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmdresp.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 10
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 11
-rw-r--r--  drivers/net/wireless/mwifiex/sta_rx.c | 49
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 130
-rw-r--r--  drivers/net/wireless/mwifiex/uap_txrx.c | 70
-rw-r--r--  drivers/net/wireless/mwifiex/usb.c | 58
-rw-r--r--  drivers/net/wireless/mwifiex/util.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 16
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h | 279
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 1655
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.h | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 19
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 43
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/dev.c | 6
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/grf5101.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/grf5101.h | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/max2820.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/max2820.h | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/rtl8225.c | 4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/sa2400.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/sa2400.h | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/dev.c | 6
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/rtl8187.h | 4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/rtl8225.c | 4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/rtl8225.h | 4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl818x.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/rc.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/trx.c | 9
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.h | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.h | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/trx.c | 20
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 13
-rw-r--r--  drivers/net/wireless/ti/wlcore/testmode.c | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/testmode.h | 3
-rw-r--r--  drivers/net/wireless/zd1201.c | 8
-rw-r--r--  drivers/net/xen-netback/common.h | 150
-rw-r--r--  drivers/net/xen-netback/interface.c | 135
-rw-r--r--  drivers/net/xen-netback/netback.c | 833
-rw-r--r--  drivers/nfc/nfcsim.c | 6
-rw-r--r--  drivers/nfc/pn533.c | 389
-rw-r--r--  drivers/nfc/pn544/i2c.c | 360
-rw-r--r--  drivers/nfc/pn544/mei.c | 2
-rw-r--r--  drivers/nfc/pn544/pn544.c | 20
-rw-r--r--  drivers/nfc/pn544/pn544.h | 7
-rw-r--r--  drivers/pci/pci.c | 43
-rw-r--r--  drivers/pci/pci.h | 3
-rw-r--r--  drivers/pci/probe.c | 4
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 12
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 12
-rw-r--r--  drivers/ssb/Kconfig | 2
-rw-r--r--  drivers/ssb/driver_chipcommon_sflash.c | 8
-rw-r--r--  drivers/staging/vt6655/hostap.c | 2
-rw-r--r--  drivers/staging/vt6655/ioctl.c | 2
-rw-r--r--  drivers/staging/vt6655/wpactl.c | 2
-rw-r--r--  drivers/vhost/net.c | 92
-rw-r--r--  drivers/vhost/vhost.c | 56
555 files changed, 43674 insertions, 18702 deletions
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 507362a76a73..449f6298dc89 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1088,15 +1088,8 @@ static int he_start(struct atm_dev *dev)
 	for (i = 0; i < 6; ++i)
 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
 
-	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
-		he_dev->prod_id,
-		he_dev->media & 0x40 ? "SM" : "MM",
-		dev->esi[0],
-		dev->esi[1],
-		dev->esi[2],
-		dev->esi[3],
-		dev->esi[4],
-		dev->esi[5]);
+	hprintk("%s%s, %pM\n", he_dev->prod_id,
+		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
 			ATM_OC12_PCR : ATM_OC3_PCR;
 
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6587dc295eb0..409502a78e7e 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -153,7 +153,6 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
 static void which_list(ns_dev * card, struct sk_buff *skb);
 #endif
 static void ns_poll(unsigned long arg);
-static int ns_parse_mac(char *mac, unsigned char *esi);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -779,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
 		return error;
 	}
 
-	if (ns_parse_mac(mac[i], card->atmdev->esi)) {
+	if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
 		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
 				   card->atmdev->esi, 6);
 		if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
@@ -2802,29 +2801,6 @@ static void ns_poll(unsigned long arg)
 	PRINTK("nicstar: Leaving ns_poll().\n");
 }
 
-static int ns_parse_mac(char *mac, unsigned char *esi)
-{
-	int i, j;
-	short byte1, byte0;
-
-	if (mac == NULL || esi == NULL)
-		return -1;
-	j = 0;
-	for (i = 0; i < 6; i++) {
-		if ((byte1 = hex_to_bin(mac[j++])) < 0)
-			return -1;
-		if ((byte0 = hex_to_bin(mac[j++])) < 0)
-			return -1;
-		esi[i] = (unsigned char)(byte1 * 16 + byte0);
-		if (i < 5) {
-			if (mac[j++] != ':')
-				return -1;
-		}
-	}
-	return 0;
-}
-
-
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr)
 {
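The hunk above drops the driver-local parser in favour of the generic mac_pton() helper, which returns true only for a complete "xx:xx:xx:xx:xx:xx" string. A minimal sketch of the contract the driver now relies on (example_parse_esi is a hypothetical wrapper, not driver code):

    #include <linux/errno.h>
    #include <linux/kernel.h>    /* bool mac_pton(const char *s, u8 *mac); */
    #include <linux/types.h>

    /* Mirrors the fallback logic above: any parse failure sends the
     * caller to the EPROM copy of the address. */
    static int example_parse_esi(const char *arg, u8 *esi)
    {
        if (arg == NULL || !mac_pton(arg, esi))
            return -EINVAL;
        return 0;
    }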
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 380a2003231e..7c081b38ef3e 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -35,8 +35,14 @@ config BCMA_DRIVER_PCI_HOSTMODE
 	  PCI core hostmode operation (external PCI bus).
 
 config BCMA_HOST_SOC
-	bool
-	depends on BCMA_DRIVER_MIPS
+	bool "Support for BCMA in a SoC"
+	depends on BCMA
+	help
+	  Host interface for a Broadcom AIX bus directly mapped into
+	  the memory. This only works with the Broadcom SoCs from the
+	  BCM47XX line.
+
+	  If unsure, say N
 
 config BCMA_DRIVER_MIPS
 	bool "BCMA Broadcom MIPS core driver"
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index cf7a476a519f..c9fd6943ce45 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -31,7 +31,7 @@ static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
 	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
 }
 
-static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
+static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
 {
 	u32 v;
 	int i;
@@ -55,7 +55,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
 	}
 }
 
-static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
+static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
 {
 	int max_retries = 10;
 	u16 ret = 0;
@@ -98,7 +98,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
 	return ret;
 }
 
-static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
+static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
 				 u8 address, u16 data)
 {
 	int max_retries = 10;
@@ -137,6 +137,13 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
 	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
 }
 
+static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
+				    u8 address, u16 data)
+{
+	bcma_pcie_mdio_write(pc, device, address, data);
+	return bcma_pcie_mdio_read(pc, device, address);
+}
+
 /**************************************************
  * Workarounds.
  **************************************************/
@@ -203,6 +210,25 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
 	}
 }
 
+static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
+{
+	u16 data;
+
+	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+		data = up ? 0x74 : 0x7C;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+		data = up ? 0x75 : 0x7D;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	}
+}
+
 /**************************************************
  * Init.
  **************************************************/
@@ -262,7 +288,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
 
-void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
+static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
 {
 	u32 w;
 
@@ -274,4 +300,33 @@ void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
 	bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
 	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
+
+void bcma_core_pci_up(struct bcma_bus *bus)
+{
+	struct bcma_drv_pci *pc;
+
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	pc = &bus->drv_pci[0];
+
+	bcma_core_pci_power_save(pc, true);
+
+	bcma_core_pci_extend_L1timer(pc, true);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_up);
+
+void bcma_core_pci_down(struct bcma_bus *bus)
+{
+	struct bcma_drv_pci *pc;
+
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	pc = &bus->drv_pci[0];
+
+	bcma_core_pci_extend_L1timer(pc, false);
+
+	bcma_core_pci_power_save(pc, false);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_down);
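The newly exported pair is intended to bracket periods of device activity; a hypothetical consumer (names invented, error handling elided) would look roughly like:

    static int example_core_start(struct bcma_device *core)
    {
        bcma_core_pci_up(core->bus);    /* leave power save, extend L1 timer */
        /* ... bring the core's MAC/PHY up ... */
        return 0;
    }

    static void example_core_stop(struct bcma_device *core)
    {
        /* ... quiesce the hardware ... */
        bcma_core_pci_down(core->bus);  /* re-enter PCIe power save */
    }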
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 30629a3d44cc..c3d7b03c2fdc 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -581,6 +581,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
 int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
 {
 	struct bcma_drv_pci_host *pc_host;
+	int readrq;
 
 	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
 		/* This is not a device on the PCI-core bridge. */
@@ -595,6 +596,11 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
 	dev->irq = bcma_core_irq(pc_host->pdev->core);
 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
 
+	readrq = pcie_get_readrq(dev);
+	if (readrq > 128) {
+		pr_info("change PCIe max read request size from %i to 128\n", readrq);
+		pcie_set_readrq(dev, 128);
+	}
 	return 0;
 }
 EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);
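pcie_get_readrq()/pcie_set_readrq() are the standard PCI core accessors for the PCIe Max_Read_Request_Size field; the fixup above is an instance of this generic clamp (sketch only, example_cap_mrrs is not kernel code):

    #include <linux/pci.h>

    /* Clamp a device's PCIe max read request size to the 128-byte
     * minimum the spec allows. */
    static void example_cap_mrrs(struct pci_dev *dev)
    {
        int readrq = pcie_get_readrq(dev);

        if (readrq > 128)
            pcie_set_readrq(dev, 128);
    }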
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 0067422ec17d..90ee350442a9 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -237,7 +237,7 @@ int bcma_bus_register(struct bcma_bus *bus)
 	err = bcma_bus_scan(bus);
 	if (err) {
 		bcma_err(bus, "Failed to scan: %d\n", err);
-		return -1;
+		return err;
 	}
 
 	/* Early init CC core */
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 8bffa5c9818c..cd6b20fce680 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -32,6 +32,18 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
 	{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
 	{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
 	{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+	{ BCMA_CORE_PCIEG2, "PCIe Gen 2" },
+	{ BCMA_CORE_DMA, "DMA" },
+	{ BCMA_CORE_SDIO3, "SDIO3" },
+	{ BCMA_CORE_USB20, "USB 2.0" },
+	{ BCMA_CORE_USB30, "USB 3.0" },
+	{ BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" },
+	{ BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" },
+	{ BCMA_CORE_ROM, "ROM" },
+	{ BCMA_CORE_NAND, "NAND flash controller" },
+	{ BCMA_CORE_QSPI, "SPI flash controller" },
+	{ BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" },
+	{ BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
 	{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
 	{ BCMA_CORE_ALTA, "ALTA (I2S)" },
 	{ BCMA_CORE_INVALID, "Invalid" },
@@ -201,7 +213,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
 	return ent;
 }
 
-static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
+static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
 				   u32 type, u8 port)
 {
 	u32 addrl, addrh, sizel, sizeh = 0;
@@ -213,7 +225,7 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
 	    ((ent & SCAN_ADDR_TYPE) != type) ||
 	    (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
 		bcma_erom_push_ent(eromptr);
-		return -EINVAL;
+		return (u32)-EINVAL;
 	}
 
 	addrl = ent & SCAN_ADDR_ADDR;
@@ -261,7 +273,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 			      struct bcma_device_id *match, int core_num,
 			      struct bcma_device *core)
 {
-	s32 tmp;
+	u32 tmp;
 	u8 i, j;
 	s32 cia, cib;
 	u8 ports[2], wrappers[2];
@@ -339,11 +351,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 	 * the main register space for the core
 	 */
 	tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
-	if (tmp <= 0) {
+	if (tmp == 0 || IS_ERR_VALUE(tmp)) {
 		/* Try again to see if it is a bridge */
 		tmp = bcma_erom_get_addr_desc(bus, eromptr,
 					      SCAN_ADDR_TYPE_BRIDGE, 0);
-		if (tmp <= 0) {
+		if (tmp == 0 || IS_ERR_VALUE(tmp)) {
 			return -EILSEQ;
 		} else {
 			bcma_info(bus, "Bridge found\n");
@@ -357,7 +369,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 				SCAN_ADDR_TYPE_SLAVE, i);
-			if (tmp < 0) {
+			if (IS_ERR_VALUE(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: slave port %d "
 				 * "has %d descriptors\n", i, j); */
@@ -374,7 +386,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 				SCAN_ADDR_TYPE_MWRAP, i);
-			if (tmp < 0) {
+			if (IS_ERR_VALUE(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: master wrapper %d "
 				 * "has %d descriptors\n", i, j); */
@@ -392,7 +404,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 		for (j = 0; ; j++) {
 			tmp = bcma_erom_get_addr_desc(bus, eromptr,
 				SCAN_ADDR_TYPE_SWRAP, i + hack);
-			if (tmp < 0) {
+			if (IS_ERR_VALUE(tmp)) {
 				/* no more entries for port _i_ */
 				/* pr_debug("erom: master wrapper %d "
 				 * has %d descriptors\n", i, j); */
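The scan rework switches bcma_erom_get_addr_desc() to in-band errno encoding: errors occupy the top MAX_ERRNO values of the unsigned range and IS_ERR_VALUE() separates them from real addresses. One subtlety: IS_ERR_VALUE() compares as unsigned long, so (u32)-EINVAL only tests true on 32-bit targets; unsigned long is the safe carrier type. A sketch of the pattern (example_get_addr is illustrative, not kernel code):

    #include <linux/err.h>
    #include <linux/errno.h>

    static unsigned long example_get_addr(bool ok)
    {
        if (!ok)
            return (unsigned long)-EINVAL;    /* error encoded in-band */
        return 0x18000000UL;                  /* a plausible core address */
    }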
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index db2c3c305df8..023d35e3c7a7 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -43,7 +43,7 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
-	ret = strict_strtol(buf, 10, &result);
+	ret = kstrtol(buf, 10, &result);
 	if (ret)
 		return ret;
 
@@ -89,7 +89,7 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
-	ret = strict_strtol(buf, 10, &result);
+	ret = kstrtol(buf, 10, &result);
 	if (ret)
 		return ret;
 
@@ -135,7 +135,7 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
-	ret = strict_strtol(buf, 10, &result);
+	ret = kstrtol(buf, 10, &result);
 	if (ret)
 		return ret;
 
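strict_strtol() was being removed tree-wide; kstrtol() keeps the same (string, base, result) shape but is stricter, rejecting trailing garbage other than a single newline. The contract the handlers above now depend on, as a sketch (example_parse_long is illustrative):

    #include <linux/kernel.h>

    /* kstrtol() returns 0 on success, -ERANGE on overflow and -EINVAL on
     * malformed input, so callers can simply propagate the return value. */
    static int example_parse_long(const char *buf, long *out)
    {
        return kstrtol(buf, 10, out);    /* base 10, like the old call */
    }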
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 75c262694632..00da6df9f71e 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -486,7 +486,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
 		if (firmwarelen - offset < txlen)
 			txlen = firmwarelen - offset;
 
-		tx_blocks = (txlen + blksz_dl - 1) / blksz_dl;
+		tx_blocks = DIV_ROUND_UP(txlen, blksz_dl);
 
 		memcpy(fwbuf, &firmware[offset], txlen);
 	}
@@ -873,7 +873,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
 	}
 
 	blksz = SDIO_BLOCK_SIZE;
-	buf_block_len = (nb + blksz - 1) / blksz;
+	buf_block_len = DIV_ROUND_UP(nb, blksz);
 
 	sdio_claim_host(card->func);
 
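DIV_ROUND_UP() from linux/kernel.h expands, in simplified form, to exactly the open-coded ceiling division it replaces here:

    /* Simplified form of the kernel macro: */
    #define EXAMPLE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* e.g. EXAMPLE_DIV_ROUND_UP(1000, 256) == 4, the same result as the
     * removed (txlen + blksz_dl - 1) / blksz_dl with blksz_dl == 256. */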
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index a7e4939787c9..7f910c76ca0a 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1307,11 +1307,11 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
 	}
 	if (fifo2 & 2) {
 		hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
-		hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
+		hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
 				   HFCPCI_INTS_B2REC);
 	} else {
 		hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
-		hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
+		hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
 				   HFCPCI_INTS_B1REC);
 	}
 #ifdef REVERSE_BITORDER
@@ -1346,14 +1346,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
 	if (fifo2 & 2) {
 		hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
 		if (!tics)
-			hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+			hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
 					  HFCPCI_INTS_B2REC);
 		hc->hw.ctmt |= 2;
 		hc->hw.conn &= ~0x18;
 	} else {
 		hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
 		if (!tics)
-			hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+			hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
 					  HFCPCI_INTS_B1REC);
 		hc->hw.ctmt |= 1;
 		hc->hw.conn &= ~0x03;
@@ -1375,14 +1375,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
 	if (fifo2 & 2) {
 		hc->hw.last_bfifo_cnt[1] = 0;
 		hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
-		hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+		hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
 				  HFCPCI_INTS_B2REC);
 		hc->hw.ctmt &= ~2;
 		hc->hw.conn &= ~0x18;
 	} else {
 		hc->hw.last_bfifo_cnt[0] = 0;
 		hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
-		hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+		hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
 				  HFCPCI_INTS_B1REC);
 		hc->hw.ctmt &= ~1;
 		hc->hw.conn &= ~0x03;
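With disjoint single-bit masks '+' happens to produce the same value as '|', but only '|' is safe: addition carries into the neighbouring bit when operands overlap or when a bit is already set. A two-line demonstration:

    #define EX_MASK_A 0x04
    #define EX_MASK_B 0x04                 /* overlapping on purpose */

    /* EX_MASK_A | EX_MASK_B == 0x04 -- still the intended bit
     * EX_MASK_A + EX_MASK_B == 0x08 -- a different, wrong bit */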
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 390061d09693..0d8f427ade93 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -143,10 +143,9 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
  */
 static inline struct port *__get_first_port(struct bonding *bond)
 {
-	if (bond->slave_cnt == 0)
-		return NULL;
+	struct slave *first_slave = bond_first_slave(bond);
 
-	return &(SLAVE_AD_INFO(bond->first_slave).port);
+	return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
 }
 
 /**
@@ -159,13 +158,16 @@ static inline struct port *__get_first_port(struct bonding *bond)
 static inline struct port *__get_next_port(struct port *port)
 {
 	struct bonding *bond = __get_bond_by_port(port);
-	struct slave *slave = port->slave;
+	struct slave *slave = port->slave, *slave_next;
 
 	// If there's no bond for this port, or this is the last slave
-	if ((bond == NULL) || (slave->next == bond->first_slave))
+	if (bond == NULL)
+		return NULL;
+	slave_next = bond_next_slave(bond, slave);
+	if (!slave_next || bond_is_first_slave(bond, slave_next))
 		return NULL;
 
-	return &(SLAVE_AD_INFO(slave->next).port);
+	return &(SLAVE_AD_INFO(slave_next).port);
 }
 
 /**
@@ -178,12 +180,14 @@ static inline struct port *__get_next_port(struct port *port)
 static inline struct aggregator *__get_first_agg(struct port *port)
 {
 	struct bonding *bond = __get_bond_by_port(port);
+	struct slave *first_slave;
 
 	// If there's no bond for this port, or bond has no slaves
-	if ((bond == NULL) || (bond->slave_cnt == 0))
+	if (bond == NULL)
 		return NULL;
+	first_slave = bond_first_slave(bond);
 
-	return &(SLAVE_AD_INFO(bond->first_slave).aggregator);
+	return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
 }
 
 /**
@@ -195,14 +199,17 @@ static inline struct aggregator *__get_first_agg(struct port *port)
  */
 static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
 {
-	struct slave *slave = aggregator->slave;
+	struct slave *slave = aggregator->slave, *slave_next;
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 
 	// If there's no bond for this aggregator, or this is the last slave
-	if ((bond == NULL) || (slave->next == bond->first_slave))
+	if (bond == NULL)
+		return NULL;
+	slave_next = bond_next_slave(bond, slave);
+	if (!slave_next || bond_is_first_slave(bond, slave_next))
 		return NULL;
 
-	return &(SLAVE_AD_INFO(slave->next).aggregator);
+	return &(SLAVE_AD_INFO(slave_next).aggregator);
 }
 
 /*
@@ -2110,7 +2117,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	read_lock(&bond->lock);
 
 	//check if there are any slaves
-	if (bond->slave_cnt == 0)
+	if (list_empty(&bond->slave_list))
 		goto re_arm;
 
 	// check if agg_select_timer timer after initialize is timed out
@@ -2336,8 +2343,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 int bond_3ad_set_carrier(struct bonding *bond)
 {
 	struct aggregator *active;
+	struct slave *first_slave;
 
-	active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
+	first_slave = bond_first_slave(bond);
+	if (!first_slave)
+		return 0;
+	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
 	if (active) {
 		/* are enough slaves available to consider link up? */
 		if (active->num_of_ports < bond->params.min_links) {
@@ -2415,6 +2426,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	struct ad_info ad_info;
 	int res = 1;

+	read_lock(&bond->lock);
 	if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
 		pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
 			 dev->name);
@@ -2432,7 +2444,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)

 	slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);

-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;

 		if (agg && (agg->aggregator_identifier == agg_id)) {
@@ -2464,6 +2476,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}

 out:
+	read_unlock(&bond->lock);
 	if (res) {
 		/* no suitable interface, frame not sent */
 		kfree_skb(skb);
@@ -2501,18 +2514,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
  */
 void bond_3ad_update_lacp_rate(struct bonding *bond)
 {
-	int i;
-	struct slave *slave;
 	struct port *port = NULL;
+	struct slave *slave;
 	int lacp_fast;

-	write_lock_bh(&bond->lock);
 	lacp_fast = bond->params.lacp_fast;
-
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		port = &(SLAVE_AD_INFO(slave).port);
-		if (port->slave == NULL)
-			continue;
 		__get_state_machine_lock(port);
 		if (lacp_fast)
 			port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -2520,6 +2528,4 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
 			port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
 		__release_state_machine_lock(port);
 	}
-
-	write_unlock_bh(&bond->lock);
 }
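
The bond_3ad.c hunks above are mostly mechanical fallout from converting bonding's hand-rolled circular slave ring (slave->next/slave->prev/bond->first_slave, guarded by slave_cnt) to a standard <linux/list.h> list_head: emptiness tests become list_empty(&bond->slave_list), and the __get_next_port()/__get_next_agg() helpers go through bond_first_slave()/bond_next_slave() instead of chasing slave->next. A rough, runnable userspace sketch of that pattern — a hypothetical reimplementation for illustration, not the kernel's own list.h or helpers:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct slave { const char *name; struct list_head list; };
struct bonding { struct list_head slave_list; };

/* NULL when the bond has no slaves, mirroring bond_first_slave() */
static struct slave *bond_first_slave(struct bonding *bond)
{
	return list_empty(&bond->slave_list) ? NULL :
	       container_of(bond->slave_list.next, struct slave, list);
}

/* wraps past the head sentinel, so the caller detects a completed
 * cycle with an "is first slave again?" test, as __get_next_port()
 * now does with bond_is_first_slave() */
static struct slave *bond_next_slave(struct bonding *bond, struct slave *s)
{
	struct list_head *n = s->list.next;

	if (n == &bond->slave_list)	/* skip the sentinel */
		n = n->next;
	return container_of(n, struct slave, list);
}

static int bond_is_first_slave(struct bonding *bond, struct slave *s)
{
	return bond->slave_list.next == &s->list;
}

int main(void)
{
	struct bonding bond;
	struct slave a = { .name = "eth0" }, b = { .name = "eth1" };
	struct slave *s;

	INIT_LIST_HEAD(&bond.slave_list);
	list_add_tail(&a.list, &bond.slave_list);
	list_add_tail(&b.list, &bond.slave_list);

	for (s = bond_first_slave(&bond); s; ) {
		printf("%s\n", s->name);
		s = bond_next_slave(&bond, s);
		if (bond_is_first_slave(&bond, s))
			break;	/* walked the whole ring once */
	}
	return 0;
}

With the head sentinel owning list membership, attach and detach collapse to list_add_tail_rcu()/list_del_rcu() one-liners, which is exactly what the bond_main.c hunks further down do.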
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 4ea8ed150d46..91f179d5135c 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -224,13 +224,12 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
 	struct slave *slave, *least_loaded;
 	long long max_gap;
-	int i;

 	least_loaded = NULL;
 	max_gap = LLONG_MIN;

 	/* Find the slave with the largest gap */
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (SLAVE_IS_OK(slave)) {
 			long long gap = compute_gap(slave);

@@ -386,11 +385,10 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
 	struct slave *rx_slave, *slave, *start_at;
 	int i = 0;

-	if (bond_info->next_rx_slave) {
+	if (bond_info->next_rx_slave)
 		start_at = bond_info->next_rx_slave;
-	} else {
-		start_at = bond->first_slave;
-	}
+	else
+		start_at = bond_first_slave(bond);

 	rx_slave = NULL;

@@ -405,7 +403,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
 	}

 	if (rx_slave) {
-		bond_info->next_rx_slave = rx_slave->next;
+		slave = bond_next_slave(bond, rx_slave);
+		bond_info->next_rx_slave = slave;
 	}

 	return rx_slave;
@@ -513,7 +512,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)

 	skb->dev = client_info->slave->dev;

-	if (client_info->tag) {
+	if (client_info->vlan_id) {
 		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
 		if (!skb) {
 			pr_err("%s: Error: failed to insert VLAN tag\n",
@@ -695,10 +694,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 		client_info->ntt = 0;
 	}

-	if (bond_vlan_used(bond)) {
-		if (!vlan_get_tag(skb, &client_info->vlan_id))
-			client_info->tag = 1;
-	}
+	if (!vlan_get_tag(skb, &client_info->vlan_id))
+		client_info->vlan_id = 0;

 	if (!client_info->assigned) {
 		u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
@@ -804,7 +801,7 @@ static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
 	entry->used_prev = RLB_NULL_INDEX;
 	entry->assigned = 0;
 	entry->slave = NULL;
-	entry->tag = 0;
+	entry->vlan_id = 0;
 }
 static void rlb_init_table_entry_src(struct rlb_client_info *entry)
 {
@@ -961,7 +958,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 		struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
 		u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;

-		if (curr->tag && (curr->vlan_id == vlan_id))
+		if (curr->vlan_id == vlan_id)
 			rlb_delete_table_entry(bond, curr_index);

 		curr_index = next_index;
@@ -972,58 +969,62 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)

 /*********************** tlb/rlb shared functions *********************/

-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
+			    u16 vid)
 {
-	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct learning_pkt pkt;
+	struct sk_buff *skb;
 	int size = sizeof(struct learning_pkt);
-	int i;
+	char *data;

 	memset(&pkt, 0, size);
 	memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
 	memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
 	pkt.type = cpu_to_be16(ETH_P_LOOP);

-	for (i = 0; i < MAX_LP_BURST; i++) {
-		struct sk_buff *skb;
-		char *data;
+	skb = dev_alloc_skb(size);
+	if (!skb)
+		return;
+
+	data = skb_put(skb, size);
+	memcpy(data, &pkt, size);
+
+	skb_reset_mac_header(skb);
+	skb->network_header = skb->mac_header + ETH_HLEN;
+	skb->protocol = pkt.type;
+	skb->priority = TC_PRIO_CONTROL;
+	skb->dev = slave->dev;

-		skb = dev_alloc_skb(size);
+	if (vid) {
+		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
 		if (!skb) {
+			pr_err("%s: Error: failed to insert VLAN tag\n",
+			       slave->bond->dev->name);
 			return;
 		}
+	}

-		data = skb_put(skb, size);
-		memcpy(data, &pkt, size);
-
-		skb_reset_mac_header(skb);
-		skb->network_header = skb->mac_header + ETH_HLEN;
-		skb->protocol = pkt.type;
-		skb->priority = TC_PRIO_CONTROL;
-		skb->dev = slave->dev;
+	dev_queue_xmit(skb);
+}

-		if (bond_vlan_used(bond)) {
-			struct vlan_entry *vlan;

-			vlan = bond_next_vlan(bond,
-					      bond->alb_info.current_alb_vlan);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+{
+	struct bonding *bond = bond_get_bond_by_slave(slave);
+	struct net_device *upper;
+	struct list_head *iter;

-			bond->alb_info.current_alb_vlan = vlan;
-			if (!vlan) {
-				kfree_skb(skb);
-				continue;
-			}
+	/* send untagged */
+	alb_send_lp_vid(slave, mac_addr, 0);

-			skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
-			if (!skb) {
-				pr_err("%s: Error: failed to insert VLAN tag\n",
-				       bond->dev->name);
-				continue;
-			}
-		}
-
-		dev_queue_xmit(skb);
+	/* loop through vlans and send one packet for each */
+	rcu_read_lock();
+	netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+		if (upper->priv_flags & IFF_802_1Q_VLAN)
+			alb_send_lp_vid(slave, mac_addr,
+					vlan_dev_vlan_id(upper));
 	}
+	rcu_read_unlock();
 }

 static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
@@ -1173,9 +1174,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 {
 	struct slave *tmp_slave1, *free_mac_slave = NULL;
 	struct slave *has_bond_addr = bond->curr_active_slave;
-	int i;

-	if (bond->slave_cnt == 0) {
+	if (list_empty(&bond->slave_list)) {
 		/* this is the first slave */
 		return 0;
 	}
@@ -1196,7 +1196,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 	/* The slave's address is equal to the address of the bond.
 	 * Search for a spare address in the bond for this slave.
 	 */
-	bond_for_each_slave(bond, tmp_slave1, i) {
+	bond_for_each_slave(bond, tmp_slave1) {
 		if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
 			/* no slave has tmp_slave1's perm addr
 			 * as its curr addr
@@ -1246,17 +1246,15 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
  */
 static int alb_set_mac_address(struct bonding *bond, void *addr)
 {
-	struct sockaddr sa;
-	struct slave *slave, *stop_at;
 	char tmp_addr[ETH_ALEN];
+	struct slave *slave;
+	struct sockaddr sa;
 	int res;
-	int i;

-	if (bond->alb_info.rlb_enabled) {
+	if (bond->alb_info.rlb_enabled)
 		return 0;
-	}

-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		/* save net_device's current hw address */
 		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);

@@ -1276,8 +1274,7 @@ unwind:
 	sa.sa_family = bond->dev->type;

 	/* unwind from head to the slave that failed */
-	stop_at = slave;
-	bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+	bond_for_each_slave_continue_reverse(bond, slave) {
 		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
 		dev_set_mac_address(slave->dev, &sa);
 		memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
@@ -1342,6 +1339,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)

 	/* make sure that the curr_active_slave do not change during tx
 	 */
+	read_lock(&bond->lock);
 	read_lock(&bond->curr_slave_lock);

 	switch (ntohs(skb->protocol)) {
@@ -1446,11 +1444,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	}

 	read_unlock(&bond->curr_slave_lock);
-
+	read_unlock(&bond->lock);
 	if (res) {
 		/* no suitable interface, frame not sent */
 		kfree_skb(skb);
 	}
+
 	return NETDEV_TX_OK;
 }

@@ -1460,11 +1459,10 @@ void bond_alb_monitor(struct work_struct *work)
 					    alb_work.work);
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct slave *slave;
-	int i;

 	read_lock(&bond->lock);

-	if (bond->slave_cnt == 0) {
+	if (list_empty(&bond->slave_list)) {
 		bond_info->tx_rebalance_counter = 0;
 		bond_info->lp_counter = 0;
 		goto re_arm;
@@ -1482,9 +1480,8 @@ void bond_alb_monitor(struct work_struct *work)
 		 */
 		read_lock(&bond->curr_slave_lock);

-		bond_for_each_slave(bond, slave, i) {
+		bond_for_each_slave(bond, slave)
 			alb_send_learning_packets(slave, slave->dev->dev_addr);
-		}

 		read_unlock(&bond->curr_slave_lock);

@@ -1496,7 +1493,7 @@ void bond_alb_monitor(struct work_struct *work)

 		read_lock(&bond->curr_slave_lock);

-		bond_for_each_slave(bond, slave, i) {
+		bond_for_each_slave(bond, slave) {
 			tlb_clear_slave(bond, slave, 1);
 			if (slave == bond->curr_active_slave) {
 				SLAVE_TLB_INFO(slave).load =
@@ -1602,9 +1599,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
  */
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
 {
-	if (bond->slave_cnt > 1) {
+	if (!list_empty(&bond->slave_list))
 		alb_change_hw_addr_on_detach(bond, slave);
-	}

 	tlb_clear_slave(bond, slave, 0);

@@ -1661,9 +1657,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 {
 	struct slave *swap_slave;

-	if (bond->curr_active_slave == new_slave) {
+	if (bond->curr_active_slave == new_slave)
 		return;
-	}

 	if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
 		dev_set_promiscuity(bond->curr_active_slave->dev, -1);
@@ -1672,11 +1667,10 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	}

 	swap_slave = bond->curr_active_slave;
-	bond->curr_active_slave = new_slave;
+	rcu_assign_pointer(bond->curr_active_slave, new_slave);

-	if (!new_slave || (bond->slave_cnt == 0)) {
+	if (!new_slave || list_empty(&bond->slave_list))
 		return;
-	}

 	/* set the new curr_active_slave to the bonds mac address
 	 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
@@ -1689,9 +1683,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	 * ignored so we can mess with their MAC addresses without
 	 * fear of interference from transmit activity.
 	 */
-	if (swap_slave) {
+	if (swap_slave)
 		tlb_clear_slave(bond, swap_slave, 1);
-	}
 	tlb_clear_slave(bond, new_slave, 1);

 	write_unlock_bh(&bond->curr_slave_lock);
@@ -1768,11 +1761,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)

 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 {
-	if (bond->alb_info.current_alb_vlan &&
-	    (bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) {
-		bond->alb_info.current_alb_vlan = NULL;
-	}
-
 	if (bond->alb_info.rlb_enabled) {
 		rlb_clear_vlan(bond, vlan_id);
 	}
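
Alongside the learning-packet rework, bond_alb.c starts publishing curr_active_slave with rcu_assign_pointer() rather than a plain store. Conceptually that is a release-store: a reader that picks up the new pointer under rcu_read_lock() is guaranteed to see the pointed-to slave fully initialised, and the writer must wait out a grace period (the synchronize_rcu() added in __bond_release_one() below) before freeing the old object. A loose, runnable userspace analogue using C11 atomics — it models only the publish/consume ordering, not RCU's grace periods:

#include <stdatomic.h>
#include <stdio.h>

struct slave_info { const char *name; };

static _Atomic(struct slave_info *) curr_active;

/* writer side: analogous to rcu_assign_pointer() */
static void set_active(struct slave_info *s)
{
	atomic_store_explicit(&curr_active, s, memory_order_release);
}

/* reader side: analogous to rcu_dereference() under rcu_read_lock() */
static struct slave_info *get_active(void)
{
	return atomic_load_explicit(&curr_active, memory_order_acquire);
}

int main(void)
{
	static struct slave_info eth0 = { "eth0" };
	struct slave_info *s;

	set_active(&eth0);
	s = get_active();
	printf("active: %s\n", s ? s->name : "none");
	return 0;
}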
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e7a5b8b37ea3..28d8e4c7dc06 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -53,7 +53,6 @@ struct slave;


 #define TLB_NULL_INDEX		0xffffffff
-#define MAX_LP_BURST		3

 /* rlb defs */
 #define RLB_HASH_TABLE_SIZE	256
@@ -126,7 +125,6 @@ struct rlb_client_info {
 	u8  assigned;		/* checking whether this entry is assigned */
 	u8  ntt;		/* flag - need to transmit client info */
 	struct slave *slave;	/* the slave assigned to this client */
-	u8 tag;			/* flag - need to tag skb */
 	unsigned short vlan_id;	/* VLAN tag associated with IP address */
 };

@@ -170,7 +168,6 @@ struct alb_bond_info {
 					 * rx traffic should be
 					 * rebalanced
 					 */
-	struct vlan_entry	*current_alb_vlan;
 };

 int bond_alb_initialize(struct bonding *bond, int rlb_enabled);
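
These header changes are the data-model half of the rlb hunks above: instead of a separate tag flag next to vlan_id, VID 0 — reserved in 802.1Q and never a valid VLAN membership ID — doubles as "untagged". A trivial sketch of that flag-into-sentinel fold (hypothetical struct name; the kernel's is rlb_client_info):

#include <stdio.h>
#include <stdint.h>

/* 0 is reserved in 802.1Q, so it can safely mean "no tag" */
struct rlb_client { uint16_t vlan_id; };

static int client_is_tagged(const struct rlb_client *c)
{
	return c->vlan_id != 0;
}

int main(void)
{
	struct rlb_client untagged = { 0 }, tagged = { 42 };

	printf("untagged: %d, tagged: %d\n",
	       client_is_tagged(&untagged), client_is_tagged(&tagged));
	return 0;
}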
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e48cb339c0c6..39e5b1c7ffe2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,6 +77,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
+#include <linux/rculist.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -106,7 +107,7 @@ static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 static char *arp_validate;
 static char *arp_all_targets;
 static char *fail_over_mac;
-static int all_slaves_active = 0;
+static int all_slaves_active;
 static struct bond_params bonding_defaults;
 static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;

@@ -273,7 +274,7 @@ const char *bond_mode_name(int mode)
 	[BOND_MODE_ALB] = "adaptive load balancing",
 	};

-	if (mode < 0 || mode > BOND_MODE_ALB)
+	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
 		return "unknown";

 	return names[mode];
@@ -282,116 +283,6 @@ const char *bond_mode_name(int mode)
 /*---------------------------------- VLAN -----------------------------------*/

 /**
- * bond_add_vlan - add a new vlan id on bond
- * @bond: bond that got the notification
- * @vlan_id: the vlan id to add
- *
- * Returns -ENOMEM if allocation failed.
- */
-static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
-{
-	struct vlan_entry *vlan;
-
-	pr_debug("bond: %s, vlan id %d\n",
-		 (bond ? bond->dev->name : "None"), vlan_id);
-
-	vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
-	if (!vlan)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&vlan->vlan_list);
-	vlan->vlan_id = vlan_id;
-
-	write_lock_bh(&bond->lock);
-
-	list_add_tail(&vlan->vlan_list, &bond->vlan_list);
-
-	write_unlock_bh(&bond->lock);
-
-	pr_debug("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name);
-
-	return 0;
-}
-
-/**
- * bond_del_vlan - delete a vlan id from bond
- * @bond: bond that got the notification
- * @vlan_id: the vlan id to delete
- *
- * returns -ENODEV if @vlan_id was not found in @bond.
- */
-static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
-{
-	struct vlan_entry *vlan;
-	int res = -ENODEV;
-
-	pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
-
-	block_netpoll_tx();
-	write_lock_bh(&bond->lock);
-
-	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-		if (vlan->vlan_id == vlan_id) {
-			list_del(&vlan->vlan_list);
-
-			if (bond_is_lb(bond))
-				bond_alb_clear_vlan(bond, vlan_id);
-
-			pr_debug("removed VLAN ID %d from bond %s\n",
-				 vlan_id, bond->dev->name);
-
-			kfree(vlan);
-
-			res = 0;
-			goto out;
-		}
-	}
-
-	pr_debug("couldn't find VLAN ID %d in bond %s\n",
-		 vlan_id, bond->dev->name);
-
-out:
-	write_unlock_bh(&bond->lock);
-	unblock_netpoll_tx();
-	return res;
-}
-
-/**
- * bond_next_vlan - safely skip to the next item in the vlans list.
- * @bond: the bond we're working on
- * @curr: item we're advancing from
- *
- * Returns %NULL if list is empty, bond->next_vlan if @curr is %NULL,
- * or @curr->next otherwise (even if it is @curr itself again).
- *
- * Caller must hold bond->lock
- */
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
-{
-	struct vlan_entry *next, *last;
-
-	if (list_empty(&bond->vlan_list))
-		return NULL;
-
-	if (!curr) {
-		next = list_entry(bond->vlan_list.next,
-				  struct vlan_entry, vlan_list);
-	} else {
-		last = list_entry(bond->vlan_list.prev,
-				  struct vlan_entry, vlan_list);
-		if (last == curr) {
-			next = list_entry(bond->vlan_list.next,
-					  struct vlan_entry, vlan_list);
-		} else {
-			next = list_entry(curr->vlan_list.next,
-					  struct vlan_entry, vlan_list);
-		}
-	}
-
-	return next;
-}
-
-/**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
  * @bond: bond device that got this skb for tx.
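
With bond_add_vlan()/bond_del_vlan()/bond_next_vlan() gone, bonding no longer mirrors the VLAN set in a private vlan_list; the 8021q core already reference-counts VIDs per device, and vlan_vids_add_by_dev()/vlan_vids_del_by_dev() (used in bond_enslave()/__bond_release_one() below) copy that state onto slaves. A toy, runnable model of the refcounted-ID bookkeeping this relies on — hypothetical names and a fixed-size table for brevity, not the 8021q implementation:

#include <stdio.h>

#define MAX_VID 4096

struct vid_table { unsigned int refcnt[MAX_VID]; };

/* first add of a vid "programs" it; later adds just bump the count */
static void vid_add(struct vid_table *t, unsigned int vid)
{
	if (t->refcnt[vid]++ == 0)
		printf("program vid %u in hardware\n", vid);
}

/* last delete "unprograms" it */
static void vid_del(struct vid_table *t, unsigned int vid)
{
	if (t->refcnt[vid] && --t->refcnt[vid] == 0)
		printf("clear vid %u from hardware\n", vid);
}

int main(void)
{
	static struct vid_table t;

	vid_add(&t, 100);	/* programs */
	vid_add(&t, 100);	/* refcount only */
	vid_del(&t, 100);	/* refcount only */
	vid_del(&t, 100);	/* clears */
	return 0;
}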
@@ -441,28 +332,20 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
 				__be16 proto, u16 vid)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave, *stop_at;
-	int i, res;
+	struct slave *slave;
+	int res;

-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		res = vlan_vid_add(slave->dev, proto, vid);
 		if (res)
 			goto unwind;
 	}

-	res = bond_add_vlan(bond, vid);
-	if (res) {
-		pr_err("%s: Error: Failed to add vlan id %d\n",
-		       bond_dev->name, vid);
-		return res;
-	}
-
 	return 0;

 unwind:
-	/* unwind from head to the slave that failed */
-	stop_at = slave;
-	bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+	/* unwind from the slave that failed */
+	bond_for_each_slave_continue_reverse(bond, slave)
 		vlan_vid_del(slave->dev, proto, vid);

 	return res;
@@ -478,48 +361,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave;
-	int i, res;

-	bond_for_each_slave(bond, slave, i)
+	bond_for_each_slave(bond, slave)
 		vlan_vid_del(slave->dev, proto, vid);

-	res = bond_del_vlan(bond, vid);
-	if (res) {
-		pr_err("%s: Error: Failed to remove vlan id %d\n",
-		       bond_dev->name, vid);
-		return res;
-	}
+	if (bond_is_lb(bond))
+		bond_alb_clear_vlan(bond, vid);

 	return 0;
 }

-static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
-{
-	struct vlan_entry *vlan;
-	int res;
-
-	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-		res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
-				   vlan->vlan_id);
-		if (res)
-			pr_warning("%s: Failed to add vlan id %d to device %s\n",
-				   bond->dev->name, vlan->vlan_id,
-				   slave_dev->name);
-	}
-}
-
-static void bond_del_vlans_from_slave(struct bonding *bond,
-				      struct net_device *slave_dev)
-{
-	struct vlan_entry *vlan;
-
-	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-		if (!vlan->vlan_id)
-			continue;
-		vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
-	}
-}
-
 /*------------------------------- Link status -------------------------------*/

 /*
@@ -532,15 +383,14 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
 static int bond_set_carrier(struct bonding *bond)
 {
 	struct slave *slave;
-	int i;

-	if (bond->slave_cnt == 0)
+	if (list_empty(&bond->slave_list))
 		goto down;

 	if (bond->params.mode == BOND_MODE_8023AD)
 		return bond_3ad_set_carrier(bond);

-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		if (slave->link == BOND_LINK_UP) {
 			if (!netif_carrier_ok(bond->dev)) {
 				netif_carrier_on(bond->dev);
@@ -681,8 +531,8 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
 		}
 	} else {
 		struct slave *slave;
-		int i;
-		bond_for_each_slave(bond, slave, i) {
+
+		bond_for_each_slave(bond, slave) {
 			err = dev_set_promiscuity(slave->dev, inc);
 			if (err)
 				return err;
@@ -705,8 +555,8 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
 		}
 	} else {
 		struct slave *slave;
-		int i;
-		bond_for_each_slave(bond, slave, i) {
+
+		bond_for_each_slave(bond, slave) {
 			err = dev_set_allmulti(slave->dev, inc);
 			if (err)
 				return err;
@@ -715,15 +565,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
 	return err;
 }

-static void __bond_resend_igmp_join_requests(struct net_device *dev)
-{
-	struct in_device *in_dev;
-
-	in_dev = __in_dev_get_rcu(dev);
-	if (in_dev)
-		ip_mc_rejoin_groups(in_dev);
-}
-
 /*
  * Retrieve the list of registered multicast addresses for the bonding
  * device and retransmit an IGMP JOIN request to the current active
@@ -731,33 +572,12 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
  */
 static void bond_resend_igmp_join_requests(struct bonding *bond)
 {
-	struct net_device *bond_dev, *vlan_dev, *upper_dev;
-	struct vlan_entry *vlan;
-
-	read_lock(&bond->lock);
-	rcu_read_lock();
-
-	bond_dev = bond->dev;
-
-	/* rejoin all groups on bond device */
-	__bond_resend_igmp_join_requests(bond_dev);
-
-	/*
-	 * if bond is enslaved to a bridge,
-	 * then rejoin all groups on its master
-	 */
-	upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
-	if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
-		__bond_resend_igmp_join_requests(upper_dev);
-
-	/* rejoin all groups on vlan devices */
-	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-		vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
-						vlan->vlan_id);
-		if (vlan_dev)
-			__bond_resend_igmp_join_requests(vlan_dev);
+	if (!rtnl_trylock()) {
+		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
+		return;
 	}
-	rcu_read_unlock();
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
+	rtnl_unlock();

 	/* We use curr_slave_lock to protect against concurrent access to
 	 * igmp_retrans from multiple running instances of this function and
@@ -769,7 +589,6 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
 	}
 	write_unlock_bh(&bond->curr_slave_lock);
-	read_unlock(&bond->lock);
 }

 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
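
The rewritten bond_resend_igmp_join_requests() above is a textbook trylock-or-defer: the work item needs the RTNL but must not block inside the workqueue waiting for it, so on contention it re-queues itself one jiffy out and bails. The same shape as a runnable userspace sketch, with a pthread mutex standing in for the RTNL (names are illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void requeue(void)	/* stands in for queue_delayed_work(wq, work, 1) */
{
	puts("rtnl busy: re-queued, will retry later");
}

static void resend_igmp_work(void)
{
	if (pthread_mutex_trylock(&rtnl)) {	/* non-zero == contended */
		requeue();
		return;
	}
	puts("holding rtnl: notify NETDEV_RESEND_IGMP");
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	resend_igmp_work();
	return 0;
}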
@@ -808,6 +627,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 			      struct slave *old_active)
 {
+	ASSERT_RTNL();
+
 	if (old_active) {
 		if (bond->dev->flags & IFF_PROMISC)
 			dev_set_promiscuity(old_active->dev, -1);
@@ -966,9 +787,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
 	new_active = bond->curr_active_slave;

 	if (!new_active) { /* there were no active slaves left */
-		if (bond->slave_cnt > 0) /* found one slave */
-			new_active = bond->first_slave;
-		else
+		new_active = bond_first_slave(bond);
+		if (!new_active)
 			return NULL; /* still no slave, return NULL */
 	}

@@ -1008,7 +828,6 @@ static bool bond_should_notify_peers(struct bonding *bond)
 	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
 		return false;

-	bond->send_peer_notif--;
 	return true;
 }

@@ -1071,7 +890,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 		if (new_active)
 			bond_set_slave_active_flags(new_active);
 	} else {
-		bond->curr_active_slave = new_active;
+		rcu_assign_pointer(bond->curr_active_slave, new_active);
 	}

 	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
@@ -1115,7 +934,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 	    ((USES_PRIMARY(bond->params.mode) && new_active) ||
 	     bond->params.mode == BOND_MODE_ROUNDROBIN)) {
 		bond->igmp_retrans = bond->params.resend_igmp;
-		queue_delayed_work(bond->wq, &bond->mcast_work, 0);
+		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
 	}
 }

@@ -1161,17 +980,7 @@ void bond_select_active_slave(struct bonding *bond)
  */
 static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
 {
-	if (bond->first_slave == NULL) { /* attaching the first slave */
-		new_slave->next = new_slave;
-		new_slave->prev = new_slave;
-		bond->first_slave = new_slave;
-	} else {
-		new_slave->next = bond->first_slave;
-		new_slave->prev = bond->first_slave->prev;
-		new_slave->next->prev = new_slave;
-		new_slave->prev->next = new_slave;
-	}
-
+	list_add_tail_rcu(&new_slave->list, &bond->slave_list);
 	bond->slave_cnt++;
 }

@@ -1187,22 +996,7 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
  */
 static void bond_detach_slave(struct bonding *bond, struct slave *slave)
 {
-	if (slave->next)
-		slave->next->prev = slave->prev;
-
-	if (slave->prev)
-		slave->prev->next = slave->next;
-
-	if (bond->first_slave == slave) { /* slave is the first slave */
-		if (bond->slave_cnt > 1) { /* there are more slave */
-			bond->first_slave = slave->next;
-		} else {
-			bond->first_slave = NULL; /* slave was the last one */
-		}
-	}
-
-	slave->next = NULL;
-	slave->prev = NULL;
+	list_del_rcu(&slave->list);
 	bond->slave_cnt--;
 }

@@ -1249,47 +1043,31 @@ static void bond_poll_controller(struct net_device *bond_dev)
 {
 }

-static void __bond_netpoll_cleanup(struct bonding *bond)
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
+	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave;
-	int i;

-	bond_for_each_slave(bond, slave, i)
+	bond_for_each_slave(bond, slave)
 		if (IS_UP(slave->dev))
 			slave_disable_netpoll(slave);
 }
-static void bond_netpoll_cleanup(struct net_device *bond_dev)
-{
-	struct bonding *bond = netdev_priv(bond_dev);
-
-	read_lock(&bond->lock);
-	__bond_netpoll_cleanup(bond);
-	read_unlock(&bond->lock);
-}

 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
 {
 	struct bonding *bond = netdev_priv(dev);
 	struct slave *slave;
-	int i, err = 0;
+	int err = 0;

-	read_lock(&bond->lock);
-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		err = slave_enable_netpoll(slave);
 		if (err) {
-			__bond_netpoll_cleanup(bond);
+			bond_netpoll_cleanup(dev);
 			break;
 		}
 	}
-	read_unlock(&bond->lock);
 	return err;
 }
-
-static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
-{
-	return bond->dev->npinfo;
-}
-
 #else
 static inline int slave_enable_netpoll(struct slave *slave)
 {
@@ -1306,34 +1084,29 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
 /*---------------------------------- IOCTL ----------------------------------*/

 static netdev_features_t bond_fix_features(struct net_device *dev,
 					   netdev_features_t features)
 {
-	struct slave *slave;
 	struct bonding *bond = netdev_priv(dev);
 	netdev_features_t mask;
-	int i;
-
-	read_lock(&bond->lock);
+	struct slave *slave;

-	if (!bond->first_slave) {
+	if (list_empty(&bond->slave_list)) {
 		/* Disable adding VLANs to empty bond. But why? --mq */
 		features |= NETIF_F_VLAN_CHALLENGED;
-		goto out;
+		return features;
 	}

 	mask = features;
 	features &= ~NETIF_F_ONE_FOR_ALL;
 	features |= NETIF_F_ALL_FOR_ALL;

-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		features = netdev_increment_features(features,
 						     slave->dev->features,
 						     mask);
 	}
 	features = netdev_add_tso_features(features, mask);

-out:
-	read_unlock(&bond->lock);
 	return features;
 }

@@ -1343,21 +1116,18 @@ out:

 static void bond_compute_features(struct bonding *bond)
 {
-	struct slave *slave;
-	struct net_device *bond_dev = bond->dev;
+	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
 	unsigned short max_hard_header_len = ETH_HLEN;
 	unsigned int gso_max_size = GSO_MAX_SIZE;
+	struct net_device *bond_dev = bond->dev;
 	u16 gso_max_segs = GSO_MAX_SEGS;
-	int i;
-	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
-
-	read_lock(&bond->lock);
+	struct slave *slave;

-	if (!bond->first_slave)
+	if (list_empty(&bond->slave_list))
 		goto done;

-	bond_for_each_slave(bond, slave, i) {
+	bond_for_each_slave(bond, slave) {
 		vlan_features = netdev_increment_features(vlan_features,
 			slave->dev->vlan_features, BOND_VLAN_FEATURES);

@@ -1378,8 +1148,6 @@ done:
 	flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
 	bond_dev->priv_flags = flags | dst_release_flag;

-	read_unlock(&bond->lock);
-
 	netdev_change_features(bond_dev);
 }

@@ -1545,7 +1313,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
 	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
 	 */
-	if (bond->slave_cnt == 0) {
+	if (list_empty(&bond->slave_list)) {
 		if (bond_dev->type != slave_dev->type) {
 			pr_debug("%s: change device type from %d to %d\n",
 				 bond_dev->name,
@@ -1584,7 +1352,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	}

 	if (slave_ops->ndo_set_mac_address == NULL) {
-		if (bond->slave_cnt == 0) {
+		if (list_empty(&bond->slave_list)) {
 			pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
 				   bond_dev->name);
 			bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1600,7 +1368,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)

 	/* If this is the first slave, then we need to set the master's hardware
 	 * address to be the same as the slave's. */
-	if (!bond->slave_cnt && bond->dev->addr_assign_type == NET_ADDR_RANDOM)
+	if (list_empty(&bond->slave_list) &&
+	    bond->dev->addr_assign_type == NET_ADDR_RANDOM)
 		bond_set_dev_addr(bond->dev, slave_dev);

 	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1608,7 +1377,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		res = -ENOMEM;
 		goto err_undo_flags;
 	}
-
+	INIT_LIST_HEAD(&new_slave->list);
 	/*
 	 * Set the new_slave's queue_id to be zero. Queue ID mapping
 	 * is set via sysfs or module option if desired.
@@ -1703,7 +1472,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		dev_mc_add(slave_dev, lacpdu_multicast);
 	}

-	bond_add_vlans_on_slave(bond, slave_dev);
+	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
+	if (res) {
+		pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
+		       bond_dev->name, slave_dev->name);
+		goto err_close;
+	}

 	write_lock_bh(&bond->lock);
1709 1483
@@ -1794,15 +1568,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1794 */ 1568 */
1795 bond_set_slave_inactive_flags(new_slave); 1569 bond_set_slave_inactive_flags(new_slave);
1796 /* if this is the first slave */ 1570 /* if this is the first slave */
1797 if (bond->slave_cnt == 1) { 1571 if (bond_first_slave(bond) == new_slave) {
1798 SLAVE_AD_INFO(new_slave).id = 1; 1572 SLAVE_AD_INFO(new_slave).id = 1;
1799 /* Initialize AD with the number of times that the AD timer is called in 1 second 1573 /* Initialize AD with the number of times that the AD timer is called in 1 second
1800 * can be called only after the mac address of the bond is set 1574 * can be called only after the mac address of the bond is set
1801 */ 1575 */
1802 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL); 1576 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
1803 } else { 1577 } else {
1578 struct slave *prev_slave;
1579
1580 prev_slave = bond_prev_slave(bond, new_slave);
1804 SLAVE_AD_INFO(new_slave).id = 1581 SLAVE_AD_INFO(new_slave).id =
1805 SLAVE_AD_INFO(new_slave->prev).id + 1; 1582 SLAVE_AD_INFO(prev_slave).id + 1;
1806 } 1583 }
1807 1584
1808 bond_3ad_bind_slave(new_slave); 1585 bond_3ad_bind_slave(new_slave);
@@ -1824,7 +1601,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1824 * so we can change it without calling change_active_interface() 1601 * so we can change it without calling change_active_interface()
1825 */ 1602 */
1826 if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP) 1603 if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
1827 bond->curr_active_slave = new_slave; 1604 rcu_assign_pointer(bond->curr_active_slave, new_slave);
1828 1605
1829 break; 1606 break;
1830 } /* switch(bond_mode) */ 1607 } /* switch(bond_mode) */
@@ -1834,7 +1611,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1834 bond_set_carrier(bond); 1611 bond_set_carrier(bond);
1835 1612
1836#ifdef CONFIG_NET_POLL_CONTROLLER 1613#ifdef CONFIG_NET_POLL_CONTROLLER
1837 slave_dev->npinfo = bond_netpoll_info(bond); 1614 slave_dev->npinfo = bond->dev->npinfo;
1838 if (slave_dev->npinfo) { 1615 if (slave_dev->npinfo) {
1839 if (slave_enable_netpoll(new_slave)) { 1616 if (slave_enable_netpoll(new_slave)) {
1840 read_unlock(&bond->lock); 1617 read_unlock(&bond->lock);
@@ -1876,7 +1653,7 @@ err_detach:
1876 if (!USES_PRIMARY(bond->params.mode)) 1653 if (!USES_PRIMARY(bond->params.mode))
1877 bond_hw_addr_flush(bond_dev, slave_dev); 1654 bond_hw_addr_flush(bond_dev, slave_dev);
1878 1655
1879 bond_del_vlans_from_slave(bond, slave_dev); 1656 vlan_vids_del_by_dev(slave_dev, bond_dev);
1880 write_lock_bh(&bond->lock); 1657 write_lock_bh(&bond->lock);
1881 bond_detach_slave(bond, new_slave); 1658 bond_detach_slave(bond, new_slave);
1882 if (bond->primary_slave == new_slave) 1659 if (bond->primary_slave == new_slave)
@@ -1921,7 +1698,7 @@ err_free:
1921err_undo_flags: 1698err_undo_flags:
1922 bond_compute_features(bond); 1699 bond_compute_features(bond);
1923 /* Enslave of first slave has failed and we need to fix master's mac */ 1700 /* Enslave of first slave has failed and we need to fix master's mac */
1924 if (bond->slave_cnt == 0 && 1701 if (list_empty(&bond->slave_list) &&
1925 ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr)) 1702 ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
1926 eth_hw_addr_random(bond_dev); 1703 eth_hw_addr_random(bond_dev);
1927 1704
@@ -1977,15 +1754,6 @@ static int __bond_release_one(struct net_device *bond_dev,
1977 netdev_rx_handler_unregister(slave_dev); 1754 netdev_rx_handler_unregister(slave_dev);
1978 write_lock_bh(&bond->lock); 1755 write_lock_bh(&bond->lock);
1979 1756
1980 if (!all && !bond->params.fail_over_mac) {
1981 if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
1982 bond->slave_cnt > 1)
1983 pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
1984 bond_dev->name, slave_dev->name,
1985 slave->perm_hwaddr,
1986 bond_dev->name, slave_dev->name);
1987 }
1988
1989 /* Inform AD package of unbinding of slave. */ 1757 /* Inform AD package of unbinding of slave. */
1990 if (bond->params.mode == BOND_MODE_8023AD) { 1758 if (bond->params.mode == BOND_MODE_8023AD) {
1991 /* must be called before the slave is 1759 /* must be called before the slave is
@@ -2006,6 +1774,15 @@ static int __bond_release_one(struct net_device *bond_dev,
2006 /* release the slave from its bond */ 1774 /* release the slave from its bond */
2007 bond_detach_slave(bond, slave); 1775 bond_detach_slave(bond, slave);
2008 1776
1777 if (!all && !bond->params.fail_over_mac) {
1778 if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
1779 !list_empty(&bond->slave_list))
1780 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
1781 bond_dev->name, slave_dev->name,
1782 slave->perm_hwaddr,
1783 bond_dev->name, slave_dev->name);
1784 }
1785
2009 if (bond->primary_slave == slave) 1786 if (bond->primary_slave == slave)
2010 bond->primary_slave = NULL; 1787 bond->primary_slave = NULL;
2011 1788
@@ -2024,7 +1801,7 @@ static int __bond_release_one(struct net_device *bond_dev,
2024 } 1801 }
2025 1802
2026 if (all) { 1803 if (all) {
2027 bond->curr_active_slave = NULL; 1804 rcu_assign_pointer(bond->curr_active_slave, NULL);
2028 } else if (oldcurrent == slave) { 1805 } else if (oldcurrent == slave) {
2029 /* 1806 /*
2030 * Note that we hold RTNL over this sequence, so there 1807 * Note that we hold RTNL over this sequence, so there
@@ -2042,11 +1819,11 @@ static int __bond_release_one(struct net_device *bond_dev,
2042 write_lock_bh(&bond->lock); 1819 write_lock_bh(&bond->lock);
2043 } 1820 }
2044 1821
2045 if (bond->slave_cnt == 0) { 1822 if (list_empty(&bond->slave_list)) {
2046 bond_set_carrier(bond); 1823 bond_set_carrier(bond);
2047 eth_hw_addr_random(bond_dev); 1824 eth_hw_addr_random(bond_dev);
2048 1825
2049 if (bond_vlan_used(bond)) { 1826 if (vlan_uses_dev(bond_dev)) {
2050 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", 1827 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
2051 bond_dev->name, bond_dev->name); 1828 bond_dev->name, bond_dev->name);
2052 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", 1829 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2056,8 +1833,9 @@ static int __bond_release_one(struct net_device *bond_dev,
2056 1833
2057 write_unlock_bh(&bond->lock); 1834 write_unlock_bh(&bond->lock);
2058 unblock_netpoll_tx(); 1835 unblock_netpoll_tx();
1836 synchronize_rcu();
2059 1837
2060 if (bond->slave_cnt == 0) { 1838 if (list_empty(&bond->slave_list)) {
2061 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 1839 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2062 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); 1840 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2063 } 1841 }
@@ -2071,7 +1849,7 @@ static int __bond_release_one(struct net_device *bond_dev,
2071 /* must do this from outside any spinlocks */ 1849 /* must do this from outside any spinlocks */
2072 bond_destroy_slave_symlinks(bond_dev, slave_dev); 1850 bond_destroy_slave_symlinks(bond_dev, slave_dev);
2073 1851
2074 bond_del_vlans_from_slave(bond, slave_dev); 1852 vlan_vids_del_by_dev(slave_dev, bond_dev);
2075 1853
2076 /* If the mode USES_PRIMARY, then this cases was handled above by 1854 /* If the mode USES_PRIMARY, then this cases was handled above by
2077 * bond_change_active_slave(..., NULL) 1855 * bond_change_active_slave(..., NULL)
@@ -2128,7 +1906,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
2128 int ret; 1906 int ret;
2129 1907
2130 ret = bond_release(bond_dev, slave_dev); 1908 ret = bond_release(bond_dev, slave_dev);
2131 if ((ret == 0) && (bond->slave_cnt == 0)) { 1909 if (ret == 0 && list_empty(&bond->slave_list)) {
2132 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1910 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2133 pr_info("%s: destroying bond %s.\n", 1911 pr_info("%s: destroying bond %s.\n",
2134 bond_dev->name, bond_dev->name); 1912 bond_dev->name, bond_dev->name);
@@ -2165,23 +1943,19 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
2165 1943
2166 read_lock(&bond->lock); 1944 read_lock(&bond->lock);
2167 1945
2168 read_lock(&bond->curr_slave_lock);
2169 old_active = bond->curr_active_slave; 1946 old_active = bond->curr_active_slave;
2170 read_unlock(&bond->curr_slave_lock);
2171
2172 new_active = bond_get_slave_by_dev(bond, slave_dev); 1947 new_active = bond_get_slave_by_dev(bond, slave_dev);
2173
2174 /* 1948 /*
2175 * Changing to the current active: do nothing; return success. 1949 * Changing to the current active: do nothing; return success.
2176 */ 1950 */
2177 if (new_active && (new_active == old_active)) { 1951 if (new_active && new_active == old_active) {
2178 read_unlock(&bond->lock); 1952 read_unlock(&bond->lock);
2179 return 0; 1953 return 0;
2180 } 1954 }
2181 1955
2182 if ((new_active) && 1956 if (new_active &&
2183 (old_active) && 1957 old_active &&
2184 (new_active->link == BOND_LINK_UP) && 1958 new_active->link == BOND_LINK_UP &&
2185 IS_UP(new_active->dev)) { 1959 IS_UP(new_active->dev)) {
2186 block_netpoll_tx(); 1960 block_netpoll_tx();
2187 write_lock_bh(&bond->curr_slave_lock); 1961 write_lock_bh(&bond->curr_slave_lock);
@@ -2213,13 +1987,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2213static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) 1987static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2214{ 1988{
2215 struct bonding *bond = netdev_priv(bond_dev); 1989 struct bonding *bond = netdev_priv(bond_dev);
1990 int i = 0, res = -ENODEV;
2216 struct slave *slave; 1991 struct slave *slave;
2217 int i, res = -ENODEV;
2218 1992
2219 read_lock(&bond->lock); 1993 read_lock(&bond->lock);
2220 1994 bond_for_each_slave(bond, slave) {
2221 bond_for_each_slave(bond, slave, i) { 1995 if (i++ == (int)info->slave_id) {
2222 if (i == (int)info->slave_id) {
2223 res = 0; 1996 res = 0;
2224 strcpy(info->slave_name, slave->dev->name); 1997 strcpy(info->slave_name, slave->dev->name);
2225 info->link = slave->link; 1998 info->link = slave->link;
@@ -2228,7 +2001,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2228 break; 2001 break;
2229 } 2002 }
2230 } 2003 }
2231
2232 read_unlock(&bond->lock); 2004 read_unlock(&bond->lock);
2233 2005
2234 return res; 2006 return res;
@@ -2239,13 +2011,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2239 2011
2240static int bond_miimon_inspect(struct bonding *bond) 2012static int bond_miimon_inspect(struct bonding *bond)
2241{ 2013{
2014 int link_state, commit = 0;
2242 struct slave *slave; 2015 struct slave *slave;
2243 int i, link_state, commit = 0;
2244 bool ignore_updelay; 2016 bool ignore_updelay;
2245 2017
2246 ignore_updelay = !bond->curr_active_slave ? true : false; 2018 ignore_updelay = !bond->curr_active_slave ? true : false;
2247 2019
2248 bond_for_each_slave(bond, slave, i) { 2020 bond_for_each_slave(bond, slave) {
2249 slave->new_link = BOND_LINK_NOCHANGE; 2021 slave->new_link = BOND_LINK_NOCHANGE;
2250 2022
2251 link_state = bond_check_dev_link(bond, slave->dev, 0); 2023 link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2340,9 +2112,8 @@ static int bond_miimon_inspect(struct bonding *bond)
2340static void bond_miimon_commit(struct bonding *bond) 2112static void bond_miimon_commit(struct bonding *bond)
2341{ 2113{
2342 struct slave *slave; 2114 struct slave *slave;
2343 int i;
2344 2115
2345 bond_for_each_slave(bond, slave, i) { 2116 bond_for_each_slave(bond, slave) {
2346 switch (slave->new_link) { 2117 switch (slave->new_link) {
2347 case BOND_LINK_NOCHANGE: 2118 case BOND_LINK_NOCHANGE:
2348 continue; 2119 continue;
@@ -2447,7 +2218,7 @@ void bond_mii_monitor(struct work_struct *work)
2447 2218
2448 delay = msecs_to_jiffies(bond->params.miimon); 2219 delay = msecs_to_jiffies(bond->params.miimon);
2449 2220
2450 if (bond->slave_cnt == 0) 2221 if (list_empty(&bond->slave_list))
2451 goto re_arm; 2222 goto re_arm;
2452 2223
2453 should_notify_peers = bond_should_notify_peers(bond); 2224 should_notify_peers = bond_should_notify_peers(bond);
@@ -2479,35 +2250,32 @@ re_arm:
2479 read_unlock(&bond->lock); 2250 read_unlock(&bond->lock);
2480 2251
2481 if (should_notify_peers) { 2252 if (should_notify_peers) {
2482 if (!rtnl_trylock()) { 2253 if (!rtnl_trylock())
2483 read_lock(&bond->lock);
2484 bond->send_peer_notif++;
2485 read_unlock(&bond->lock);
2486 return; 2254 return;
2487 }
2488 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); 2255 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2489 rtnl_unlock(); 2256 rtnl_unlock();
2490 } 2257 }
2491} 2258}
2492 2259
2493static int bond_has_this_ip(struct bonding *bond, __be32 ip) 2260static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2494{ 2261{
2495 struct vlan_entry *vlan; 2262 struct net_device *upper;
2496 struct net_device *vlan_dev; 2263 struct list_head *iter;
2264 bool ret = false;
2497 2265
2498 if (ip == bond_confirm_addr(bond->dev, 0, ip)) 2266 if (ip == bond_confirm_addr(bond->dev, 0, ip))
2499 return 1; 2267 return true;
2500 2268
2501 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2269 rcu_read_lock();
2502 rcu_read_lock(); 2270 netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
2503 vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q), 2271 if (ip == bond_confirm_addr(upper, 0, ip)) {
2504 vlan->vlan_id); 2272 ret = true;
2505 rcu_read_unlock(); 2273 break;
2506 if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip)) 2274 }
2507 return 1;
2508 } 2275 }
2276 rcu_read_unlock();
2509 2277
2510 return 0; 2278 return ret;
2511} 2279}
2512 2280
2513/* 2281/*
@@ -2542,81 +2310,79 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2542 2310
2543static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2311static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2544{ 2312{
2545 int i, vlan_id; 2313 struct net_device *upper, *vlan_upper;
2546 __be32 *targets = bond->params.arp_targets; 2314 struct list_head *iter, *vlan_iter;
2547 struct vlan_entry *vlan;
2548 struct net_device *vlan_dev = NULL;
2549 struct rtable *rt; 2315 struct rtable *rt;
2316 __be32 *targets = bond->params.arp_targets, addr;
2317 int i, vlan_id;
2550 2318
2551 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 2319 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2552 __be32 addr;
2553 if (!targets[i])
2554 break;
2555 pr_debug("basa: target %pI4\n", &targets[i]); 2320 pr_debug("basa: target %pI4\n", &targets[i]);
2556 if (!bond_vlan_used(bond)) {
2557 pr_debug("basa: empty vlan: arp_send\n");
2558 addr = bond_confirm_addr(bond->dev, targets[i], 0);
2559 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2560 addr, 0);
2561 continue;
2562 }
2563 2321
2564 /* 2322 /* Find out through which dev should the packet go */
2565 * If VLANs are configured, we do a route lookup to
2566 * determine which VLAN interface would be used, so we
2567 * can tag the ARP with the proper VLAN tag.
2568 */
2569 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2323 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2570 RTO_ONLINK, 0); 2324 RTO_ONLINK, 0);
2571 if (IS_ERR(rt)) { 2325 if (IS_ERR(rt)) {
2572 if (net_ratelimit()) { 2326 pr_debug("%s: no route to arp_ip_target %pI4\n",
2573 pr_warning("%s: no route to arp_ip_target %pI4\n", 2327 bond->dev->name, &targets[i]);
2574 bond->dev->name, &targets[i]);
2575 }
2576 continue; 2328 continue;
2577 } 2329 }
2578 2330
2579 /* 2331 vlan_id = 0;
2580 * This target is not on a VLAN 2332
2333 /* bond device itself */
2334 if (rt->dst.dev == bond->dev)
2335 goto found;
2336
2337 rcu_read_lock();
2338 /* first we search only for vlan devices. for every vlan
2339 * found we verify its upper dev list, searching for the
2340 * rt->dst.dev. If found we save the tag of the vlan and
2341 * proceed to send the packet.
2342 *
2343 * TODO: QinQ?
2581 */ 2344 */
2582 if (rt->dst.dev == bond->dev) { 2345 netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
2583 ip_rt_put(rt); 2346 if (!is_vlan_dev(vlan_upper))
2584 pr_debug("basa: rtdev == bond->dev: arp_send\n"); 2347 continue;
2585 addr = bond_confirm_addr(bond->dev, targets[i], 0); 2348 netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
2586 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2349 if (upper == rt->dst.dev) {
2587 addr, 0); 2350 vlan_id = vlan_dev_vlan_id(vlan_upper);
2588 continue; 2351 rcu_read_unlock();
2352 goto found;
2353 }
2354 }
2589 } 2355 }
2590 2356
2591 vlan_id = 0; 2357 /* if the device we're looking for is not on top of any of
2592 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2358 * our upper vlans, then just search for any dev that
2593 rcu_read_lock(); 2359 * matches, and in case it's a vlan - save the id
2594 vlan_dev = __vlan_find_dev_deep(bond->dev, 2360 */
2595 htons(ETH_P_8021Q), 2361 netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
2596 vlan->vlan_id); 2362 if (upper == rt->dst.dev) {
2597 rcu_read_unlock(); 2363 /* if it's a vlan - get its VID */
2598 if (vlan_dev == rt->dst.dev) { 2364 if (is_vlan_dev(upper))
2599 vlan_id = vlan->vlan_id; 2365 vlan_id = vlan_dev_vlan_id(upper);
2600 pr_debug("basa: vlan match on %s %d\n", 2366
2601 vlan_dev->name, vlan_id); 2367 rcu_read_unlock();
2602 break; 2368 goto found;
2603 } 2369 }
2604 } 2370 }
2371 rcu_read_unlock();
2605 2372
2606 if (vlan_id && vlan_dev) { 2373 /* Not our device - skip */
2607 ip_rt_put(rt); 2374 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2608 addr = bond_confirm_addr(vlan_dev, targets[i], 0); 2375 bond->dev->name, &targets[i],
2609 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2376 rt->dst.dev ? rt->dst.dev->name : "NULL");
2610 addr, vlan_id);
2611 continue;
2612 }
2613 2377
2614 if (net_ratelimit()) {
2615 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2616 bond->dev->name, &targets[i],
2617 rt->dst.dev ? rt->dst.dev->name : "NULL");
2618 }
2619 ip_rt_put(rt); 2378 ip_rt_put(rt);
2379 continue;
2380
2381found:
2382 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2383 ip_rt_put(rt);
2384 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2385 addr, vlan_id);
2620 } 2386 }
2621} 2387}
2622 2388
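The rewritten bond_arp_send_all() above resolves the egress device for each ARP target with a route lookup, then searches the bond's upper devices to recover a VLAN tag: first the VLANs stacked directly on the bond (checking each one's own uppers for the route's device), then any direct upper. A minimal userspace sketch of that two-pass search, assuming a toy struct dev with NULL-terminated uppers[] arrays in place of the kernel's upper-device lists; is_vlan and vid stand in for is_vlan_dev() and vlan_dev_vlan_id(), and none of this is kernel API:

#include <stdio.h>

struct dev {
        const char *name;
        int is_vlan;
        int vid;
        struct dev **uppers;            /* NULL-terminated upper devices */
};

/* Returns the VLAN id to tag the ARP with, 0 for untagged,
 * -1 if the egress device is not reachable through this bond. */
static int resolve_vlan_id(struct dev *bond, struct dev *egress)
{
        if (egress == bond)
                return 0;               /* route goes out the bond itself */

        /* pass 1: vlans on top of the bond, then their upper devices */
        for (struct dev **v = bond->uppers; v && *v; v++) {
                if (!(*v)->is_vlan)
                        continue;
                for (struct dev **u = (*v)->uppers; u && *u; u++)
                        if (*u == egress)
                                return (*v)->vid;
        }

        /* pass 2: any direct upper; if it is a vlan, keep its id */
        for (struct dev **u = bond->uppers; u && *u; u++)
                if (*u == egress)
                        return (*u)->is_vlan ? (*u)->vid : 0;

        return -1;
}

int main(void)
{
        struct dev bridge = { "br0", 0, 0, NULL };
        struct dev *vlan_up[] = { &bridge, NULL };
        struct dev vlan = { "bond0.100", 1, 100, vlan_up };
        struct dev *bond_up[] = { &vlan, NULL };
        struct dev bond = { "bond0", 0, 0, bond_up };

        printf("%d\n", resolve_vlan_id(&bond, &bridge));  /* 100 */
        printf("%d\n", resolve_vlan_id(&bond, &bond));    /* 0   */
        return 0;
}

As the kernel comment's TODO notes, only one level of VLAN stacking (no QinQ) is handled; the sketch shares that limitation.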
@@ -2713,6 +2479,20 @@ out_unlock:
2713 return RX_HANDLER_ANOTHER; 2479 return RX_HANDLER_ANOTHER;
2714} 2480}
2715 2481
2482/* function to verify if we're in the arp_interval timeslice, returns true if
2483 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 2484 * arp_interval/2). The arp_interval/2 is needed for really fast networks.
2485 */
2486static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
2487 int mod)
2488{
2489 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2490
2491 return time_in_range(jiffies,
2492 last_act - delta_in_ticks,
2493 last_act + mod * delta_in_ticks + delta_in_ticks/2);
2494}
2495
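bond_time_in_interval() centralizes the window check that the ARP monitors below previously open-coded with paired time_in_range() calls: mod picks how many whole arp_intervals of slack are allowed, and the extra half interval absorbs probes answered within the same jiffy. A small userspace model of the window, assuming plain unsigned arithmetic rather than the wraparound-safe jiffies comparisons time_in_range() performs:

#include <stdbool.h>
#include <stdio.h>

/* Window: last_act - interval <= now <= last_act + mod*interval + interval/2.
 * interval_ticks models msecs_to_jiffies(bond->params.arp_interval). */
static bool time_in_interval(unsigned long now, unsigned long last_act,
                             unsigned long interval_ticks, int mod)
{
        return now >= last_act - interval_ticks &&
               now <= last_act + mod * interval_ticks + interval_ticks / 2;
}

int main(void)
{
        /* interval 100, mod 1: window is [900, 1150] around last_act 1000 */
        printf("%d\n", time_in_interval(1149, 1000, 100, 1));   /* 1 */
        printf("%d\n", time_in_interval(1151, 1000, 100, 1));   /* 0 */
        return 0;
}

The callers below use mod 1 to confirm a link coming up and mod 2 or 3 before taking one down, which keeps the old open-coded windows intact.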
2716/* 2496/*
2717 * this function is called regularly to monitor each slave's link 2497 * this function is called regularly to monitor each slave's link
2718 * ensuring that traffic is being sent and received when arp monitoring 2498 * ensuring that traffic is being sent and received when arp monitoring
@@ -2726,21 +2506,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2726 arp_work.work); 2506 arp_work.work);
2727 struct slave *slave, *oldcurrent; 2507 struct slave *slave, *oldcurrent;
2728 int do_failover = 0; 2508 int do_failover = 0;
2729 int delta_in_ticks, extra_ticks;
2730 int i;
2731 2509
2732 read_lock(&bond->lock); 2510 read_lock(&bond->lock);
2733 2511
2734 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2512 if (list_empty(&bond->slave_list))
2735 extra_ticks = delta_in_ticks / 2;
2736
2737 if (bond->slave_cnt == 0)
2738 goto re_arm; 2513 goto re_arm;
2739 2514
2740 read_lock(&bond->curr_slave_lock);
2741 oldcurrent = bond->curr_active_slave; 2515 oldcurrent = bond->curr_active_slave;
2742 read_unlock(&bond->curr_slave_lock);
2743
2744 /* see if any of the previous devices are up now (i.e. they have 2516 /* see if any of the previous devices are up now (i.e. they have
2745 * xmt and rcv traffic). the curr_active_slave does not come into 2517 * xmt and rcv traffic). the curr_active_slave does not come into
2746 * the picture unless it is null. also, slave->jiffies is not needed 2518 * the picture unless it is null. also, slave->jiffies is not needed
@@ -2749,16 +2521,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2749 * TODO: what about up/down delay in arp mode? it wasn't here before 2521 * TODO: what about up/down delay in arp mode? it wasn't here before
2750 * so it can wait 2522 * so it can wait
2751 */ 2523 */
2752 bond_for_each_slave(bond, slave, i) { 2524 bond_for_each_slave(bond, slave) {
2753 unsigned long trans_start = dev_trans_start(slave->dev); 2525 unsigned long trans_start = dev_trans_start(slave->dev);
2754 2526
2755 if (slave->link != BOND_LINK_UP) { 2527 if (slave->link != BOND_LINK_UP) {
2756 if (time_in_range(jiffies, 2528 if (bond_time_in_interval(bond, trans_start, 1) &&
2757 trans_start - delta_in_ticks, 2529 bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
2758 trans_start + delta_in_ticks + extra_ticks) &&
2759 time_in_range(jiffies,
2760 slave->dev->last_rx - delta_in_ticks,
2761 slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
2762 2530
2763 slave->link = BOND_LINK_UP; 2531 slave->link = BOND_LINK_UP;
2764 bond_set_active_slave(slave); 2532 bond_set_active_slave(slave);
@@ -2786,12 +2554,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2786 * when the source ip is 0, so don't take the link down 2554 * when the source ip is 0, so don't take the link down
2787 * if we don't know our ip yet 2555 * if we don't know our ip yet
2788 */ 2556 */
2789 if (!time_in_range(jiffies, 2557 if (!bond_time_in_interval(bond, trans_start, 2) ||
2790 trans_start - delta_in_ticks, 2558 !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
2791 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2792 !time_in_range(jiffies,
2793 slave->dev->last_rx - delta_in_ticks,
2794 slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
2795 2559
2796 slave->link = BOND_LINK_DOWN; 2560 slave->link = BOND_LINK_DOWN;
2797 bond_set_backup_slave(slave); 2561 bond_set_backup_slave(slave);
@@ -2831,7 +2595,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2831 2595
2832re_arm: 2596re_arm:
2833 if (bond->params.arp_interval) 2597 if (bond->params.arp_interval)
2834 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 2598 queue_delayed_work(bond->wq, &bond->arp_work,
2599 msecs_to_jiffies(bond->params.arp_interval));
2835 2600
2836 read_unlock(&bond->lock); 2601 read_unlock(&bond->lock);
2837} 2602}
@@ -2844,32 +2609,21 @@ re_arm:
2844 * 2609 *
2845 * Called with bond->lock held for read. 2610 * Called with bond->lock held for read.
2846 */ 2611 */
2847static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) 2612static int bond_ab_arp_inspect(struct bonding *bond)
2848{ 2613{
2614 unsigned long trans_start, last_rx;
2849 struct slave *slave; 2615 struct slave *slave;
2850 int i, commit = 0; 2616 int commit = 0;
2851 unsigned long trans_start;
2852 int extra_ticks;
2853
2854 /* All the time comparisons below need some extra time. Otherwise, on
2855 * fast networks the ARP probe/reply may arrive within the same jiffy
2856 * as it was sent. Then, the next time the ARP monitor is run, one
2857 * arp_interval will already have passed in the comparisons.
2858 */
2859 extra_ticks = delta_in_ticks / 2;
2860 2617
2861 bond_for_each_slave(bond, slave, i) { 2618 bond_for_each_slave(bond, slave) {
2862 slave->new_link = BOND_LINK_NOCHANGE; 2619 slave->new_link = BOND_LINK_NOCHANGE;
2620 last_rx = slave_last_rx(bond, slave);
2863 2621
2864 if (slave->link != BOND_LINK_UP) { 2622 if (slave->link != BOND_LINK_UP) {
2865 if (time_in_range(jiffies, 2623 if (bond_time_in_interval(bond, last_rx, 1)) {
2866 slave_last_rx(bond, slave) - delta_in_ticks,
2867 slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
2868
2869 slave->new_link = BOND_LINK_UP; 2624 slave->new_link = BOND_LINK_UP;
2870 commit++; 2625 commit++;
2871 } 2626 }
2872
2873 continue; 2627 continue;
2874 } 2628 }
2875 2629
@@ -2878,9 +2632,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2878 * active. This avoids bouncing, as the last receive 2632 * active. This avoids bouncing, as the last receive
2879 * times need a full ARP monitor cycle to be updated. 2633 * times need a full ARP monitor cycle to be updated.
2880 */ 2634 */
2881 if (time_in_range(jiffies, 2635 if (bond_time_in_interval(bond, slave->jiffies, 2))
2882 slave->jiffies - delta_in_ticks,
2883 slave->jiffies + 2 * delta_in_ticks + extra_ticks))
2884 continue; 2636 continue;
2885 2637
2886 /* 2638 /*
@@ -2898,10 +2650,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2898 */ 2650 */
2899 if (!bond_is_active_slave(slave) && 2651 if (!bond_is_active_slave(slave) &&
2900 !bond->current_arp_slave && 2652 !bond->current_arp_slave &&
2901 !time_in_range(jiffies, 2653 !bond_time_in_interval(bond, last_rx, 3)) {
2902 slave_last_rx(bond, slave) - delta_in_ticks,
2903 slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
2904
2905 slave->new_link = BOND_LINK_DOWN; 2654 slave->new_link = BOND_LINK_DOWN;
2906 commit++; 2655 commit++;
2907 } 2656 }
@@ -2914,13 +2663,8 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2914 */ 2663 */
2915 trans_start = dev_trans_start(slave->dev); 2664 trans_start = dev_trans_start(slave->dev);
2916 if (bond_is_active_slave(slave) && 2665 if (bond_is_active_slave(slave) &&
2917 (!time_in_range(jiffies, 2666 (!bond_time_in_interval(bond, trans_start, 2) ||
2918 trans_start - delta_in_ticks, 2667 !bond_time_in_interval(bond, last_rx, 2))) {
2919 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2920 !time_in_range(jiffies,
2921 slave_last_rx(bond, slave) - delta_in_ticks,
2922 slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
2923
2924 slave->new_link = BOND_LINK_DOWN; 2668 slave->new_link = BOND_LINK_DOWN;
2925 commit++; 2669 commit++;
2926 } 2670 }
@@ -2935,24 +2679,21 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2935 * 2679 *
2936 * Called with RTNL and bond->lock for read. 2680 * Called with RTNL and bond->lock for read.
2937 */ 2681 */
2938static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) 2682static void bond_ab_arp_commit(struct bonding *bond)
2939{ 2683{
2940 struct slave *slave;
2941 int i;
2942 unsigned long trans_start; 2684 unsigned long trans_start;
2685 struct slave *slave;
2943 2686
2944 bond_for_each_slave(bond, slave, i) { 2687 bond_for_each_slave(bond, slave) {
2945 switch (slave->new_link) { 2688 switch (slave->new_link) {
2946 case BOND_LINK_NOCHANGE: 2689 case BOND_LINK_NOCHANGE:
2947 continue; 2690 continue;
2948 2691
2949 case BOND_LINK_UP: 2692 case BOND_LINK_UP:
2950 trans_start = dev_trans_start(slave->dev); 2693 trans_start = dev_trans_start(slave->dev);
2951 if ((!bond->curr_active_slave && 2694 if (bond->curr_active_slave != slave ||
2952 time_in_range(jiffies, 2695 (!bond->curr_active_slave &&
2953 trans_start - delta_in_ticks, 2696 bond_time_in_interval(bond, trans_start, 1))) {
2954 trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
2955 bond->curr_active_slave != slave) {
2956 slave->link = BOND_LINK_UP; 2697 slave->link = BOND_LINK_UP;
2957 if (bond->current_arp_slave) { 2698 if (bond->current_arp_slave) {
2958 bond_set_slave_inactive_flags( 2699 bond_set_slave_inactive_flags(
@@ -3014,7 +2755,7 @@ do_failover:
3014 */ 2755 */
3015static void bond_ab_arp_probe(struct bonding *bond) 2756static void bond_ab_arp_probe(struct bonding *bond)
3016{ 2757{
3017 struct slave *slave; 2758 struct slave *slave, *next_slave;
3018 int i; 2759 int i;
3019 2760
3020 read_lock(&bond->curr_slave_lock); 2761 read_lock(&bond->curr_slave_lock);
@@ -3038,7 +2779,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
3038 */ 2779 */
3039 2780
3040 if (!bond->current_arp_slave) { 2781 if (!bond->current_arp_slave) {
3041 bond->current_arp_slave = bond->first_slave; 2782 bond->current_arp_slave = bond_first_slave(bond);
3042 if (!bond->current_arp_slave) 2783 if (!bond->current_arp_slave)
3043 return; 2784 return;
3044 } 2785 }
@@ -3046,7 +2787,8 @@ static void bond_ab_arp_probe(struct bonding *bond)
3046 bond_set_slave_inactive_flags(bond->current_arp_slave); 2787 bond_set_slave_inactive_flags(bond->current_arp_slave);
3047 2788
3048 /* search for next candidate */ 2789 /* search for next candidate */
3049 bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) { 2790 next_slave = bond_next_slave(bond, bond->current_arp_slave);
2791 bond_for_each_slave_from(bond, slave, i, next_slave) {
3050 if (IS_UP(slave->dev)) { 2792 if (IS_UP(slave->dev)) {
3051 slave->link = BOND_LINK_BACK; 2793 slave->link = BOND_LINK_BACK;
3052 bond_set_slave_active_flags(slave); 2794 bond_set_slave_active_flags(slave);
@@ -3087,12 +2829,12 @@ void bond_activebackup_arp_mon(struct work_struct *work)
3087 2829
3088 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2830 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3089 2831
3090 if (bond->slave_cnt == 0) 2832 if (list_empty(&bond->slave_list))
3091 goto re_arm; 2833 goto re_arm;
3092 2834
3093 should_notify_peers = bond_should_notify_peers(bond); 2835 should_notify_peers = bond_should_notify_peers(bond);
3094 2836
3095 if (bond_ab_arp_inspect(bond, delta_in_ticks)) { 2837 if (bond_ab_arp_inspect(bond)) {
3096 read_unlock(&bond->lock); 2838 read_unlock(&bond->lock);
3097 2839
3098 /* Race avoidance with bond_close flush of workqueue */ 2840 /* Race avoidance with bond_close flush of workqueue */
@@ -3105,7 +2847,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
3105 2847
3106 read_lock(&bond->lock); 2848 read_lock(&bond->lock);
3107 2849
3108 bond_ab_arp_commit(bond, delta_in_ticks); 2850 bond_ab_arp_commit(bond);
3109 2851
3110 read_unlock(&bond->lock); 2852 read_unlock(&bond->lock);
3111 rtnl_unlock(); 2853 rtnl_unlock();
@@ -3121,12 +2863,8 @@ re_arm:
3121 read_unlock(&bond->lock); 2863 read_unlock(&bond->lock);
3122 2864
3123 if (should_notify_peers) { 2865 if (should_notify_peers) {
3124 if (!rtnl_trylock()) { 2866 if (!rtnl_trylock())
3125 read_lock(&bond->lock);
3126 bond->send_peer_notif++;
3127 read_unlock(&bond->lock);
3128 return; 2867 return;
3129 }
3130 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); 2868 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
3131 rtnl_unlock(); 2869 rtnl_unlock();
3132 } 2870 }
@@ -3161,6 +2899,10 @@ static int bond_master_netdev_event(unsigned long event,
3161 case NETDEV_REGISTER: 2899 case NETDEV_REGISTER:
3162 bond_create_proc_entry(event_bond); 2900 bond_create_proc_entry(event_bond);
3163 break; 2901 break;
2902 case NETDEV_NOTIFY_PEERS:
2903 if (event_bond->send_peer_notif)
2904 event_bond->send_peer_notif--;
2905 break;
3164 default: 2906 default:
3165 break; 2907 break;
3166 } 2908 }
@@ -3234,6 +2976,10 @@ static int bond_slave_netdev_event(unsigned long event,
3234 case NETDEV_FEAT_CHANGE: 2976 case NETDEV_FEAT_CHANGE:
3235 bond_compute_features(bond); 2977 bond_compute_features(bond);
3236 break; 2978 break;
2979 case NETDEV_RESEND_IGMP:
2980 /* Propagate to master device */
2981 call_netdevice_notifiers(event, slave->bond->dev);
2982 break;
3237 default: 2983 default:
3238 break; 2984 break;
3239 } 2985 }
@@ -3403,13 +3149,12 @@ static int bond_open(struct net_device *bond_dev)
3403{ 3149{
3404 struct bonding *bond = netdev_priv(bond_dev); 3150 struct bonding *bond = netdev_priv(bond_dev);
3405 struct slave *slave; 3151 struct slave *slave;
3406 int i;
3407 3152
3408 /* reset slave->backup and slave->inactive */ 3153 /* reset slave->backup and slave->inactive */
3409 read_lock(&bond->lock); 3154 read_lock(&bond->lock);
3410 if (bond->slave_cnt > 0) { 3155 if (!list_empty(&bond->slave_list)) {
3411 read_lock(&bond->curr_slave_lock); 3156 read_lock(&bond->curr_slave_lock);
3412 bond_for_each_slave(bond, slave, i) { 3157 bond_for_each_slave(bond, slave) {
3413 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) 3158 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3414 && (slave != bond->curr_active_slave)) { 3159 && (slave != bond->curr_active_slave)) {
3415 bond_set_slave_inactive_flags(slave); 3160 bond_set_slave_inactive_flags(slave);
@@ -3455,17 +3200,10 @@ static int bond_close(struct net_device *bond_dev)
3455{ 3200{
3456 struct bonding *bond = netdev_priv(bond_dev); 3201 struct bonding *bond = netdev_priv(bond_dev);
3457 3202
3458 write_lock_bh(&bond->lock);
3459 bond->send_peer_notif = 0;
3460 write_unlock_bh(&bond->lock);
3461
3462 bond_work_cancel_all(bond); 3203 bond_work_cancel_all(bond);
3463 if (bond_is_lb(bond)) { 3204 bond->send_peer_notif = 0;
3464 /* Must be called only after all 3205 if (bond_is_lb(bond))
3465 * slaves have been released
3466 */
3467 bond_alb_deinitialize(bond); 3206 bond_alb_deinitialize(bond);
3468 }
3469 bond->recv_probe = NULL; 3207 bond->recv_probe = NULL;
3470 3208
3471 return 0; 3209 return 0;
@@ -3477,13 +3215,11 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3477 struct bonding *bond = netdev_priv(bond_dev); 3215 struct bonding *bond = netdev_priv(bond_dev);
3478 struct rtnl_link_stats64 temp; 3216 struct rtnl_link_stats64 temp;
3479 struct slave *slave; 3217 struct slave *slave;
3480 int i;
3481 3218
3482 memset(stats, 0, sizeof(*stats)); 3219 memset(stats, 0, sizeof(*stats));
3483 3220
3484 read_lock_bh(&bond->lock); 3221 read_lock_bh(&bond->lock);
3485 3222 bond_for_each_slave(bond, slave) {
3486 bond_for_each_slave(bond, slave, i) {
3487 const struct rtnl_link_stats64 *sstats = 3223 const struct rtnl_link_stats64 *sstats =
3488 dev_get_stats(slave->dev, &temp); 3224 dev_get_stats(slave->dev, &temp);
3489 3225
@@ -3513,7 +3249,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3513 stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; 3249 stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
3514 stats->tx_window_errors += sstats->tx_window_errors; 3250 stats->tx_window_errors += sstats->tx_window_errors;
3515 } 3251 }
3516
3517 read_unlock_bh(&bond->lock); 3252 read_unlock_bh(&bond->lock);
3518 3253
3519 return stats; 3254 return stats;
@@ -3652,41 +3387,35 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
3652{ 3387{
3653 struct bonding *bond = netdev_priv(bond_dev); 3388 struct bonding *bond = netdev_priv(bond_dev);
3654 struct slave *slave; 3389 struct slave *slave;
3655 int i;
3656 3390
3657 read_lock(&bond->lock); 3391 ASSERT_RTNL();
3658 3392
3659 if (USES_PRIMARY(bond->params.mode)) { 3393 if (USES_PRIMARY(bond->params.mode)) {
3660 read_lock(&bond->curr_slave_lock); 3394 slave = rtnl_dereference(bond->curr_active_slave);
3661 slave = bond->curr_active_slave;
3662 if (slave) { 3395 if (slave) {
3663 dev_uc_sync(slave->dev, bond_dev); 3396 dev_uc_sync(slave->dev, bond_dev);
3664 dev_mc_sync(slave->dev, bond_dev); 3397 dev_mc_sync(slave->dev, bond_dev);
3665 } 3398 }
3666 read_unlock(&bond->curr_slave_lock);
3667 } else { 3399 } else {
3668 bond_for_each_slave(bond, slave, i) { 3400 bond_for_each_slave(bond, slave) {
3669 dev_uc_sync_multiple(slave->dev, bond_dev); 3401 dev_uc_sync_multiple(slave->dev, bond_dev);
3670 dev_mc_sync_multiple(slave->dev, bond_dev); 3402 dev_mc_sync_multiple(slave->dev, bond_dev);
3671 } 3403 }
3672 } 3404 }
3673
3674 read_unlock(&bond->lock);
3675} 3405}
3676 3406
3677static int bond_neigh_init(struct neighbour *n) 3407static int bond_neigh_init(struct neighbour *n)
3678{ 3408{
3679 struct bonding *bond = netdev_priv(n->dev); 3409 struct bonding *bond = netdev_priv(n->dev);
3680 struct slave *slave = bond->first_slave;
3681 const struct net_device_ops *slave_ops; 3410 const struct net_device_ops *slave_ops;
3682 struct neigh_parms parms; 3411 struct neigh_parms parms;
3412 struct slave *slave;
3683 int ret; 3413 int ret;
3684 3414
3415 slave = bond_first_slave(bond);
3685 if (!slave) 3416 if (!slave)
3686 return 0; 3417 return 0;
3687
3688 slave_ops = slave->dev->netdev_ops; 3418 slave_ops = slave->dev->netdev_ops;
3689
3690 if (!slave_ops->ndo_neigh_setup) 3419 if (!slave_ops->ndo_neigh_setup)
3691 return 0; 3420 return 0;
3692 3421
@@ -3735,9 +3464,8 @@ static int bond_neigh_setup(struct net_device *dev,
3735static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) 3464static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3736{ 3465{
3737 struct bonding *bond = netdev_priv(bond_dev); 3466 struct bonding *bond = netdev_priv(bond_dev);
3738 struct slave *slave, *stop_at; 3467 struct slave *slave;
3739 int res = 0; 3468 int res = 0;
3740 int i;
3741 3469
3742 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond, 3470 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
3743 (bond_dev ? bond_dev->name : "None"), new_mtu); 3471 (bond_dev ? bond_dev->name : "None"), new_mtu);
@@ -3757,10 +3485,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3757 * call to the base driver. 3485 * call to the base driver.
3758 */ 3486 */
3759 3487
3760 bond_for_each_slave(bond, slave, i) { 3488 bond_for_each_slave(bond, slave) {
3761 pr_debug("s %p s->p %p c_m %p\n", 3489 pr_debug("s %p s->p %p c_m %p\n",
3762 slave, 3490 slave,
3763 slave->prev, 3491 bond_prev_slave(bond, slave),
3764 slave->dev->netdev_ops->ndo_change_mtu); 3492 slave->dev->netdev_ops->ndo_change_mtu);
3765 3493
3766 res = dev_set_mtu(slave->dev, new_mtu); 3494 res = dev_set_mtu(slave->dev, new_mtu);
@@ -3785,8 +3513,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3785 3513
3786unwind: 3514unwind:
3787 /* unwind from head to the slave that failed */ 3515 /* unwind from head to the slave that failed */
3788 stop_at = slave; 3516 bond_for_each_slave_continue_reverse(bond, slave) {
3789 bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
3790 int tmp_res; 3517 int tmp_res;
3791 3518
3792 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu); 3519 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
@@ -3810,9 +3537,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3810{ 3537{
3811 struct bonding *bond = netdev_priv(bond_dev); 3538 struct bonding *bond = netdev_priv(bond_dev);
3812 struct sockaddr *sa = addr, tmp_sa; 3539 struct sockaddr *sa = addr, tmp_sa;
3813 struct slave *slave, *stop_at; 3540 struct slave *slave;
3814 int res = 0; 3541 int res = 0;
3815 int i;
3816 3542
3817 if (bond->params.mode == BOND_MODE_ALB) 3543 if (bond->params.mode == BOND_MODE_ALB)
3818 return bond_alb_set_mac_address(bond_dev, addr); 3544 return bond_alb_set_mac_address(bond_dev, addr);
@@ -3845,7 +3571,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3845 * call to the base driver. 3571 * call to the base driver.
3846 */ 3572 */
3847 3573
3848 bond_for_each_slave(bond, slave, i) { 3574 bond_for_each_slave(bond, slave) {
3849 const struct net_device_ops *slave_ops = slave->dev->netdev_ops; 3575 const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
3850 pr_debug("slave %p %s\n", slave, slave->dev->name); 3576 pr_debug("slave %p %s\n", slave, slave->dev->name);
3851 3577
@@ -3877,8 +3603,7 @@ unwind:
3877 tmp_sa.sa_family = bond_dev->type; 3603 tmp_sa.sa_family = bond_dev->type;
3878 3604
3879 /* unwind from head to the slave that failed */ 3605 /* unwind from head to the slave that failed */
3880 stop_at = slave; 3606 bond_for_each_slave_continue_reverse(bond, slave) {
3881 bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
3882 int tmp_res; 3607 int tmp_res;
3883 3608
3884 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa); 3609 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
@@ -3891,12 +3616,50 @@ unwind:
3891 return res; 3616 return res;
3892} 3617}
3893 3618
3619/**
3620 * bond_xmit_slave_id - transmit skb through slave with slave_id
3621 * @bond: bonding device that is transmitting
3622 * @skb: buffer to transmit
3623 * @slave_id: slave id up to slave_cnt-1 through which to transmit
3624 *
3625 * This function tries to transmit through slave with slave_id but in case
3626 * it fails, it tries to find the first available slave for transmission.
3627 * The skb is consumed in all cases, thus the function is void.
3628 */
3629void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
3630{
3631 struct slave *slave;
3632 int i = slave_id;
3633
3634 /* Here we start from the slave with slave_id */
3635 bond_for_each_slave_rcu(bond, slave) {
3636 if (--i < 0) {
3637 if (slave_can_tx(slave)) {
3638 bond_dev_queue_xmit(bond, skb, slave->dev);
3639 return;
3640 }
3641 }
3642 }
3643
3644 /* Here we start from the first slave up to slave_id */
3645 i = slave_id;
3646 bond_for_each_slave_rcu(bond, slave) {
3647 if (--i < 0)
3648 break;
3649 if (slave_can_tx(slave)) {
3650 bond_dev_queue_xmit(bond, skb, slave->dev);
3651 return;
3652 }
3653 }
3654 /* no slave that can tx has been found */
3655 kfree_skb(skb);
3656}
3657
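bond_xmit_slave_id() walks the RCU slave list twice to get "start at slave_id, wrap around, take the first slave that can transmit" semantics: the first pass skips the slave_id entries before the start point, the second pass re-covers them. The same selection expressed over an array, as an illustrative model in which can_tx[] stands in for slave_can_tx():

#include <stdio.h>

static int pick_slave(const int *can_tx, int cnt, int slave_id)
{
        int i;

        /* slaves at index slave_id..cnt-1 first ... */
        for (i = slave_id; i < cnt; i++)
                if (can_tx[i])
                        return i;
        /* ... then wrap around to index 0..slave_id-1 */
        for (i = 0; i < slave_id; i++)
                if (can_tx[i])
                        return i;
        return -1;              /* none can tx: the skb is freed */
}

int main(void)
{
        int can_tx[] = { 0, 1, 1, 0 };

        printf("%d\n", pick_slave(can_tx, 4, 3));       /* wraps to 1 */
        printf("%d\n", pick_slave(can_tx, 4, 1));       /* 1 */
        return 0;
}

bond_xmit_roundrobin() feeds it rr_tx_counter++ % bond->slave_cnt and bond_xmit_xor() below passes the hash result, so both modes share the same fallback.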
3894static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) 3658static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
3895{ 3659{
3896 struct bonding *bond = netdev_priv(bond_dev); 3660 struct bonding *bond = netdev_priv(bond_dev);
3897 struct slave *slave, *start_at;
3898 int i, slave_no, res = 1;
3899 struct iphdr *iph = ip_hdr(skb); 3661 struct iphdr *iph = ip_hdr(skb);
3662 struct slave *slave;
3900 3663
3901 /* 3664 /*
3902 * Start with the curr_active_slave that joined the bond as the 3665 * Start with the curr_active_slave that joined the bond as the
@@ -3905,50 +3668,20 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
3905 * send the join/membership reports. The curr_active_slave found 3668 * send the join/membership reports. The curr_active_slave found
3906 * will send all of this type of traffic. 3669 * will send all of this type of traffic.
3907 */ 3670 */
3908 if ((iph->protocol == IPPROTO_IGMP) && 3671 if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
3909 (skb->protocol == htons(ETH_P_IP))) { 3672 slave = rcu_dereference(bond->curr_active_slave);
3910 3673 if (slave && slave_can_tx(slave))
3911 read_lock(&bond->curr_slave_lock); 3674 bond_dev_queue_xmit(bond, skb, slave->dev);
3912 slave = bond->curr_active_slave; 3675 else
3913 read_unlock(&bond->curr_slave_lock); 3676 bond_xmit_slave_id(bond, skb, 0);
3914
3915 if (!slave)
3916 goto out;
3917 } else { 3677 } else {
3918 /* 3678 bond_xmit_slave_id(bond, skb,
3919 * Concurrent TX may collide on rr_tx_counter; we accept 3679 bond->rr_tx_counter++ % bond->slave_cnt);
3920 * that as being rare enough not to justify using an
3921 * atomic op here.
3922 */
3923 slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
3924
3925 bond_for_each_slave(bond, slave, i) {
3926 slave_no--;
3927 if (slave_no < 0)
3928 break;
3929 }
3930 }
3931
3932 start_at = slave;
3933 bond_for_each_slave_from(bond, slave, i, start_at) {
3934 if (IS_UP(slave->dev) &&
3935 (slave->link == BOND_LINK_UP) &&
3936 bond_is_active_slave(slave)) {
3937 res = bond_dev_queue_xmit(bond, skb, slave->dev);
3938 break;
3939 }
3940 }
3941
3942out:
3943 if (res) {
3944 /* no suitable interface, frame not sent */
3945 kfree_skb(skb);
3946 } 3680 }
3947 3681
3948 return NETDEV_TX_OK; 3682 return NETDEV_TX_OK;
3949} 3683}
3950 3684
3951
3952/* 3685/*
3953 * in active-backup mode, we know that bond->curr_active_slave is always valid if 3686 * in active-backup mode, we know that bond->curr_active_slave is always valid if
3954 * the bond has a usable interface. 3687 * the bond has a usable interface.
@@ -3956,18 +3689,12 @@ out:
3956static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) 3689static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
3957{ 3690{
3958 struct bonding *bond = netdev_priv(bond_dev); 3691 struct bonding *bond = netdev_priv(bond_dev);
3959 int res = 1; 3692 struct slave *slave;
3960
3961 read_lock(&bond->curr_slave_lock);
3962
3963 if (bond->curr_active_slave)
3964 res = bond_dev_queue_xmit(bond, skb,
3965 bond->curr_active_slave->dev);
3966
3967 read_unlock(&bond->curr_slave_lock);
3968 3693
3969 if (res) 3694 slave = rcu_dereference(bond->curr_active_slave);
3970 /* no suitable interface, frame not sent */ 3695 if (slave)
3696 bond_dev_queue_xmit(bond, skb, slave->dev);
3697 else
3971 kfree_skb(skb); 3698 kfree_skb(skb);
3972 3699
3973 return NETDEV_TX_OK; 3700 return NETDEV_TX_OK;
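bond_xmit_activebackup() now reads curr_active_slave with rcu_dereference() instead of taking curr_slave_lock. A rough userspace analogue of the reader side, using a C11 acquire/release atomic pointer in place of rcu_dereference()/rcu_assign_pointer(); real RCU additionally defers freeing the old slave until all readers have finished, which this model omits:

#include <stdatomic.h>
#include <stdio.h>

struct slave { const char *name; };

static _Atomic(struct slave *) curr_active;

static void xmit(const char *frame)
{
        struct slave *s = atomic_load_explicit(&curr_active,
                                               memory_order_acquire);

        if (s)
                printf("tx %s via %s\n", frame, s->name);
        else
                printf("drop %s\n", frame);     /* no usable slave */
}

int main(void)
{
        static struct slave eth0 = { "eth0" };

        atomic_store_explicit(&curr_active, &eth0, memory_order_release);
        xmit("frame");
        return 0;
}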
@@ -3981,87 +3708,39 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
3981static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) 3708static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
3982{ 3709{
3983 struct bonding *bond = netdev_priv(bond_dev); 3710 struct bonding *bond = netdev_priv(bond_dev);
3984 struct slave *slave, *start_at;
3985 int slave_no;
3986 int i;
3987 int res = 1;
3988 3711
3989 slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); 3712 bond_xmit_slave_id(bond, skb,
3990 3713 bond->xmit_hash_policy(skb, bond->slave_cnt));
3991 bond_for_each_slave(bond, slave, i) {
3992 slave_no--;
3993 if (slave_no < 0)
3994 break;
3995 }
3996
3997 start_at = slave;
3998
3999 bond_for_each_slave_from(bond, slave, i, start_at) {
4000 if (IS_UP(slave->dev) &&
4001 (slave->link == BOND_LINK_UP) &&
4002 bond_is_active_slave(slave)) {
4003 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4004 break;
4005 }
4006 }
4007
4008 if (res) {
4009 /* no suitable interface, frame not sent */
4010 kfree_skb(skb);
4011 }
4012 3714
4013 return NETDEV_TX_OK; 3715 return NETDEV_TX_OK;
4014} 3716}
4015 3717
4016/* 3718/* in broadcast mode, we send everything to all usable interfaces. */
4017 * in broadcast mode, we send everything to all usable interfaces.
4018 */
4019static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) 3719static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4020{ 3720{
4021 struct bonding *bond = netdev_priv(bond_dev); 3721 struct bonding *bond = netdev_priv(bond_dev);
4022 struct slave *slave, *start_at; 3722 struct slave *slave = NULL;
4023 struct net_device *tx_dev = NULL;
4024 int i;
4025 int res = 1;
4026
4027 read_lock(&bond->curr_slave_lock);
4028 start_at = bond->curr_active_slave;
4029 read_unlock(&bond->curr_slave_lock);
4030
4031 if (!start_at)
4032 goto out;
4033 3723
4034 bond_for_each_slave_from(bond, slave, i, start_at) { 3724 bond_for_each_slave_rcu(bond, slave) {
4035 if (IS_UP(slave->dev) && 3725 if (bond_is_last_slave(bond, slave))
4036 (slave->link == BOND_LINK_UP) && 3726 break;
4037 bond_is_active_slave(slave)) { 3727 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
4038 if (tx_dev) { 3728 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4039 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4040 if (!skb2) {
4041 pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
4042 bond_dev->name);
4043 continue;
4044 }
4045 3729
4046 res = bond_dev_queue_xmit(bond, skb2, tx_dev); 3730 if (!skb2) {
4047 if (res) { 3731 pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
4048 kfree_skb(skb2); 3732 bond_dev->name);
4049 continue; 3733 continue;
4050 }
4051 } 3734 }
4052 tx_dev = slave->dev; 3735 /* bond_dev_queue_xmit always returns 0 */
3736 bond_dev_queue_xmit(bond, skb2, slave->dev);
4053 } 3737 }
4054 } 3738 }
4055 3739 if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
4056 if (tx_dev) 3740 bond_dev_queue_xmit(bond, skb, slave->dev);
4057 res = bond_dev_queue_xmit(bond, skb, tx_dev); 3741 else
4058
4059out:
4060 if (res)
4061 /* no suitable interface, frame not sent */
4062 kfree_skb(skb); 3742 kfree_skb(skb);
4063 3743
4064 /* frame sent to all suitable interfaces */
4065 return NETDEV_TX_OK; 3744 return NETDEV_TX_OK;
4066} 3745}
4067 3746
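bond_xmit_broadcast() clones the skb for every slave except the last list entry and sends the original through that last slave, saving one clone per frame. Modeled on plain heap buffers, where xmit() and strdup() are illustrative stand-ins for bond_dev_queue_xmit() and skb_clone(), and transmitting consumes its buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void xmit(int slave, char *buf)
{
        printf("slave %d: %s\n", slave, buf);
        free(buf);                      /* transmitting consumes the buffer */
}

int main(void)
{
        int up[] = { 1, 0, 1, 1 };      /* which slaves are usable */
        int cnt = 4, i;
        char *skb = strdup("frame");

        for (i = 0; i < cnt - 1; i++)   /* all but the last slave: clones */
                if (up[i])
                        xmit(i, strdup(skb));
        if (up[cnt - 1])
                xmit(cnt - 1, skb);     /* last slave gets the original */
        else
                free(skb);              /* last slave unusable: drop */
        return 0;
}

Note the original skb is tied to the last list entry specifically: if that slave is down, the original is freed even though clones already went out on earlier slaves.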
@@ -4089,15 +3768,15 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
4089static inline int bond_slave_override(struct bonding *bond, 3768static inline int bond_slave_override(struct bonding *bond,
4090 struct sk_buff *skb) 3769 struct sk_buff *skb)
4091{ 3770{
4092 int i, res = 1;
4093 struct slave *slave = NULL; 3771 struct slave *slave = NULL;
4094 struct slave *check_slave; 3772 struct slave *check_slave;
3773 int res = 1;
4095 3774
4096 if (!skb->queue_mapping) 3775 if (!skb->queue_mapping)
4097 return 1; 3776 return 1;
4098 3777
4099 /* Find out if any slaves have the same mapping as this skb. */ 3778 /* Find out if any slaves have the same mapping as this skb. */
4100 bond_for_each_slave(bond, check_slave, i) { 3779 bond_for_each_slave_rcu(bond, check_slave) {
4101 if (check_slave->queue_id == skb->queue_mapping) { 3780 if (check_slave->queue_id == skb->queue_mapping) {
4102 slave = check_slave; 3781 slave = check_slave;
4103 break; 3782 break;
@@ -4182,14 +3861,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4182 if (is_netpoll_tx_blocked(dev)) 3861 if (is_netpoll_tx_blocked(dev))
4183 return NETDEV_TX_BUSY; 3862 return NETDEV_TX_BUSY;
4184 3863
4185 read_lock(&bond->lock); 3864 rcu_read_lock();
4186 3865 if (!list_empty(&bond->slave_list))
4187 if (bond->slave_cnt)
4188 ret = __bond_start_xmit(skb, dev); 3866 ret = __bond_start_xmit(skb, dev);
4189 else 3867 else
4190 kfree_skb(skb); 3868 kfree_skb(skb);
4191 3869 rcu_read_unlock();
4192 read_unlock(&bond->lock);
4193 3870
4194 return ret; 3871 return ret;
4195} 3872}
@@ -4230,9 +3907,8 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
4230 struct ethtool_cmd *ecmd) 3907 struct ethtool_cmd *ecmd)
4231{ 3908{
4232 struct bonding *bond = netdev_priv(bond_dev); 3909 struct bonding *bond = netdev_priv(bond_dev);
4233 struct slave *slave;
4234 int i;
4235 unsigned long speed = 0; 3910 unsigned long speed = 0;
3911 struct slave *slave;
4236 3912
4237 ecmd->duplex = DUPLEX_UNKNOWN; 3913 ecmd->duplex = DUPLEX_UNKNOWN;
4238 ecmd->port = PORT_OTHER; 3914 ecmd->port = PORT_OTHER;
@@ -4243,7 +3919,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
4243 * this is an accurate maximum. 3919 * this is an accurate maximum.
4244 */ 3920 */
4245 read_lock(&bond->lock); 3921 read_lock(&bond->lock);
4246 bond_for_each_slave(bond, slave, i) { 3922 bond_for_each_slave(bond, slave) {
4247 if (SLAVE_IS_OK(slave)) { 3923 if (SLAVE_IS_OK(slave)) {
4248 if (slave->speed != SPEED_UNKNOWN) 3924 if (slave->speed != SPEED_UNKNOWN)
4249 speed += slave->speed; 3925 speed += slave->speed;
@@ -4254,6 +3930,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
4254 } 3930 }
4255 ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN); 3931 ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
4256 read_unlock(&bond->lock); 3932 read_unlock(&bond->lock);
3933
4257 return 0; 3934 return 0;
4258} 3935}
4259 3936
@@ -4317,12 +3994,11 @@ static void bond_setup(struct net_device *bond_dev)
4317 /* initialize rwlocks */ 3994 /* initialize rwlocks */
4318 rwlock_init(&bond->lock); 3995 rwlock_init(&bond->lock);
4319 rwlock_init(&bond->curr_slave_lock); 3996 rwlock_init(&bond->curr_slave_lock);
4320 3997 INIT_LIST_HEAD(&bond->slave_list);
4321 bond->params = bonding_defaults; 3998 bond->params = bonding_defaults;
4322 3999
4323 /* Initialize pointers */ 4000 /* Initialize pointers */
4324 bond->dev = bond_dev; 4001 bond->dev = bond_dev;
4325 INIT_LIST_HEAD(&bond->vlan_list);
4326 4002
4327 /* Initialize the device entry points */ 4003 /* Initialize the device entry points */
4328 ether_setup(bond_dev); 4004 ether_setup(bond_dev);
@@ -4374,23 +4050,18 @@ static void bond_setup(struct net_device *bond_dev)
4374static void bond_uninit(struct net_device *bond_dev) 4050static void bond_uninit(struct net_device *bond_dev)
4375{ 4051{
4376 struct bonding *bond = netdev_priv(bond_dev); 4052 struct bonding *bond = netdev_priv(bond_dev);
4377 struct vlan_entry *vlan, *tmp; 4053 struct slave *slave, *tmp_slave;
4378 4054
4379 bond_netpoll_cleanup(bond_dev); 4055 bond_netpoll_cleanup(bond_dev);
4380 4056
4381 /* Release the bonded slaves */ 4057 /* Release the bonded slaves */
4382 while (bond->first_slave != NULL) 4058 list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
4383 __bond_release_one(bond_dev, bond->first_slave->dev, true); 4059 __bond_release_one(bond_dev, slave->dev, true);
4384 pr_info("%s: released all slaves\n", bond_dev->name); 4060 pr_info("%s: released all slaves\n", bond_dev->name);
4385 4061
4386 list_del(&bond->bond_list); 4062 list_del(&bond->bond_list);
4387 4063
4388 bond_debug_unregister(bond); 4064 bond_debug_unregister(bond);
4389
4390 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
4391 list_del(&vlan->vlan_list);
4392 kfree(vlan);
4393 }
4394} 4065}
4395 4066
4396/*------------------------- Module initialization ---------------------------*/ 4067/*------------------------- Module initialization ---------------------------*/
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 4060d41f0ee7..20a6ee25bb63 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -12,7 +12,6 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
12 struct bonding *bond = seq->private; 12 struct bonding *bond = seq->private;
13 loff_t off = 0; 13 loff_t off = 0;
14 struct slave *slave; 14 struct slave *slave;
15 int i;
16 15
17 /* make sure the bond won't be taken away */ 16 /* make sure the bond won't be taken away */
18 rcu_read_lock(); 17 rcu_read_lock();
@@ -21,10 +20,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
21 if (*pos == 0) 20 if (*pos == 0)
22 return SEQ_START_TOKEN; 21 return SEQ_START_TOKEN;
23 22
24 bond_for_each_slave(bond, slave, i) { 23 bond_for_each_slave(bond, slave)
25 if (++off == *pos) 24 if (++off == *pos)
26 return slave; 25 return slave;
27 }
28 26
29 return NULL; 27 return NULL;
30} 28}
@@ -36,11 +34,13 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
36 34
37 ++*pos; 35 ++*pos;
38 if (v == SEQ_START_TOKEN) 36 if (v == SEQ_START_TOKEN)
39 return bond->first_slave; 37 return bond_first_slave(bond);
40 38
41 slave = slave->next; 39 if (bond_is_last_slave(bond, slave))
40 return NULL;
41 slave = bond_next_slave(bond, slave);
42 42
43 return (slave == bond->first_slave) ? NULL : slave; 43 return slave;
44} 44}
45 45
46static void bond_info_seq_stop(struct seq_file *seq, void *v) 46static void bond_info_seq_stop(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dc36a3d7d9e9..ce4677668e2c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -209,12 +209,12 @@ void bond_destroy_slave_symlinks(struct net_device *master,
209static ssize_t bonding_show_slaves(struct device *d, 209static ssize_t bonding_show_slaves(struct device *d,
210 struct device_attribute *attr, char *buf) 210 struct device_attribute *attr, char *buf)
211{ 211{
212 struct slave *slave;
213 int i, res = 0;
214 struct bonding *bond = to_bond(d); 212 struct bonding *bond = to_bond(d);
213 struct slave *slave;
214 int res = 0;
215 215
216 read_lock(&bond->lock); 216 read_lock(&bond->lock);
217 bond_for_each_slave(bond, slave, i) { 217 bond_for_each_slave(bond, slave) {
218 if (res > (PAGE_SIZE - IFNAMSIZ)) { 218 if (res > (PAGE_SIZE - IFNAMSIZ)) {
219 /* not enough space for another interface name */ 219 /* not enough space for another interface name */
220 if ((PAGE_SIZE - res) > 10) 220 if ((PAGE_SIZE - res) > 10)
@@ -227,6 +227,7 @@ static ssize_t bonding_show_slaves(struct device *d,
227 read_unlock(&bond->lock); 227 read_unlock(&bond->lock);
228 if (res) 228 if (res)
229 buf[res-1] = '\n'; /* eat the leftover space */ 229 buf[res-1] = '\n'; /* eat the leftover space */
230
230 return res; 231 return res;
231} 232}
232 233
@@ -325,7 +326,7 @@ static ssize_t bonding_store_mode(struct device *d,
325 goto out; 326 goto out;
326 } 327 }
327 328
328 if (bond->slave_cnt > 0) { 329 if (!list_empty(&bond->slave_list)) {
329 pr_err("unable to update mode of %s because it has slaves.\n", 330 pr_err("unable to update mode of %s because it has slaves.\n",
330 bond->dev->name); 331 bond->dev->name);
331 ret = -EPERM; 332 ret = -EPERM;
@@ -501,20 +502,25 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
501 struct device_attribute *attr, 502 struct device_attribute *attr,
502 const char *buf, size_t count) 503 const char *buf, size_t count)
503{ 504{
504 int new_value; 505 int new_value, ret = count;
505 struct bonding *bond = to_bond(d); 506 struct bonding *bond = to_bond(d);
506 507
507 if (bond->slave_cnt != 0) { 508 if (!rtnl_trylock())
509 return restart_syscall();
510
511 if (!list_empty(&bond->slave_list)) {
508 pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n", 512 pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
509 bond->dev->name); 513 bond->dev->name);
510 return -EPERM; 514 ret = -EPERM;
515 goto out;
511 } 516 }
512 517
513 new_value = bond_parse_parm(buf, fail_over_mac_tbl); 518 new_value = bond_parse_parm(buf, fail_over_mac_tbl);
514 if (new_value < 0) { 519 if (new_value < 0) {
515 pr_err("%s: Ignoring invalid fail_over_mac value %s.\n", 520 pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
516 bond->dev->name, buf); 521 bond->dev->name, buf);
517 return -EINVAL; 522 ret = -EINVAL;
523 goto out;
518 } 524 }
519 525
520 bond->params.fail_over_mac = new_value; 526 bond->params.fail_over_mac = new_value;
@@ -522,7 +528,9 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
522 bond->dev->name, fail_over_mac_tbl[new_value].modename, 528 bond->dev->name, fail_over_mac_tbl[new_value].modename,
523 new_value); 529 new_value);
524 530
525 return count; 531out:
532 rtnl_unlock();
533 return ret;
526} 534}
527 535
528static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, 536static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
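The store handler now takes RTNL with rtnl_trylock() and, on contention, bounces the write via restart_syscall() so it is retried from scratch rather than sleeping. The same trylock-and-retry shape in userspace terms, as a sketch in which pthread_mutex_trylock() and -EAGAIN stand in for rtnl_trylock() and restart_syscall(), which has no userspace equivalent:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int fail_over_mac;

static int store_fail_over_mac(int new_value)
{
        int ret = 0;

        if (pthread_mutex_trylock(&cfg_lock))
                return -EAGAIN;         /* contended: caller retries */

        if (new_value < 0)
                ret = -EINVAL;          /* like bond_parse_parm() < 0 */
        else
                fail_over_mac = new_value;

        pthread_mutex_unlock(&cfg_lock);
        return ret;
}

int main(void)
{
        printf("%d\n", store_fail_over_mac(1));         /* 0 */
        printf("%d\n", store_fail_over_mac(-5));        /* -EINVAL */
        return 0;
}

bonding_store_lacp() below gains the same rtnl_trylock()/restart_syscall() guard.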
@@ -661,7 +669,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
661 &newtarget); 669 &newtarget);
662 /* not to race with bond_arp_rcv */ 670 /* not to race with bond_arp_rcv */
663 write_lock_bh(&bond->lock); 671 write_lock_bh(&bond->lock);
664 bond_for_each_slave(bond, slave, i) 672 bond_for_each_slave(bond, slave)
665 slave->target_last_arp_rx[ind] = jiffies; 673 slave->target_last_arp_rx[ind] = jiffies;
666 targets[ind] = newtarget; 674 targets[ind] = newtarget;
667 write_unlock_bh(&bond->lock); 675 write_unlock_bh(&bond->lock);
@@ -687,7 +695,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
687 &newtarget); 695 &newtarget);
688 696
689 write_lock_bh(&bond->lock); 697 write_lock_bh(&bond->lock);
690 bond_for_each_slave(bond, slave, i) { 698 bond_for_each_slave(bond, slave) {
691 targets_rx = slave->target_last_arp_rx; 699 targets_rx = slave->target_last_arp_rx;
692 j = ind; 700 j = ind;
693 for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++) 701 for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -844,8 +852,11 @@ static ssize_t bonding_store_lacp(struct device *d,
844 struct device_attribute *attr, 852 struct device_attribute *attr,
845 const char *buf, size_t count) 853 const char *buf, size_t count)
846{ 854{
847 int new_value, ret = count;
848 struct bonding *bond = to_bond(d); 855 struct bonding *bond = to_bond(d);
856 int new_value, ret = count;
857
858 if (!rtnl_trylock())
859 return restart_syscall();
849 860
850 if (bond->dev->flags & IFF_UP) { 861 if (bond->dev->flags & IFF_UP) {
851 pr_err("%s: Unable to update LACP rate because interface is up.\n", 862 pr_err("%s: Unable to update LACP rate because interface is up.\n",
@@ -875,6 +886,8 @@ static ssize_t bonding_store_lacp(struct device *d,
875 ret = -EINVAL; 886 ret = -EINVAL;
876 } 887 }
877out: 888out:
889 rtnl_unlock();
890
878 return ret; 891 return ret;
879} 892}
880static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, 893static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
@@ -1078,10 +1091,9 @@ static ssize_t bonding_store_primary(struct device *d,
1078 struct device_attribute *attr, 1091 struct device_attribute *attr,
1079 const char *buf, size_t count) 1092 const char *buf, size_t count)
1080{ 1093{
1081 int i;
1082 struct slave *slave;
1083 struct bonding *bond = to_bond(d); 1094 struct bonding *bond = to_bond(d);
1084 char ifname[IFNAMSIZ]; 1095 char ifname[IFNAMSIZ];
1096 struct slave *slave;
1085 1097
1086 if (!rtnl_trylock()) 1098 if (!rtnl_trylock())
1087 return restart_syscall(); 1099 return restart_syscall();
@@ -1107,7 +1119,7 @@ static ssize_t bonding_store_primary(struct device *d,
1107 goto out; 1119 goto out;
1108 } 1120 }
1109 1121
1110 bond_for_each_slave(bond, slave, i) { 1122 bond_for_each_slave(bond, slave) {
1111 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { 1123 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
1112 pr_info("%s: Setting %s as primary slave.\n", 1124 pr_info("%s: Setting %s as primary slave.\n",
1113 bond->dev->name, slave->dev->name); 1125 bond->dev->name, slave->dev->name);
@@ -1236,16 +1248,16 @@ static ssize_t bonding_show_active_slave(struct device *d,
1236 struct device_attribute *attr, 1248 struct device_attribute *attr,
1237 char *buf) 1249 char *buf)
1238{ 1250{
1239 struct slave *curr;
1240 struct bonding *bond = to_bond(d); 1251 struct bonding *bond = to_bond(d);
1252 struct slave *curr;
1241 int count = 0; 1253 int count = 0;
1242 1254
1243 read_lock(&bond->curr_slave_lock); 1255 rcu_read_lock();
1244 curr = bond->curr_active_slave; 1256 curr = rcu_dereference(bond->curr_active_slave);
1245 read_unlock(&bond->curr_slave_lock);
1246
1247 if (USES_PRIMARY(bond->params.mode) && curr) 1257 if (USES_PRIMARY(bond->params.mode) && curr)
1248 count = sprintf(buf, "%s\n", curr->dev->name); 1258 count = sprintf(buf, "%s\n", curr->dev->name);
1259 rcu_read_unlock();
1260
1249 return count; 1261 return count;
1250} 1262}
1251 1263
@@ -1253,16 +1265,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
1253 struct device_attribute *attr, 1265 struct device_attribute *attr,
1254 const char *buf, size_t count) 1266 const char *buf, size_t count)
1255{ 1267{
1256 int i; 1268 struct slave *slave, *old_active, *new_active;
1257 struct slave *slave;
1258 struct slave *old_active = NULL;
1259 struct slave *new_active = NULL;
1260 struct bonding *bond = to_bond(d); 1269 struct bonding *bond = to_bond(d);
1261 char ifname[IFNAMSIZ]; 1270 char ifname[IFNAMSIZ];
1262 1271
1263 if (!rtnl_trylock()) 1272 if (!rtnl_trylock())
1264 return restart_syscall(); 1273 return restart_syscall();
1265 1274
1275 old_active = new_active = NULL;
1266 block_netpoll_tx(); 1276 block_netpoll_tx();
1267 read_lock(&bond->lock); 1277 read_lock(&bond->lock);
1268 write_lock_bh(&bond->curr_slave_lock); 1278 write_lock_bh(&bond->curr_slave_lock);
@@ -1279,12 +1289,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
1279 if (!strlen(ifname) || buf[0] == '\n') { 1289 if (!strlen(ifname) || buf[0] == '\n') {
1280 pr_info("%s: Clearing current active slave.\n", 1290 pr_info("%s: Clearing current active slave.\n",
1281 bond->dev->name); 1291 bond->dev->name);
1282 bond->curr_active_slave = NULL; 1292 rcu_assign_pointer(bond->curr_active_slave, NULL);
1283 bond_select_active_slave(bond); 1293 bond_select_active_slave(bond);
1284 goto out; 1294 goto out;
1285 } 1295 }
1286 1296
1287 bond_for_each_slave(bond, slave, i) { 1297 bond_for_each_slave(bond, slave) {
1288 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { 1298 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
1289 old_active = bond->curr_active_slave; 1299 old_active = bond->curr_active_slave;
1290 new_active = slave; 1300 new_active = slave;
@@ -1295,8 +1305,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
1295 bond->dev->name, 1305 bond->dev->name,
1296 slave->dev->name); 1306 slave->dev->name);
1297 goto out; 1307 goto out;
1298 } 1308 } else {
1299 else {
1300 if ((new_active) && 1309 if ((new_active) &&
1301 (old_active) && 1310 (old_active) &&
1302 (new_active->link == BOND_LINK_UP) && 1311 (new_active->link == BOND_LINK_UP) &&
@@ -1307,8 +1316,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
1307 slave->dev->name); 1316 slave->dev->name);
1308 bond_change_active_slave(bond, 1317 bond_change_active_slave(bond,
1309 new_active); 1318 new_active);
1310 } 1319 } else {
1311 else {
1312 pr_info("%s: Could not set %s as" 1320 pr_info("%s: Could not set %s as"
1313 " active slave; either %s is" 1321 " active slave; either %s is"
1314 " down or the link is down.\n", 1322 " down or the link is down.\n",
@@ -1344,14 +1352,9 @@ static ssize_t bonding_show_mii_status(struct device *d,
1344 struct device_attribute *attr, 1352 struct device_attribute *attr,
1345 char *buf) 1353 char *buf)
1346{ 1354{
1347 struct slave *curr;
1348 struct bonding *bond = to_bond(d); 1355 struct bonding *bond = to_bond(d);
1349 1356
1350 read_lock(&bond->curr_slave_lock); 1357 return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down");
1351 curr = bond->curr_active_slave;
1352 read_unlock(&bond->curr_slave_lock);
1353
1354 return sprintf(buf, "%s\n", curr ? "up" : "down");
1355} 1358}
1356static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); 1359static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
1357 1360
@@ -1470,15 +1473,15 @@ static ssize_t bonding_show_queue_id(struct device *d,
1470 struct device_attribute *attr, 1473 struct device_attribute *attr,
1471 char *buf) 1474 char *buf)
1472{ 1475{
1473 struct slave *slave;
1474 int i, res = 0;
1475 struct bonding *bond = to_bond(d); 1476 struct bonding *bond = to_bond(d);
1477 struct slave *slave;
1478 int res = 0;
1476 1479
1477 if (!rtnl_trylock()) 1480 if (!rtnl_trylock())
1478 return restart_syscall(); 1481 return restart_syscall();
1479 1482
1480 read_lock(&bond->lock); 1483 read_lock(&bond->lock);
1481 bond_for_each_slave(bond, slave, i) { 1484 bond_for_each_slave(bond, slave) {
1482 if (res > (PAGE_SIZE - IFNAMSIZ - 6)) { 1485 if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
1483 /* not enough space for another interface_name:queue_id pair */ 1486 /* not enough space for another interface_name:queue_id pair */
1484 if ((PAGE_SIZE - res) > 10) 1487 if ((PAGE_SIZE - res) > 10)
@@ -1493,6 +1496,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
1493 if (res) 1496 if (res)
1494 buf[res-1] = '\n'; /* eat the leftover space */ 1497 buf[res-1] = '\n'; /* eat the leftover space */
1495 rtnl_unlock(); 1498 rtnl_unlock();
1499
1496 return res; 1500 return res;
1497} 1501}
1498 1502
@@ -1507,7 +1511,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
1507 struct slave *slave, *update_slave; 1511 struct slave *slave, *update_slave;
1508 struct bonding *bond = to_bond(d); 1512 struct bonding *bond = to_bond(d);
1509 u16 qid; 1513 u16 qid;
1510 int i, ret = count; 1514 int ret = count;
1511 char *delim; 1515 char *delim;
1512 struct net_device *sdev = NULL; 1516 struct net_device *sdev = NULL;
1513 1517
@@ -1542,7 +1546,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
1542 1546
1543 /* Search for the slave and check for duplicate qids */ 1547 /* Search for the slave and check for duplicate qids */
1544 update_slave = NULL; 1548 update_slave = NULL;
1545 bond_for_each_slave(bond, slave, i) { 1549 bond_for_each_slave(bond, slave) {
1546 if (sdev == slave->dev) 1550 if (sdev == slave->dev)
1547 /* 1551 /*
1548 * We don't need to check the matching 1552 * We don't need to check the matching
@@ -1594,8 +1598,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1594 struct device_attribute *attr, 1598 struct device_attribute *attr,
1595 const char *buf, size_t count) 1599 const char *buf, size_t count)
1596{ 1600{
1597 int i, new_value, ret = count;
1598 struct bonding *bond = to_bond(d); 1601 struct bonding *bond = to_bond(d);
1602 int new_value, ret = count;
1599 struct slave *slave; 1603 struct slave *slave;
1600 1604
1601 if (sscanf(buf, "%d", &new_value) != 1) { 1605 if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1618,7 +1622,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1618 } 1622 }
1619 1623
1620 read_lock(&bond->lock); 1624 read_lock(&bond->lock);
1621 bond_for_each_slave(bond, slave, i) { 1625 bond_for_each_slave(bond, slave) {
1622 if (!bond_is_active_slave(slave)) { 1626 if (!bond_is_active_slave(slave)) {
1623 if (new_value) 1627 if (new_value)
1624 slave->inactive = 0; 1628 slave->inactive = 0;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 42d1c6599cba..f7ab16185f68 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -71,6 +71,28 @@
71 set_fs(fs); \ 71 set_fs(fs); \
72 res; }) 72 res; })
73 73
74/* slave list primitives */
75#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
76
77/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
78#define bond_first_slave(bond) \
79 list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
80#define bond_last_slave(bond) \
81 (list_empty(&(bond)->slave_list) ? NULL : \
82 bond_to_slave((bond)->slave_list.prev))
83
84#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
85#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
86
87/* Since bond_first/last_slave can return NULL, these can return NULL too */
88#define bond_next_slave(bond, pos) \
89 (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
90 bond_to_slave((pos)->list.next))
91
92#define bond_prev_slave(bond, pos) \
93 (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
94 bond_to_slave((pos)->list.prev))
95
74/** 96/**
75 * bond_for_each_slave_from - iterate the slaves list from a starting point 97 * bond_for_each_slave_from - iterate the slaves list from a starting point
76 * @bond: the bond holding this list. 98 * @bond: the bond holding this list.
@@ -80,37 +102,33 @@
80 * 102 *
81 * Caller must hold bond->lock 103 * Caller must hold bond->lock
82 */ 104 */
83#define bond_for_each_slave_from(bond, pos, cnt, start) \ 105#define bond_for_each_slave_from(bond, pos, cnt, start) \
84 for (cnt = 0, pos = start; \ 106 for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
85 cnt < (bond)->slave_cnt; \ 107 cnt++, pos = bond_next_slave(bond, pos))
86 cnt++, pos = (pos)->next)
87 108
88/** 109/**
89 * bond_for_each_slave_from_to - iterate the slaves list from start point to stop point 110 * bond_for_each_slave - iterate over all slaves
90 * @bond: the bond holding this list. 111 * @bond: the bond holding this list
91 * @pos: current slave. 112 * @pos: current slave
92 * @cnt: counter for number max of moves
93 * @start: start point.
94 * @stop: stop point.
95 * 113 *
96 * Caller must hold bond->lock 114 * Caller must hold bond->lock
97 */ 115 */
98#define bond_for_each_slave_from_to(bond, pos, cnt, start, stop) \ 116#define bond_for_each_slave(bond, pos) \
99 for (cnt = 0, pos = start; \ 117 list_for_each_entry(pos, &(bond)->slave_list, list)
100 ((cnt < (bond)->slave_cnt) && (pos != (stop)->next)); \ 118
101 cnt++, pos = (pos)->next) 119/* Caller must have rcu_read_lock */
120#define bond_for_each_slave_rcu(bond, pos) \
121 list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
102 122
103/** 123/**
 104 * bond_for_each_slave - iterate the slaves list from head 124 * bond_for_each_slave_continue_reverse - continue iterating in reverse from a given position
105 * @bond: the bond holding this list. 125 * @bond: the bond holding this list
106 * @pos: current slave. 126 * @pos: slave to continue from
107 * @cnt: counter for max number of moves
108 * 127 *
109 * Caller must hold bond->lock 128 * Caller must hold bond->lock
110 */ 129 */
111#define bond_for_each_slave(bond, pos, cnt) \ 130#define bond_for_each_slave_continue_reverse(bond, pos) \
112 bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave) 131 list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
113
114 132
115#ifdef CONFIG_NET_POLL_CONTROLLER 133#ifdef CONFIG_NET_POLL_CONTROLLER
116extern atomic_t netpoll_block_tx; 134extern atomic_t netpoll_block_tx;
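Taken together, these macros replace the driver's hand-rolled next/prev ring with the standard list API, which is what lets every caller in the sysfs hunks above drop its index variable. A minimal before/after sketch of a caller (the pr_info() body is purely illustrative):

        struct slave *slave;
        int i;

        /* before: counter-driven walk starting at bond->first_slave */
        bond_for_each_slave(bond, slave, i)
                pr_info("%s\n", slave->dev->name);

        /* after: plain list_for_each_entry() over bond->slave_list */
        bond_for_each_slave(bond, slave)
                pr_info("%s\n", slave->dev->name);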
@@ -167,15 +185,9 @@ struct bond_parm_tbl {
167 185
168#define BOND_MAX_MODENAME_LEN 20 186#define BOND_MAX_MODENAME_LEN 20
169 187
170struct vlan_entry {
171 struct list_head vlan_list;
172 unsigned short vlan_id;
173};
174
175struct slave { 188struct slave {
176 struct net_device *dev; /* first - useful for panic debug */ 189 struct net_device *dev; /* first - useful for panic debug */
177 struct slave *next; 190 struct list_head list;
178 struct slave *prev;
179 struct bonding *bond; /* our master */ 191 struct bonding *bond; /* our master */
180 int delay; 192 int delay;
181 unsigned long jiffies; 193 unsigned long jiffies;
@@ -215,7 +227,7 @@ struct slave {
215 */ 227 */
216struct bonding { 228struct bonding {
217 struct net_device *dev; /* first - useful for panic debug */ 229 struct net_device *dev; /* first - useful for panic debug */
218 struct slave *first_slave; 230 struct list_head slave_list;
219 struct slave *curr_active_slave; 231 struct slave *curr_active_slave;
220 struct slave *current_arp_slave; 232 struct slave *current_arp_slave;
221 struct slave *primary_slave; 233 struct slave *primary_slave;
@@ -237,7 +249,6 @@ struct bonding {
237 struct ad_bond_info ad_info; 249 struct ad_bond_info ad_info;
238 struct alb_bond_info alb_info; 250 struct alb_bond_info alb_info;
239 struct bond_params params; 251 struct bond_params params;
240 struct list_head vlan_list;
241 struct workqueue_struct *wq; 252 struct workqueue_struct *wq;
242 struct delayed_work mii_work; 253 struct delayed_work mii_work;
243 struct delayed_work arp_work; 254 struct delayed_work arp_work;
@@ -250,11 +261,6 @@ struct bonding {
250#endif /* CONFIG_DEBUG_FS */ 261#endif /* CONFIG_DEBUG_FS */
251}; 262};
252 263
253static inline bool bond_vlan_used(struct bonding *bond)
254{
255 return !list_empty(&bond->vlan_list);
256}
257
258#define bond_slave_get_rcu(dev) \ 264#define bond_slave_get_rcu(dev) \
259 ((struct slave *) rcu_dereference(dev->rx_handler_data)) 265 ((struct slave *) rcu_dereference(dev->rx_handler_data))
260 266
@@ -270,13 +276,10 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
270 struct net_device *slave_dev) 276 struct net_device *slave_dev)
271{ 277{
272 struct slave *slave = NULL; 278 struct slave *slave = NULL;
273 int i;
274 279
275 bond_for_each_slave(bond, slave, i) { 280 bond_for_each_slave(bond, slave)
276 if (slave->dev == slave_dev) { 281 if (slave->dev == slave_dev)
277 return slave; 282 return slave;
278 }
279 }
280 283
281 return NULL; 284 return NULL;
282} 285}
@@ -416,10 +419,20 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
416 return addr; 419 return addr;
417} 420}
418 421
422static inline bool slave_can_tx(struct slave *slave)
423{
424 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
425 bond_is_active_slave(slave))
426 return true;
427 else
428 return false;
429}
430
419struct bond_net; 431struct bond_net;
420 432
421struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 433struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
422int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 434int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
435void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
423int bond_create(struct net *net, const char *name); 436int bond_create(struct net *net, const char *name);
424int bond_create_sysfs(struct bond_net *net); 437int bond_create_sysfs(struct bond_net *net);
425void bond_destroy_sysfs(struct bond_net *net); 438void bond_destroy_sysfs(struct bond_net *net);
@@ -477,10 +490,9 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
477static inline struct slave *bond_slave_has_mac(struct bonding *bond, 490static inline struct slave *bond_slave_has_mac(struct bonding *bond,
478 const u8 *mac) 491 const u8 *mac)
479{ 492{
480 int i = 0;
481 struct slave *tmp; 493 struct slave *tmp;
482 494
483 bond_for_each_slave(bond, tmp, i) 495 bond_for_each_slave(bond, tmp)
484 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) 496 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
485 return tmp; 497 return tmp;
486 498
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 34dea95d58db..88a6a5810ec6 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -347,7 +347,9 @@ static int ldisc_open(struct tty_struct *tty)
347 /* release devices to avoid name collision */ 347 /* release devices to avoid name collision */
348 ser_release(NULL); 348 ser_release(NULL);
349 349
350 sprintf(name, "cf%s", tty->name); 350 result = snprintf(name, sizeof(name), "cf%s", tty->name);
351 if (result >= IFNAMSIZ)
352 return -EINVAL;
351 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup); 353 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
352 if (!dev) 354 if (!dev)
353 return -ENOMEM; 355 return -ENOMEM;
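The caif fix replaces an unbounded sprintf() into a stack buffer with a bounds-checked snprintf(); since snprintf() returns the length the untruncated string would have needed, a return value of IFNAMSIZ or more flags a name that did not fit. The general shape, assuming name is a char[IFNAMSIZ] as in this driver:

        char name[IFNAMSIZ];
        int result;

        result = snprintf(name, sizeof(name), "cf%s", tty->name);
        if (result >= IFNAMSIZ)         /* would have been truncated */
                return -EINVAL;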
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index dbbe97ae121e..3b1ff6148702 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1355,7 +1355,7 @@ static int at91_can_probe(struct platform_device *pdev)
1355 if (at91_is_sam9263(priv)) 1355 if (at91_is_sam9263(priv))
1356 dev->sysfs_groups[0] = &at91_sysfs_attr_group; 1356 dev->sysfs_groups[0] = &at91_sysfs_attr_group;
1357 1357
1358 dev_set_drvdata(&pdev->dev, dev); 1358 platform_set_drvdata(pdev, dev);
1359 SET_NETDEV_DEV(dev, &pdev->dev); 1359 SET_NETDEV_DEV(dev, &pdev->dev);
1360 1360
1361 err = register_candev(dev); 1361 err = register_candev(dev);
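dev_set_drvdata(&pdev->dev, ...) and platform_set_drvdata(pdev, ...) store the same pointer; the platform helper is simply the idiomatic spelling and pairs naturally with the accessor used on the remove path. Sketched as the two halves of a driver (fragments, not a full function):

        /* probe() */
        platform_set_drvdata(pdev, dev);

        /* remove() */
        struct net_device *dev = platform_get_drvdata(pdev);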
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index c6f838d922a5..294ced3cc227 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -195,7 +195,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
195 195
196 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 196 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
197 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 197 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
198 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) 198 if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
199 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 199 dev_info(&pdev->dev, "control memory is not used for raminit\n");
200 else 200 else
201 priv->raminit = c_can_hw_raminit; 201 priv->raminit = c_can_hw_raminit;
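The added (int) cast suggests priv->instance is declared unsigned in this tree; an unsigned value is never less than 0, so without the cast the "no instance assigned" branch could never fire. A tiny illustration of the pitfall:

        unsigned int instance = -1;     /* wraps to UINT_MAX: "unassigned" */

        if (instance < 0)               /* always false for an unsigned type */
                pr_info("never reached\n");
        if ((int)instance < 0)          /* restores the intended test */
                pr_info("control memory is not used for raminit\n");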
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7b0be0910f4b..71c677e651d7 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -850,12 +850,17 @@ static int flexcan_open(struct net_device *dev)
850 struct flexcan_priv *priv = netdev_priv(dev); 850 struct flexcan_priv *priv = netdev_priv(dev);
851 int err; 851 int err;
852 852
853 clk_prepare_enable(priv->clk_ipg); 853 err = clk_prepare_enable(priv->clk_ipg);
854 clk_prepare_enable(priv->clk_per); 854 if (err)
855 return err;
856
857 err = clk_prepare_enable(priv->clk_per);
858 if (err)
859 goto out_disable_ipg;
855 860
856 err = open_candev(dev); 861 err = open_candev(dev);
857 if (err) 862 if (err)
858 goto out; 863 goto out_disable_per;
859 864
860 err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); 865 err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
861 if (err) 866 if (err)
@@ -875,8 +880,9 @@ static int flexcan_open(struct net_device *dev)
875 880
876 out_close: 881 out_close:
877 close_candev(dev); 882 close_candev(dev);
878 out: 883 out_disable_per:
879 clk_disable_unprepare(priv->clk_per); 884 clk_disable_unprepare(priv->clk_per);
885 out_disable_ipg:
880 clk_disable_unprepare(priv->clk_ipg); 886 clk_disable_unprepare(priv->clk_ipg);
881 887
882 return err; 888 return err;
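clk_prepare_enable() can fail, so both flexcan paths now check it and unwind in reverse order; the same shape recurs in register_flexcandev() below. A self-contained sketch of the pattern (the example_* names are hypothetical):

        static int example_open(struct example_priv *priv)
        {
                int err;

                err = clk_prepare_enable(priv->clk_ipg);
                if (err)
                        return err;

                err = clk_prepare_enable(priv->clk_per);
                if (err)
                        goto out_disable_ipg;

                err = example_hw_init(priv);    /* any later failure... */
                if (err)
                        goto out_disable_per;

                return 0;

        out_disable_per:                        /* ...unwinds in reverse order */
                clk_disable_unprepare(priv->clk_per);
        out_disable_ipg:
                clk_disable_unprepare(priv->clk_ipg);
                return err;
        }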
@@ -933,8 +939,13 @@ static int register_flexcandev(struct net_device *dev)
933 struct flexcan_regs __iomem *regs = priv->base; 939 struct flexcan_regs __iomem *regs = priv->base;
934 u32 reg, err; 940 u32 reg, err;
935 941
936 clk_prepare_enable(priv->clk_ipg); 942 err = clk_prepare_enable(priv->clk_ipg);
937 clk_prepare_enable(priv->clk_per); 943 if (err)
944 return err;
945
946 err = clk_prepare_enable(priv->clk_per);
947 if (err)
948 goto out_disable_ipg;
938 949
939 /* select "bus clock", chip must be disabled */ 950 /* select "bus clock", chip must be disabled */
940 flexcan_chip_disable(priv); 951 flexcan_chip_disable(priv);
@@ -959,15 +970,16 @@ static int register_flexcandev(struct net_device *dev)
959 if (!(reg & FLEXCAN_MCR_FEN)) { 970 if (!(reg & FLEXCAN_MCR_FEN)) {
960 netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); 971 netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
961 err = -ENODEV; 972 err = -ENODEV;
962 goto out; 973 goto out_disable_per;
963 } 974 }
964 975
965 err = register_candev(dev); 976 err = register_candev(dev);
966 977
967 out: 978 out_disable_per:
968 /* disable core and turn off clocks */ 979 /* disable core and turn off clocks */
969 flexcan_chip_disable(priv); 980 flexcan_chip_disable(priv);
970 clk_disable_unprepare(priv->clk_per); 981 clk_disable_unprepare(priv->clk_per);
982 out_disable_ipg:
971 clk_disable_unprepare(priv->clk_ipg); 983 clk_disable_unprepare(priv->clk_ipg);
972 984
973 return err; 985 return err;
@@ -1001,7 +1013,6 @@ static int flexcan_probe(struct platform_device *pdev)
1001 struct resource *mem; 1013 struct resource *mem;
1002 struct clk *clk_ipg = NULL, *clk_per = NULL; 1014 struct clk *clk_ipg = NULL, *clk_per = NULL;
1003 void __iomem *base; 1015 void __iomem *base;
1004 resource_size_t mem_size;
1005 int err, irq; 1016 int err, irq;
1006 u32 clock_freq = 0; 1017 u32 clock_freq = 0;
1007 1018
@@ -1013,43 +1024,25 @@ static int flexcan_probe(struct platform_device *pdev)
1013 clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 1024 clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1014 if (IS_ERR(clk_ipg)) { 1025 if (IS_ERR(clk_ipg)) {
1015 dev_err(&pdev->dev, "no ipg clock defined\n"); 1026 dev_err(&pdev->dev, "no ipg clock defined\n");
1016 err = PTR_ERR(clk_ipg); 1027 return PTR_ERR(clk_ipg);
1017 goto failed_clock;
1018 } 1028 }
1019 clock_freq = clk_get_rate(clk_ipg); 1029 clock_freq = clk_get_rate(clk_ipg);
1020 1030
1021 clk_per = devm_clk_get(&pdev->dev, "per"); 1031 clk_per = devm_clk_get(&pdev->dev, "per");
1022 if (IS_ERR(clk_per)) { 1032 if (IS_ERR(clk_per)) {
1023 dev_err(&pdev->dev, "no per clock defined\n"); 1033 dev_err(&pdev->dev, "no per clock defined\n");
1024 err = PTR_ERR(clk_per); 1034 return PTR_ERR(clk_per);
1025 goto failed_clock;
1026 } 1035 }
1027 } 1036 }
1028 1037
1029 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1038 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1030 irq = platform_get_irq(pdev, 0); 1039 irq = platform_get_irq(pdev, 0);
1031 if (!mem || irq <= 0) { 1040 if (irq <= 0)
1032 err = -ENODEV; 1041 return -ENODEV;
1033 goto failed_get;
1034 }
1035 1042
1036 mem_size = resource_size(mem); 1043 base = devm_ioremap_resource(&pdev->dev, mem);
1037 if (!request_mem_region(mem->start, mem_size, pdev->name)) { 1044 if (IS_ERR(base))
1038 err = -EBUSY; 1045 return PTR_ERR(base);
1039 goto failed_get;
1040 }
1041
1042 base = ioremap(mem->start, mem_size);
1043 if (!base) {
1044 err = -ENOMEM;
1045 goto failed_map;
1046 }
1047
1048 dev = alloc_candev(sizeof(struct flexcan_priv), 1);
1049 if (!dev) {
1050 err = -ENOMEM;
1051 goto failed_alloc;
1052 }
1053 1046
1054 of_id = of_match_device(flexcan_of_match, &pdev->dev); 1047 of_id = of_match_device(flexcan_of_match, &pdev->dev);
1055 if (of_id) { 1048 if (of_id) {
@@ -1058,10 +1051,13 @@ static int flexcan_probe(struct platform_device *pdev)
1058 devtype_data = (struct flexcan_devtype_data *) 1051 devtype_data = (struct flexcan_devtype_data *)
1059 pdev->id_entry->driver_data; 1052 pdev->id_entry->driver_data;
1060 } else { 1053 } else {
1061 err = -ENODEV; 1054 return -ENODEV;
1062 goto failed_devtype;
1063 } 1055 }
1064 1056
1057 dev = alloc_candev(sizeof(struct flexcan_priv), 1);
1058 if (!dev)
1059 return -ENOMEM;
1060
1065 dev->netdev_ops = &flexcan_netdev_ops; 1061 dev->netdev_ops = &flexcan_netdev_ops;
1066 dev->irq = irq; 1062 dev->irq = irq;
1067 dev->flags |= IFF_ECHO; 1063 dev->flags |= IFF_ECHO;
@@ -1087,7 +1083,7 @@ static int flexcan_probe(struct platform_device *pdev)
1087 1083
1088 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); 1084 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
1089 1085
1090 dev_set_drvdata(&pdev->dev, dev); 1086 platform_set_drvdata(pdev, dev);
1091 SET_NETDEV_DEV(dev, &pdev->dev); 1087 SET_NETDEV_DEV(dev, &pdev->dev);
1092 1088
1093 err = register_flexcandev(dev); 1089 err = register_flexcandev(dev);
@@ -1104,28 +1100,15 @@ static int flexcan_probe(struct platform_device *pdev)
1104 return 0; 1100 return 0;
1105 1101
1106 failed_register: 1102 failed_register:
1107 failed_devtype:
1108 free_candev(dev); 1103 free_candev(dev);
1109 failed_alloc:
1110 iounmap(base);
1111 failed_map:
1112 release_mem_region(mem->start, mem_size);
1113 failed_get:
1114 failed_clock:
1115 return err; 1104 return err;
1116} 1105}
1117 1106
1118static int flexcan_remove(struct platform_device *pdev) 1107static int flexcan_remove(struct platform_device *pdev)
1119{ 1108{
1120 struct net_device *dev = platform_get_drvdata(pdev); 1109 struct net_device *dev = platform_get_drvdata(pdev);
1121 struct flexcan_priv *priv = netdev_priv(dev);
1122 struct resource *mem;
1123 1110
1124 unregister_flexcandev(dev); 1111 unregister_flexcandev(dev);
1125 iounmap(priv->base);
1126
1127 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1128 release_mem_region(mem->start, resource_size(mem));
1129 1112
1130 free_candev(dev); 1113 free_candev(dev);
1131 1114
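Most of the probe/remove churn above is one conversion: devm_ioremap_resource() subsumes the request_mem_region()/ioremap() pair, validates the resource pointer itself, and releases the mapping automatically when the device detaches, which is why the failed_map/failed_get labels and the remove-path iounmap()/release_mem_region() all disappear. The essential call sequence:

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* also covers mem == NULL */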
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 8cda23bf0614..fe7dd696957e 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -37,9 +37,6 @@
37 * 37 *
38 * static struct mcp251x_platform_data mcp251x_info = { 38 * static struct mcp251x_platform_data mcp251x_info = {
39 * .oscillator_frequency = 8000000, 39 * .oscillator_frequency = 8000000,
40 * .board_specific_setup = &mcp251x_setup,
41 * .power_enable = mcp251x_power_enable,
42 * .transceiver_enable = NULL,
43 * }; 40 * };
44 * 41 *
45 * static struct spi_board_info spi_board_info[] = { 42 * static struct spi_board_info spi_board_info[] = {
@@ -76,6 +73,7 @@
76#include <linux/slab.h> 73#include <linux/slab.h>
77#include <linux/spi/spi.h> 74#include <linux/spi/spi.h>
78#include <linux/uaccess.h> 75#include <linux/uaccess.h>
76#include <linux/regulator/consumer.h>
79 77
80/* SPI interface instruction set */ 78/* SPI interface instruction set */
81#define INSTRUCTION_WRITE 0x02 79#define INSTRUCTION_WRITE 0x02
@@ -264,6 +262,8 @@ struct mcp251x_priv {
264#define AFTER_SUSPEND_POWER 4 262#define AFTER_SUSPEND_POWER 4
265#define AFTER_SUSPEND_RESTART 8 263#define AFTER_SUSPEND_RESTART 8
266 int restart_tx; 264 int restart_tx;
265 struct regulator *power;
266 struct regulator *transceiver;
267}; 267};
268 268
269#define MCP251X_IS(_model) \ 269#define MCP251X_IS(_model) \
@@ -667,16 +667,25 @@ static int mcp251x_hw_probe(struct spi_device *spi)
667 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0; 667 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
668} 668}
669 669
670static int mcp251x_power_enable(struct regulator *reg, int enable)
671{
672 if (IS_ERR(reg))
673 return 0;
674
675 if (enable)
676 return regulator_enable(reg);
677 else
678 return regulator_disable(reg);
679}
680
670static void mcp251x_open_clean(struct net_device *net) 681static void mcp251x_open_clean(struct net_device *net)
671{ 682{
672 struct mcp251x_priv *priv = netdev_priv(net); 683 struct mcp251x_priv *priv = netdev_priv(net);
673 struct spi_device *spi = priv->spi; 684 struct spi_device *spi = priv->spi;
674 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
675 685
676 free_irq(spi->irq, priv); 686 free_irq(spi->irq, priv);
677 mcp251x_hw_sleep(spi); 687 mcp251x_hw_sleep(spi);
678 if (pdata->transceiver_enable) 688 mcp251x_power_enable(priv->transceiver, 0);
679 pdata->transceiver_enable(0);
680 close_candev(net); 689 close_candev(net);
681} 690}
682 691
@@ -684,7 +693,6 @@ static int mcp251x_stop(struct net_device *net)
684{ 693{
685 struct mcp251x_priv *priv = netdev_priv(net); 694 struct mcp251x_priv *priv = netdev_priv(net);
686 struct spi_device *spi = priv->spi; 695 struct spi_device *spi = priv->spi;
687 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
688 696
689 close_candev(net); 697 close_candev(net);
690 698
@@ -704,8 +712,7 @@ static int mcp251x_stop(struct net_device *net)
704 712
705 mcp251x_hw_sleep(spi); 713 mcp251x_hw_sleep(spi);
706 714
707 if (pdata->transceiver_enable) 715 mcp251x_power_enable(priv->transceiver, 0);
708 pdata->transceiver_enable(0);
709 716
710 priv->can.state = CAN_STATE_STOPPED; 717 priv->can.state = CAN_STATE_STOPPED;
711 718
@@ -928,8 +935,7 @@ static int mcp251x_open(struct net_device *net)
928{ 935{
929 struct mcp251x_priv *priv = netdev_priv(net); 936 struct mcp251x_priv *priv = netdev_priv(net);
930 struct spi_device *spi = priv->spi; 937 struct spi_device *spi = priv->spi;
931 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 938 unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
932 unsigned long flags;
933 int ret; 939 int ret;
934 940
935 ret = open_candev(net); 941 ret = open_candev(net);
@@ -939,25 +945,17 @@ static int mcp251x_open(struct net_device *net)
939 } 945 }
940 946
941 mutex_lock(&priv->mcp_lock); 947 mutex_lock(&priv->mcp_lock);
942 if (pdata->transceiver_enable) 948 mcp251x_power_enable(priv->transceiver, 1);
943 pdata->transceiver_enable(1);
944 949
945 priv->force_quit = 0; 950 priv->force_quit = 0;
946 priv->tx_skb = NULL; 951 priv->tx_skb = NULL;
947 priv->tx_len = 0; 952 priv->tx_len = 0;
948 953
949 flags = IRQF_ONESHOT;
950 if (pdata->irq_flags)
951 flags |= pdata->irq_flags;
952 else
953 flags |= IRQF_TRIGGER_FALLING;
954
955 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, 954 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
956 flags, DEVICE_NAME, priv); 955 flags, DEVICE_NAME, priv);
957 if (ret) { 956 if (ret) {
958 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); 957 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
959 if (pdata->transceiver_enable) 958 mcp251x_power_enable(priv->transceiver, 0);
960 pdata->transceiver_enable(0);
961 close_candev(net); 959 close_candev(net);
962 goto open_unlock; 960 goto open_unlock;
963 } 961 }
@@ -1026,6 +1024,19 @@ static int mcp251x_can_probe(struct spi_device *spi)
1026 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; 1024 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
1027 priv->model = spi_get_device_id(spi)->driver_data; 1025 priv->model = spi_get_device_id(spi)->driver_data;
1028 priv->net = net; 1026 priv->net = net;
1027
1028 priv->power = devm_regulator_get(&spi->dev, "vdd");
1029 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
1030 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
1031 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
1032 ret = -EPROBE_DEFER;
1033 goto error_power;
1034 }
1035
1036 ret = mcp251x_power_enable(priv->power, 1);
1037 if (ret)
1038 goto error_power;
1039
1029 spi_set_drvdata(spi, priv); 1040 spi_set_drvdata(spi, priv);
1030 1041
1031 priv->spi = spi; 1042 priv->spi = spi;
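Both supplies are optional here, so probe treats only -EPROBE_DEFER as fatal; any other error pointer is kept in priv and later neutralized by the IS_ERR() check inside mcp251x_power_enable(). The pattern in isolation:

        priv->power = devm_regulator_get(&spi->dev, "vdd");
        priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
        if (PTR_ERR(priv->power) == -EPROBE_DEFER ||
            PTR_ERR(priv->transceiver) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* regulator core not ready yet */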
@@ -1068,30 +1079,24 @@ static int mcp251x_can_probe(struct spi_device *spi)
1068 } 1079 }
1069 } 1080 }
1070 1081
1071 if (pdata->power_enable)
1072 pdata->power_enable(1);
1073
1074 /* Call out to platform specific setup */
1075 if (pdata->board_specific_setup)
1076 pdata->board_specific_setup(spi);
1077
1078 SET_NETDEV_DEV(net, &spi->dev); 1082 SET_NETDEV_DEV(net, &spi->dev);
1079 1083
1080 /* Configure the SPI bus */ 1084 /* Configure the SPI bus */
1081 spi->mode = SPI_MODE_0; 1085 spi->mode = spi->mode ? : SPI_MODE_0;
1086 if (mcp251x_is_2510(spi))
1087 spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
1088 else
1089 spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
1082 spi->bits_per_word = 8; 1090 spi->bits_per_word = 8;
1083 spi_setup(spi); 1091 spi_setup(spi);
1084 1092
1085 /* It is OK not to lock the MCP here, no one knows about it yet */ 1093
1086 if (!mcp251x_hw_probe(spi)) { 1094 if (!mcp251x_hw_probe(spi)) {
1087 dev_info(&spi->dev, "Probe failed\n"); 1095 ret = -ENODEV;
1088 goto error_probe; 1096 goto error_probe;
1089 } 1097 }
1090 mcp251x_hw_sleep(spi); 1098 mcp251x_hw_sleep(spi);
1091 1099
1092 if (pdata->transceiver_enable)
1093 pdata->transceiver_enable(0);
1094
1095 ret = register_candev(net); 1100 ret = register_candev(net);
1096 if (ret) 1101 if (ret)
1097 goto error_probe; 1102 goto error_probe;
@@ -1109,13 +1114,13 @@ error_rx_buf:
1109 if (!mcp251x_enable_dma) 1114 if (!mcp251x_enable_dma)
1110 kfree(priv->spi_tx_buf); 1115 kfree(priv->spi_tx_buf);
1111error_tx_buf: 1116error_tx_buf:
1112 free_candev(net);
1113 if (mcp251x_enable_dma) 1117 if (mcp251x_enable_dma)
1114 dma_free_coherent(&spi->dev, PAGE_SIZE, 1118 dma_free_coherent(&spi->dev, PAGE_SIZE,
1115 priv->spi_tx_buf, priv->spi_tx_dma); 1119 priv->spi_tx_buf, priv->spi_tx_dma);
1120 mcp251x_power_enable(priv->power, 0);
1121error_power:
1122 free_candev(net);
1116error_alloc: 1123error_alloc:
1117 if (pdata->power_enable)
1118 pdata->power_enable(0);
1119 dev_err(&spi->dev, "probe failed\n"); 1124 dev_err(&spi->dev, "probe failed\n");
1120error_out: 1125error_out:
1121 return ret; 1126 return ret;
@@ -1123,12 +1128,10 @@ error_out:
1123 1128
1124static int mcp251x_can_remove(struct spi_device *spi) 1129static int mcp251x_can_remove(struct spi_device *spi)
1125{ 1130{
1126 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1127 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1131 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1128 struct net_device *net = priv->net; 1132 struct net_device *net = priv->net;
1129 1133
1130 unregister_candev(net); 1134 unregister_candev(net);
1131 free_candev(net);
1132 1135
1133 if (mcp251x_enable_dma) { 1136 if (mcp251x_enable_dma) {
1134 dma_free_coherent(&spi->dev, PAGE_SIZE, 1137 dma_free_coherent(&spi->dev, PAGE_SIZE,
@@ -1138,8 +1141,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
1138 kfree(priv->spi_rx_buf); 1141 kfree(priv->spi_rx_buf);
1139 } 1142 }
1140 1143
1141 if (pdata->power_enable) 1144 mcp251x_power_enable(priv->power, 0);
1142 pdata->power_enable(0); 1145
1146 free_candev(net);
1143 1147
1144 return 0; 1148 return 0;
1145} 1149}
@@ -1149,7 +1153,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
1149static int mcp251x_can_suspend(struct device *dev) 1153static int mcp251x_can_suspend(struct device *dev)
1150{ 1154{
1151 struct spi_device *spi = to_spi_device(dev); 1155 struct spi_device *spi = to_spi_device(dev);
1152 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1153 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1156 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1154 struct net_device *net = priv->net; 1157 struct net_device *net = priv->net;
1155 1158
@@ -1163,15 +1166,14 @@ static int mcp251x_can_suspend(struct device *dev)
1163 netif_device_detach(net); 1166 netif_device_detach(net);
1164 1167
1165 mcp251x_hw_sleep(spi); 1168 mcp251x_hw_sleep(spi);
1166 if (pdata->transceiver_enable) 1169 mcp251x_power_enable(priv->transceiver, 0);
1167 pdata->transceiver_enable(0);
1168 priv->after_suspend = AFTER_SUSPEND_UP; 1170 priv->after_suspend = AFTER_SUSPEND_UP;
1169 } else { 1171 } else {
1170 priv->after_suspend = AFTER_SUSPEND_DOWN; 1172 priv->after_suspend = AFTER_SUSPEND_DOWN;
1171 } 1173 }
1172 1174
1173 if (pdata->power_enable) { 1175 if (!IS_ERR(priv->power)) {
1174 pdata->power_enable(0); 1176 regulator_disable(priv->power);
1175 priv->after_suspend |= AFTER_SUSPEND_POWER; 1177 priv->after_suspend |= AFTER_SUSPEND_POWER;
1176 } 1178 }
1177 1179
@@ -1181,16 +1183,14 @@ static int mcp251x_can_suspend(struct device *dev)
1181static int mcp251x_can_resume(struct device *dev) 1183static int mcp251x_can_resume(struct device *dev)
1182{ 1184{
1183 struct spi_device *spi = to_spi_device(dev); 1185 struct spi_device *spi = to_spi_device(dev);
1184 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1185 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1186 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1186 1187
1187 if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1188 if (priv->after_suspend & AFTER_SUSPEND_POWER) {
1188 pdata->power_enable(1); 1189 mcp251x_power_enable(priv->power, 1);
1189 queue_work(priv->wq, &priv->restart_work); 1190 queue_work(priv->wq, &priv->restart_work);
1190 } else { 1191 } else {
1191 if (priv->after_suspend & AFTER_SUSPEND_UP) { 1192 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1192 if (pdata->transceiver_enable) 1193 mcp251x_power_enable(priv->transceiver, 1);
1193 pdata->transceiver_enable(1);
1194 queue_work(priv->wq, &priv->restart_work); 1194 queue_work(priv->wq, &priv->restart_work);
1195 } else { 1195 } else {
1196 priv->after_suspend = 0; 1196 priv->after_suspend = 0;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5b0ee8ef5885..e59b3a392af6 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -40,6 +40,7 @@ struct mpc5xxx_can_data {
40 unsigned int type; 40 unsigned int type;
41 u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name, 41 u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name,
42 int *mscan_clksrc); 42 int *mscan_clksrc);
43 void (*put_clock)(struct platform_device *ofdev);
43}; 44};
44 45
45#ifdef CONFIG_PPC_MPC52xx 46#ifdef CONFIG_PPC_MPC52xx
@@ -148,7 +149,10 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
148 goto exit_put; 149 goto exit_put;
149 } 150 }
150 151
151 /* Determine the MSCAN device index from the physical address */ 152 /* Determine the MSCAN device index from the peripheral's
153 * physical address. Register address offsets against the
154 * IMMR base are: 0x1300, 0x1380, 0x2300, 0x2380
155 */
152 pval = of_get_property(ofdev->dev.of_node, "reg", &plen); 156 pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
153 BUG_ON(!pval || plen < sizeof(*pval)); 157 BUG_ON(!pval || plen < sizeof(*pval));
154 clockidx = (*pval & 0x80) ? 1 : 0; 158 clockidx = (*pval & 0x80) ? 1 : 0;
@@ -177,7 +181,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
177 clockdiv = 1; 181 clockdiv = 1;
178 182
179 if (!clock_name || !strcmp(clock_name, "sys")) { 183 if (!clock_name || !strcmp(clock_name, "sys")) {
180 sys_clk = clk_get(&ofdev->dev, "sys_clk"); 184 sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
181 if (IS_ERR(sys_clk)) { 185 if (IS_ERR(sys_clk)) {
182 dev_err(&ofdev->dev, "couldn't get sys_clk\n"); 186 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
183 goto exit_unmap; 187 goto exit_unmap;
@@ -200,7 +204,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
200 } 204 }
201 205
202 if (clocksrc < 0) { 206 if (clocksrc < 0) {
203 ref_clk = clk_get(&ofdev->dev, "ref_clk"); 207 ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
204 if (IS_ERR(ref_clk)) { 208 if (IS_ERR(ref_clk)) {
205 dev_err(&ofdev->dev, "couldn't get ref_clk\n"); 209 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
206 goto exit_unmap; 210 goto exit_unmap;
@@ -277,6 +281,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
277 dev = alloc_mscandev(); 281 dev = alloc_mscandev();
278 if (!dev) 282 if (!dev)
279 goto exit_dispose_irq; 283 goto exit_dispose_irq;
284 platform_set_drvdata(ofdev, dev);
285 SET_NETDEV_DEV(dev, &ofdev->dev);
280 286
281 priv = netdev_priv(dev); 287 priv = netdev_priv(dev);
282 priv->reg_base = base; 288 priv->reg_base = base;
@@ -293,8 +299,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
293 goto exit_free_mscan; 299 goto exit_free_mscan;
294 } 300 }
295 301
296 SET_NETDEV_DEV(dev, &ofdev->dev);
297
298 err = register_mscandev(dev, mscan_clksrc); 302 err = register_mscandev(dev, mscan_clksrc);
299 if (err) { 303 if (err) {
300 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", 304 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
@@ -302,8 +306,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
302 goto exit_free_mscan; 306 goto exit_free_mscan;
303 } 307 }
304 308
305 platform_set_drvdata(ofdev, dev);
306
307 dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", 309 dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
308 priv->reg_base, dev->irq, priv->can.clock.freq); 310 priv->reg_base, dev->irq, priv->can.clock.freq);
309 311
@@ -321,10 +323,17 @@ exit_unmap_mem:
321 323
322static int mpc5xxx_can_remove(struct platform_device *ofdev) 324static int mpc5xxx_can_remove(struct platform_device *ofdev)
323{ 325{
326 const struct of_device_id *match;
327 const struct mpc5xxx_can_data *data;
324 struct net_device *dev = platform_get_drvdata(ofdev); 328 struct net_device *dev = platform_get_drvdata(ofdev);
325 struct mscan_priv *priv = netdev_priv(dev); 329 struct mscan_priv *priv = netdev_priv(dev);
326 330
331 match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
332 data = match ? match->data : NULL;
333
327 unregister_mscandev(dev); 334 unregister_mscandev(dev);
335 if (data && data->put_clock)
336 data->put_clock(ofdev);
328 iounmap(priv->reg_base); 337 iounmap(priv->reg_base);
329 irq_dispose_mapping(dev->irq); 338 irq_dispose_mapping(dev->irq);
330 free_candev(dev); 339 free_candev(dev);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e6b40954e204..a955ec8c4b97 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -573,10 +573,21 @@ static int mscan_open(struct net_device *dev)
573 struct mscan_priv *priv = netdev_priv(dev); 573 struct mscan_priv *priv = netdev_priv(dev);
574 struct mscan_regs __iomem *regs = priv->reg_base; 574 struct mscan_regs __iomem *regs = priv->reg_base;
575 575
576 if (priv->clk_ipg) {
577 ret = clk_prepare_enable(priv->clk_ipg);
578 if (ret)
579 goto exit_retcode;
580 }
581 if (priv->clk_can) {
582 ret = clk_prepare_enable(priv->clk_can);
583 if (ret)
584 goto exit_dis_ipg_clock;
585 }
586
576 /* common open */ 587 /* common open */
577 ret = open_candev(dev); 588 ret = open_candev(dev);
578 if (ret) 589 if (ret)
579 return ret; 590 goto exit_dis_can_clock;
580 591
581 napi_enable(&priv->napi); 592 napi_enable(&priv->napi);
582 593
@@ -604,6 +615,13 @@ exit_free_irq:
604exit_napi_disable: 615exit_napi_disable:
605 napi_disable(&priv->napi); 616 napi_disable(&priv->napi);
606 close_candev(dev); 617 close_candev(dev);
618exit_dis_can_clock:
619 if (priv->clk_can)
620 clk_disable_unprepare(priv->clk_can);
621exit_dis_ipg_clock:
622 if (priv->clk_ipg)
623 clk_disable_unprepare(priv->clk_ipg);
624exit_retcode:
607 return ret; 625 return ret;
608} 626}
609 627
@@ -621,6 +639,11 @@ static int mscan_close(struct net_device *dev)
621 close_candev(dev); 639 close_candev(dev);
622 free_irq(dev->irq, dev); 640 free_irq(dev->irq, dev);
623 641
642 if (priv->clk_can)
643 clk_disable_unprepare(priv->clk_can);
644 if (priv->clk_ipg)
645 clk_disable_unprepare(priv->clk_ipg);
646
624 return 0; 647 return 0;
625} 648}
626 649
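Not every MSCAN platform wires up gateable clocks, so each clk call is guarded by a NULL test rather than relying on the common-clk convention that a NULL clk is a no-op. The open/close halves in brief:

        /* open path: enable only if the clock exists */
        if (priv->clk_ipg) {
                ret = clk_prepare_enable(priv->clk_ipg);
                if (ret)
                        return ret;
        }

        /* close path: mirror the guard */
        if (priv->clk_ipg)
                clk_disable_unprepare(priv->clk_ipg);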
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index af2ed8baf0a3..9c24d60a23b1 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -21,6 +21,7 @@
21#ifndef __MSCAN_H__ 21#ifndef __MSCAN_H__
22#define __MSCAN_H__ 22#define __MSCAN_H__
23 23
24#include <linux/clk.h>
24#include <linux/types.h> 25#include <linux/types.h>
25 26
26/* MSCAN control register 0 (CANCTL0) bits */ 27/* MSCAN control register 0 (CANCTL0) bits */
@@ -283,6 +284,8 @@ struct mscan_priv {
283 unsigned int type; /* MSCAN type variants */ 284 unsigned int type; /* MSCAN type variants */
284 unsigned long flags; 285 unsigned long flags;
285 void __iomem *reg_base; /* ioremap'ed address to registers */ 286 void __iomem *reg_base; /* ioremap'ed address to registers */
287 struct clk *clk_ipg; /* clock for registers */
288 struct clk *clk_can; /* clock for bitrates */
286 u8 shadow_statflg; 289 u8 shadow_statflg;
287 u8 shadow_canrier; 290 u8 shadow_canrier;
288 u8 cur_pri; 291 u8 cur_pri;
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a5f91e1e8fe3..becef25fa194 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
148 148
149config NE_H8300 149config NE_H8300
150 tristate "NE2000 compatible support for H8/300" 150 tristate "NE2000 compatible support for H8/300"
151 depends on H8300 151 depends on H8300H_AKI3068NET || H8300H_H8MAX
152 ---help--- 152 ---help---
153 Say Y here if you want to use the NE2000 compatible 153 Say Y here if you want to use the NE2000 compatible
154 controller on the Renesas H8/300 processor. 154 controller on the Renesas H8/300 processor.
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e1d26433d619..f92f001551da 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev)
707 707
708#ifdef CONFIG_AX88796_93CX6 708#ifdef CONFIG_AX88796_93CX6
709 if (ax->plat->flags & AXFLG_HAS_93CX6) { 709 if (ax->plat->flags & AXFLG_HAS_93CX6) {
710 unsigned char mac_addr[6]; 710 unsigned char mac_addr[ETH_ALEN];
711 struct eeprom_93cx6 eeprom; 711 struct eeprom_93cx6 eeprom;
712 712
713 eeprom.data = ei_local; 713 eeprom.data = ei_local;
@@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev)
719 (__le16 __force *)mac_addr, 719 (__le16 __force *)mac_addr,
720 sizeof(mac_addr) >> 1); 720 sizeof(mac_addr) >> 1);
721 721
722 memcpy(dev->dev_addr, mac_addr, 6); 722 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
723 } 723 }
724#endif 724#endif
725 if (ax->plat->wordlength == 2) { 725 if (ax->plat->wordlength == 2) {
@@ -840,7 +840,7 @@ static int ax_probe(struct platform_device *pdev)
840 ei_local = netdev_priv(dev); 840 ei_local = netdev_priv(dev);
841 ax = to_ax_dev(dev); 841 ax = to_ax_dev(dev);
842 842
843 ax->plat = pdev->dev.platform_data; 843 ax->plat = dev_get_platdata(&pdev->dev);
844 platform_set_drvdata(pdev, dev); 844 platform_set_drvdata(pdev, dev);
845 845
846 ei_local->rxcr_base = ax->plat->rcr_val; 846 ei_local->rxcr_base = ax->plat->rcr_val;
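dev_get_platdata() is just the accessor form of dev->platform_data, so these conversions are mechanical; typical use (the struct name follows the ax88796 platform header):

        struct ax_plat_data *plat = dev_get_platdata(&pdev->dev);

        if (!plat)
                return -ENODEV;         /* no board data supplied */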
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 2037080c504d..506b0248c400 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig"
90source "drivers/net/ethernet/mellanox/Kconfig" 90source "drivers/net/ethernet/mellanox/Kconfig"
91source "drivers/net/ethernet/micrel/Kconfig" 91source "drivers/net/ethernet/micrel/Kconfig"
92source "drivers/net/ethernet/microchip/Kconfig" 92source "drivers/net/ethernet/microchip/Kconfig"
93source "drivers/net/ethernet/moxa/Kconfig"
93source "drivers/net/ethernet/myricom/Kconfig" 94source "drivers/net/ethernet/myricom/Kconfig"
94 95
95config FEALNX 96config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 390bd0bfaa27..c0b8789952e7 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
42obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ 42obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
43obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ 43obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
44obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ 44obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
45obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
45obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ 46obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
46obj-$(CONFIG_FEALNX) += fealnx.o 47obj-$(CONFIG_FEALNX) += fealnx.o
47obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ 48obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e904b3838dcc..e66684a438f5 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1647,12 +1647,12 @@ static int bfin_mac_probe(struct platform_device *pdev)
1647 1647
1648 setup_mac_addr(ndev->dev_addr); 1648 setup_mac_addr(ndev->dev_addr);
1649 1649
1650 if (!pdev->dev.platform_data) { 1650 if (!dev_get_platdata(&pdev->dev)) {
1651 dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n"); 1651 dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
1652 rc = -ENODEV; 1652 rc = -ENODEV;
1653 goto out_err_probe_mac; 1653 goto out_err_probe_mac;
1654 } 1654 }
1655 pd = pdev->dev.platform_data; 1655 pd = dev_get_platdata(&pdev->dev);
1656 lp->mii_bus = platform_get_drvdata(pd); 1656 lp->mii_bus = platform_get_drvdata(pd);
1657 if (!lp->mii_bus) { 1657 if (!lp->mii_bus) {
1658 dev_err(&pdev->dev, "Cannot get mii_bus!\n"); 1658 dev_err(&pdev->dev, "Cannot get mii_bus!\n");
@@ -1660,7 +1660,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
1660 goto out_err_probe_mac; 1660 goto out_err_probe_mac;
1661 } 1661 }
1662 lp->mii_bus->priv = ndev; 1662 lp->mii_bus->priv = ndev;
1663 mii_bus_data = pd->dev.platform_data; 1663 mii_bus_data = dev_get_platdata(&pd->dev);
1664 1664
1665 rc = mii_probe(ndev, mii_bus_data->phy_mode); 1665 rc = mii_probe(ndev, mii_bus_data->phy_mode);
1666 if (rc) { 1666 if (rc) {
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7ff4b30d55ea..e06694555144 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,18 +1464,18 @@ static int greth_of_probe(struct platform_device *ofdev)
1464 } 1464 }
1465 1465
1466 /* Allocate TX descriptor ring in coherent memory */ 1466 /* Allocate TX descriptor ring in coherent memory */
1467 greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, 1467 greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
1468 &greth->tx_bd_base_phys, 1468 &greth->tx_bd_base_phys,
1469 GFP_KERNEL | __GFP_ZERO); 1469 GFP_KERNEL);
1470 if (!greth->tx_bd_base) { 1470 if (!greth->tx_bd_base) {
1471 err = -ENOMEM; 1471 err = -ENOMEM;
1472 goto error3; 1472 goto error3;
1473 } 1473 }
1474 1474
1475 /* Allocate RX descriptor ring in coherent memory */ 1475 /* Allocate RX descriptor ring in coherent memory */
1476 greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, 1476 greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
1477 &greth->rx_bd_base_phys, 1477 &greth->rx_bd_base_phys,
1478 GFP_KERNEL | __GFP_ZERO); 1478 GFP_KERNEL);
1479 if (!greth->rx_bd_base) { 1479 if (!greth->rx_bd_base) {
1480 err = -ENOMEM; 1480 err = -ENOMEM;
1481 goto error4; 1481 goto error4;
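dma_zalloc_coherent() folds __GFP_ZERO into the helper, so callers stop spelling the zeroing flag out by hand; the call sites reduce to the following (ring, size and ring_phys are placeholder names):

        /* returns zeroed, DMA-coherent memory, or NULL on failure */
        ring = dma_zalloc_coherent(dev, size, &ring_phys, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;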
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index ceb45bc963a9..91d52b495848 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1131,7 +1131,7 @@ static int au1000_probe(struct platform_device *pdev)
1131 writel(0, aup->enable); 1131 writel(0, aup->enable);
1132 aup->mac_enabled = 0; 1132 aup->mac_enabled = 0;
1133 1133
1134 pd = pdev->dev.platform_data; 1134 pd = dev_get_platdata(&pdev->dev);
1135 if (!pd) { 1135 if (!pd) {
1136 dev_info(&pdev->dev, "no platform_data passed," 1136 dev_info(&pdev->dev, "no platform_data passed,"
1137 " PHY search on MAC0\n"); 1137 " PHY search on MAC0\n");
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index ed2130727643..2d8e28819779 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1521 char *chipname; 1521 char *chipname;
1522 struct net_device *dev; 1522 struct net_device *dev;
1523 const struct pcnet32_access *a = NULL; 1523 const struct pcnet32_access *a = NULL;
1524 u8 promaddr[6]; 1524 u8 promaddr[ETH_ALEN];
1525 int ret = -ENODEV; 1525 int ret = -ENODEV;
1526 1526
1527 /* reset the chip */ 1527 /* reset the chip */
@@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1665 } 1665 }
1666 1666
1667 /* read PROM address and compare with CSR address */ 1667 /* read PROM address and compare with CSR address */
1668 for (i = 0; i < 6; i++) 1668 for (i = 0; i < ETH_ALEN; i++)
1669 promaddr[i] = inb(ioaddr + i); 1669 promaddr[i] = inb(ioaddr + i);
1670 1670
1671 if (memcmp(promaddr, dev->dev_addr, 6) || 1671 if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
1672 !is_valid_ether_addr(dev->dev_addr)) { 1672 !is_valid_ether_addr(dev->dev_addr)) {
1673 if (is_valid_ether_addr(promaddr)) { 1673 if (is_valid_ether_addr(promaddr)) {
1674 if (pcnet32_debug & NETIF_MSG_PROBE) { 1674 if (pcnet32_debug & NETIF_MSG_PROBE) {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 55d79cb53a79..9e1601487263 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev)
149 struct sk_buff *skb = tx_buff->skb; 149 struct sk_buff *skb = tx_buff->skb;
150 unsigned int info = le32_to_cpu(txbd->info); 150 unsigned int info = le32_to_cpu(txbd->info);
151 151
152 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
153
154 if ((info & FOR_EMAC) || !txbd->data) 152 if ((info & FOR_EMAC) || !txbd->data)
155 break; 153 break;
156 154
@@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev)
180 txbd->data = 0; 178 txbd->data = 0;
181 txbd->info = 0; 179 txbd->info = 0;
182 180
181 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
182
183 if (netif_queue_stopped(ndev)) 183 if (netif_queue_stopped(ndev))
184 netif_wake_queue(ndev); 184 netif_wake_queue(ndev);
185 } 185 }
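The two-line move fixes when *txbd_dirty advances: only after the FOR_EMAC ownership test passes and the buffer has been reclaimed, never before, so the cleaner can no longer step past a descriptor the EMAC still owns. Reassembled from the hunks above (the loop head and the elided unmap/free are sketched):

        for (i = 0; i < TX_BD_NUM; i++) {
                struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
                unsigned int info = le32_to_cpu(txbd->info);

                if ((info & FOR_EMAC) || !txbd->data)
                        break;          /* still owned by the EMAC */

                /* ... dma_unmap the buffer, free the skb, clear data/info ... */

                *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
        }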
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 52c96036dcc4..2fa5b86f139d 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -130,7 +130,7 @@ config BNX2X_SRIOV
130 130
131config BGMAC 131config BGMAC
132 tristate "BCMA bus GBit core support" 132 tristate "BCMA bus GBit core support"
133 depends on BCMA_HOST_SOC && HAS_DMA 133 depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
134 select PHYLIB 134 select PHYLIB
135 ---help--- 135 ---help---
136 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. 136 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b1bcd4ba4744..8ac48fbf8a66 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev)
948 948
949 /* allocate rx dma ring */ 949 /* allocate rx dma ring */
950 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 950 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
951 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, 951 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
952 GFP_KERNEL | __GFP_ZERO);
953 if (!p) { 952 if (!p) {
954 ret = -ENOMEM; 953 ret = -ENOMEM;
955 goto out_freeirq_tx; 954 goto out_freeirq_tx;
@@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev)
960 959
961 /* allocate tx dma ring */ 960 /* allocate tx dma ring */
962 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 961 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
963 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, 962 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
964 GFP_KERNEL | __GFP_ZERO);
965 if (!p) { 963 if (!p) {
966 ret = -ENOMEM; 964 ret = -ENOMEM;
967 goto out_free_rx_ring; 965 goto out_free_rx_ring;
@@ -1747,11 +1745,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
1747 if (!bcm_enet_shared_base[0]) 1745 if (!bcm_enet_shared_base[0])
1748 return -ENODEV; 1746 return -ENODEV;
1749 1747
1750 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1751 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1748 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1752 res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 1749 res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1753 res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); 1750 res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1754 if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) 1751 if (!res_irq || !res_irq_rx || !res_irq_tx)
1755 return -ENODEV; 1752 return -ENODEV;
1756 1753
1757 ret = 0; 1754 ret = 0;
@@ -1767,9 +1764,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
1767 if (ret) 1764 if (ret)
1768 goto out; 1765 goto out;
1769 1766
1770 priv->base = devm_request_and_ioremap(&pdev->dev, res_mem); 1767 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1771 if (priv->base == NULL) { 1768 priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
1772 ret = -ENOMEM; 1769 if (IS_ERR(priv->base)) {
1770 ret = PTR_ERR(priv->base);
1773 goto out; 1771 goto out;
1774 } 1772 }
1775 1773
@@ -1800,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
1800 priv->rx_ring_size = BCMENET_DEF_RX_DESC; 1798 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1801 priv->tx_ring_size = BCMENET_DEF_TX_DESC; 1799 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1802 1800
1803 pd = pdev->dev.platform_data; 1801 pd = dev_get_platdata(&pdev->dev);
1804 if (pd) { 1802 if (pd) {
1805 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 1803 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1806 priv->has_phy = pd->has_phy; 1804 priv->has_phy = pd->has_phy;
@@ -1964,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
1964 } else { 1962 } else {
1965 struct bcm63xx_enet_platform_data *pd; 1963 struct bcm63xx_enet_platform_data *pd;
1966 1964
1967 pd = pdev->dev.platform_data; 1965 pd = dev_get_platdata(&pdev->dev);
1968 if (pd && pd->mii_config) 1966 if (pd && pd->mii_config)
1969 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, 1967 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1970 bcm_enet_mdio_write_mii); 1968 bcm_enet_mdio_write_mii);
@@ -2742,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2742 priv->tx_ring_size = BCMENET_DEF_TX_DESC; 2740 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2743 priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; 2741 priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2744 2742
2745 pd = pdev->dev.platform_data; 2743 pd = dev_get_platdata(&pdev->dev);
2746 if (pd) { 2744 if (pd) {
2747 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 2745 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2748 memcpy(priv->used_ports, pd->used_ports, 2746 memcpy(priv->used_ports, pd->used_ports,
@@ -2836,7 +2834,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
2836 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2834 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2837 release_mem_region(res->start, resource_size(res)); 2835 release_mem_region(res->start, resource_size(res));
2838 2836
2839 platform_set_drvdata(pdev, NULL);
2840 free_netdev(dev); 2837 free_netdev(dev);
2841 return 0; 2838 return 0;
2842} 2839}
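Dropping platform_set_drvdata(pdev, NULL) on the remove path is safe because the driver core now clears drvdata itself when a device is unbound; a remove() shrinks to something like this (example_remove is hypothetical and omits the driver's other teardown):

        static int example_remove(struct platform_device *pdev)
        {
                struct net_device *dev = platform_get_drvdata(pdev);

                free_netdev(dev);
                return 0;       /* no explicit NULL reset required */
        }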
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6a2de1d79ff6..e838a3f74b69 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1,6 +1,6 @@
1/* bnx2.c: Broadcom NX2 network driver. 1/* bnx2.c: Broadcom NX2 network driver.
2 * 2 *
3 * Copyright (c) 2004-2011 Broadcom Corporation 3 * Copyright (c) 2004-2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
58#include "bnx2_fw.h" 58#include "bnx2_fw.h"
59 59
60#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
61#define DRV_MODULE_VERSION "2.2.3" 61#define DRV_MODULE_VERSION "2.2.4"
62#define DRV_MODULE_RELDATE "June 27, 2012" 62#define DRV_MODULE_RELDATE "Aug 05, 2013"
63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -853,9 +853,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
853 bp->status_stats_size = status_blk_size + 853 bp->status_stats_size = status_blk_size +
854 sizeof(struct statistics_block); 854 sizeof(struct statistics_block);
855 855
856 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, 856 status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 &bp->status_blk_mapping, 857 &bp->status_blk_mapping, GFP_KERNEL);
858 GFP_KERNEL | __GFP_ZERO);
859 if (status_blk == NULL) 858 if (status_blk == NULL)
860 goto alloc_mem_err; 859 goto alloc_mem_err;
861 860
@@ -3908,136 +3907,121 @@ init_cpu_err:
3908 return rc; 3907 return rc;
3909} 3908}
3910 3909
3911static int 3910static void
3912bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) 3911bnx2_setup_wol(struct bnx2 *bp)
3913{ 3912{
3914 u16 pmcsr; 3913 int i;
3914 u32 val, wol_msg;
3915 3915
3916 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 3916 if (bp->wol) {
3917 u32 advertising;
3918 u8 autoneg;
3917 3919
3918 switch (state) { 3920 autoneg = bp->autoneg;
3919 case PCI_D0: { 3921 advertising = bp->advertising;
3920 u32 val;
3921 3922
3922 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 3923 if (bp->phy_port == PORT_TP) {
3923 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3924 bp->autoneg = AUTONEG_SPEED;
3924 PCI_PM_CTRL_PME_STATUS); 3925 bp->advertising = ADVERTISED_10baseT_Half |
3926 ADVERTISED_10baseT_Full |
3927 ADVERTISED_100baseT_Half |
3928 ADVERTISED_100baseT_Full |
3929 ADVERTISED_Autoneg;
3930 }
3925 3931
3926 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 3932 spin_lock_bh(&bp->phy_lock);
3927 /* delay required during transition out of D3hot */ 3933 bnx2_setup_phy(bp, bp->phy_port);
3928 msleep(20); 3934 spin_unlock_bh(&bp->phy_lock);
3929 3935
3930 val = BNX2_RD(bp, BNX2_EMAC_MODE); 3936 bp->autoneg = autoneg;
3931 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; 3937 bp->advertising = advertising;
3932 val &= ~BNX2_EMAC_MODE_MPKT;
3933 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3934 3938
3935 val = BNX2_RD(bp, BNX2_RPM_CONFIG); 3939 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3936 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3937 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3938 break;
3939 }
3940 case PCI_D3hot: {
3941 int i;
3942 u32 val, wol_msg;
3943
3944 if (bp->wol) {
3945 u32 advertising;
3946 u8 autoneg;
3947
3948 autoneg = bp->autoneg;
3949 advertising = bp->advertising;
3950
3951 if (bp->phy_port == PORT_TP) {
3952 bp->autoneg = AUTONEG_SPEED;
3953 bp->advertising = ADVERTISED_10baseT_Half |
3954 ADVERTISED_10baseT_Full |
3955 ADVERTISED_100baseT_Half |
3956 ADVERTISED_100baseT_Full |
3957 ADVERTISED_Autoneg;
3958 }
3959 3940
3960 spin_lock_bh(&bp->phy_lock); 3941 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3961 bnx2_setup_phy(bp, bp->phy_port);
3962 spin_unlock_bh(&bp->phy_lock);
3963 3942
3964 bp->autoneg = autoneg; 3943 /* Enable port mode. */
3965 bp->advertising = advertising; 3944 val &= ~BNX2_EMAC_MODE_PORT;
3945 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3946 BNX2_EMAC_MODE_ACPI_RCVD |
3947 BNX2_EMAC_MODE_MPKT;
3948 if (bp->phy_port == PORT_TP) {
3949 val |= BNX2_EMAC_MODE_PORT_MII;
3950 } else {
3951 val |= BNX2_EMAC_MODE_PORT_GMII;
3952 if (bp->line_speed == SPEED_2500)
3953 val |= BNX2_EMAC_MODE_25G_MODE;
3954 }
3966 3955
3967 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); 3956 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3968 3957
3969 val = BNX2_RD(bp, BNX2_EMAC_MODE); 3958 /* receive all multicast */
3959 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3960 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3961 0xffffffff);
3962 }
3963 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3970 3964
3971 /* Enable port mode. */ 3965 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3972 val &= ~BNX2_EMAC_MODE_PORT; 3966 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3973 val |= BNX2_EMAC_MODE_MPKT_RCVD | 3967 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3974 BNX2_EMAC_MODE_ACPI_RCVD | 3968 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3975 BNX2_EMAC_MODE_MPKT;
3976 if (bp->phy_port == PORT_TP)
3977 val |= BNX2_EMAC_MODE_PORT_MII;
3978 else {
3979 val |= BNX2_EMAC_MODE_PORT_GMII;
3980 if (bp->line_speed == SPEED_2500)
3981 val |= BNX2_EMAC_MODE_25G_MODE;
3982 }
3983 3969
3984 BNX2_WR(bp, BNX2_EMAC_MODE, val); 3970 /* Need to enable EMAC and RPM for WOL. */
3971 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3972 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3973 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3974 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3985 3975
3986 /* receive all multicast */ 3976 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3987 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 3977 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3988 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), 3978 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3989 0xffffffff);
3990 }
3991 BNX2_WR(bp, BNX2_EMAC_RX_MODE,
3992 BNX2_EMAC_RX_MODE_SORT_MODE);
3993 3979
3994 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | 3980 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3995 BNX2_RPM_SORT_USER0_MC_EN; 3981 } else {
3996 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); 3982 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3997 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); 3983 }
3998 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
3999 BNX2_RPM_SORT_USER0_ENA);
4000 3984
4001 /* Need to enable EMAC and RPM for WOL. */ 3985 if (!(bp->flags & BNX2_FLAG_NO_WOL))
4002 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 3986 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
4003 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4004 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4005 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4006 3987
4007 val = BNX2_RD(bp, BNX2_RPM_CONFIG); 3988}
4008 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4009 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4010 3989
4011 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 3990static int
4012 } 3991bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4013 else { 3992{
4014 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; 3993 switch (state) {
4015 } 3994 case PCI_D0: {
3995 u32 val;
3996
3997 pci_enable_wake(bp->pdev, PCI_D0, false);
3998 pci_set_power_state(bp->pdev, PCI_D0);
4016 3999
4017 if (!(bp->flags & BNX2_FLAG_NO_WOL)) 4000 val = BNX2_RD(bp, BNX2_EMAC_MODE);
4018 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 4001 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4019 1, 0); 4002 val &= ~BNX2_EMAC_MODE_MPKT;
4003 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4020 4004
4021 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4005 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4006 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4007 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4008 break;
4009 }
4010 case PCI_D3hot: {
4011 bnx2_setup_wol(bp);
4012 pci_wake_from_d3(bp->pdev, bp->wol);
4022 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || 4013 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4023 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { 4014 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4024 4015
4025 if (bp->wol) 4016 if (bp->wol)
4026 pmcsr |= 3; 4017 pci_set_power_state(bp->pdev, PCI_D3hot);
4027 } 4018 } else {
4028 else { 4019 pci_set_power_state(bp->pdev, PCI_D3hot);
4029 pmcsr |= 3;
4030 }
4031 if (bp->wol) {
4032 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4033 } 4020 }
4034 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4035 pmcsr);
4036 4021
4037 /* No more memory access after this point until 4022 /* No more memory access after this point until
4038 * device is brought back to D0. 4023 * device is brought back to D0.
4039 */ 4024 */
4040 udelay(50);
4041 break; 4025 break;
4042 } 4026 }
4043 default: 4027 default:
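[Note] The hunk above splits the old bnx2_set_power_state() in two: WoL programming moves into bnx2_setup_wol(), and the raw PMCSR read-modify-write sequences are replaced by PCI core helpers, which handle the D3hot exit delay and the PME bits themselves. For reference, a minimal sketch of the same transition (example_* names are hypothetical):

	#include <linux/pci.h>

	/* Sketch only: drive D0/D3hot through PCI core helpers instead of
	 * writing PCI_PM_CTRL by hand, as the new code does. */
	static void example_set_power(struct pci_dev *pdev, bool up, bool wol)
	{
		if (up) {
			pci_enable_wake(pdev, PCI_D0, false);	/* clear PME enable */
			pci_set_power_state(pdev, PCI_D0);	/* core inserts the D3hot exit delay */
		} else {
			pci_wake_from_d3(pdev, wol);		/* arm PME for D3 when WoL is on */
			pci_set_power_state(pdev, PCI_D3hot);
		}
	}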
@@ -6317,7 +6301,6 @@ bnx2_open(struct net_device *dev)
6317 6301
6318 netif_carrier_off(dev); 6302 netif_carrier_off(dev);
6319 6303
6320 bnx2_set_power_state(bp, PCI_D0);
6321 bnx2_disable_int(bp); 6304 bnx2_disable_int(bp);
6322 6305
6323 rc = bnx2_setup_int_mode(bp, disable_msi); 6306 rc = bnx2_setup_int_mode(bp, disable_msi);
@@ -6724,7 +6707,6 @@ bnx2_close(struct net_device *dev)
6724 bnx2_del_napi(bp); 6707 bnx2_del_napi(bp);
6725 bp->link_up = 0; 6708 bp->link_up = 0;
6726 netif_carrier_off(bp->dev); 6709 netif_carrier_off(bp->dev);
6727 bnx2_set_power_state(bp, PCI_D3hot);
6728 return 0; 6710 return 0;
6729} 6711}
6730 6712
@@ -7081,6 +7063,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7081 else { 7063 else {
7082 bp->wol = 0; 7064 bp->wol = 0;
7083 } 7065 }
7066
7067 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7068
7084 return 0; 7069 return 0;
7085} 7070}
7086 7071
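[Note] With this change the ethtool WoL setter also updates the PM core's wakeup state, so the sysfs power/wakeup attribute and the driver's bp->wol flag stay in sync. A condensed sketch of the idiom (validation of wol->wolopts abridged):

	static int example_set_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
	{
		struct bnx2 *bp = netdev_priv(dev);

		bp->wol = (wol->wolopts & WAKE_MAGIC) ? 1 : 0;
		/* mirror the driver flag into the PM core */
		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
		return 0;
	}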
@@ -7156,9 +7141,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7156 struct bnx2 *bp = netdev_priv(dev); 7141 struct bnx2 *bp = netdev_priv(dev);
7157 int rc; 7142 int rc;
7158 7143
7159 if (!netif_running(dev))
7160 return -EAGAIN;
7161
7162 /* parameters already validated in ethtool_get_eeprom */ 7144 /* parameters already validated in ethtool_get_eeprom */
7163 7145
7164 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); 7146 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7173,9 +7155,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7173 struct bnx2 *bp = netdev_priv(dev); 7155 struct bnx2 *bp = netdev_priv(dev);
7174 int rc; 7156 int rc;
7175 7157
7176 if (!netif_running(dev))
7177 return -EAGAIN;
7178
7179 /* parameters already validated in ethtool_set_eeprom */ 7158 /* parameters already validated in ethtool_set_eeprom */
7180 7159
7181 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); 7160 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7535,8 +7514,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7535{ 7514{
7536 struct bnx2 *bp = netdev_priv(dev); 7515 struct bnx2 *bp = netdev_priv(dev);
7537 7516
7538 bnx2_set_power_state(bp, PCI_D0);
7539
7540 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); 7517 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7541 if (etest->flags & ETH_TEST_FL_OFFLINE) { 7518 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7542 int i; 7519 int i;
@@ -7585,8 +7562,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7585 etest->flags |= ETH_TEST_FL_FAILED; 7562 etest->flags |= ETH_TEST_FL_FAILED;
7586 7563
7587 } 7564 }
7588 if (!netif_running(bp->dev))
7589 bnx2_set_power_state(bp, PCI_D3hot);
7590} 7565}
7591 7566
7592static void 7567static void
@@ -7658,8 +7633,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7658 7633
7659 switch (state) { 7634 switch (state) {
7660 case ETHTOOL_ID_ACTIVE: 7635 case ETHTOOL_ID_ACTIVE:
7661 bnx2_set_power_state(bp, PCI_D0);
7662
7663 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); 7636 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7664 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); 7637 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7665 return 1; /* cycle on/off once per second */ 7638 return 1; /* cycle on/off once per second */
@@ -7680,9 +7653,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7680 case ETHTOOL_ID_INACTIVE: 7653 case ETHTOOL_ID_INACTIVE:
7681 BNX2_WR(bp, BNX2_EMAC_LED, 0); 7654 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7682 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); 7655 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7683
7684 if (!netif_running(dev))
7685 bnx2_set_power_state(bp, PCI_D3hot);
7686 break; 7656 break;
7687 } 7657 }
7688 7658
@@ -8130,8 +8100,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8130 goto err_out_release; 8100 goto err_out_release;
8131 } 8101 }
8132 8102
8133 bnx2_set_power_state(bp, PCI_D0);
8134
8135 /* Configure byte swap and enable write to the reg_window registers. 8103 /* Configure byte swap and enable write to the reg_window registers.
8136 * Rely on CPU to do target byte swapping on big endian systems 8104 * Rely on CPU to do target byte swapping on big endian systems
8137 * The chip's target access swapping will not swap all accesses 8105 * The chip's target access swapping will not swap all accesses
@@ -8170,13 +8138,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8170 8138
8171 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && 8139 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8172 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { 8140 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8173 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) 8141 if (pdev->msix_cap)
8174 bp->flags |= BNX2_FLAG_MSIX_CAP; 8142 bp->flags |= BNX2_FLAG_MSIX_CAP;
8175 } 8143 }
8176 8144
8177 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && 8145 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8178 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { 8146 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8179 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) 8147 if (pdev->msi_cap)
8180 bp->flags |= BNX2_FLAG_MSI_CAP; 8148 bp->flags |= BNX2_FLAG_MSI_CAP;
8181 } 8149 }
8182 8150
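[Note] pci_find_capability() walks the config-space capability list on every call; since the PCI core began caching the MSI and MSI-X capability offsets in struct pci_dev, a non-zero pdev->msi_cap / pdev->msix_cap is an equivalent, cheaper test. A sketch, assuming only that the device has been enumerated:

	#include <linux/pci.h>

	static bool example_has_msix(struct pci_dev *pdev)
	{
		/* msix_cap holds the capability's config-space offset,
		 * or 0 if the device does not implement MSI-X */
		return pdev->msix_cap != 0;
	}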
@@ -8369,6 +8337,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8369 bp->wol = 0; 8337 bp->wol = 0;
8370 } 8338 }
8371 8339
8340 if (bp->flags & BNX2_FLAG_NO_WOL)
8341 device_set_wakeup_capable(&bp->pdev->dev, false);
8342 else
8343 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8344
8372 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { 8345 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8373 bp->tx_quick_cons_trip_int = 8346 bp->tx_quick_cons_trip_int =
8374 bp->tx_quick_cons_trip; 8347 bp->tx_quick_cons_trip;
@@ -8609,46 +8582,52 @@ bnx2_remove_one(struct pci_dev *pdev)
8609} 8582}
8610 8583
8611static int 8584static int
8612bnx2_suspend(struct pci_dev *pdev, pm_message_t state) 8585bnx2_suspend(struct device *device)
8613{ 8586{
8587 struct pci_dev *pdev = to_pci_dev(device);
8614 struct net_device *dev = pci_get_drvdata(pdev); 8588 struct net_device *dev = pci_get_drvdata(pdev);
8615 struct bnx2 *bp = netdev_priv(dev); 8589 struct bnx2 *bp = netdev_priv(dev);
8616 8590
8617 /* PCI register 4 needs to be saved whether netif_running() or not. 8591 if (netif_running(dev)) {
8618 * MSI address and data need to be saved if using MSI and 8592 cancel_work_sync(&bp->reset_task);
8619 * netif_running(). 8593 bnx2_netif_stop(bp, true);
8620 */ 8594 netif_device_detach(dev);
8621 pci_save_state(pdev); 8595 del_timer_sync(&bp->timer);
8622 if (!netif_running(dev)) 8596 bnx2_shutdown_chip(bp);
8623 return 0; 8597 __bnx2_free_irq(bp);
8624 8598 bnx2_free_skbs(bp);
8625 cancel_work_sync(&bp->reset_task); 8599 }
8626 bnx2_netif_stop(bp, true); 8600 bnx2_setup_wol(bp);
8627 netif_device_detach(dev);
8628 del_timer_sync(&bp->timer);
8629 bnx2_shutdown_chip(bp);
8630 bnx2_free_skbs(bp);
8631 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8632 return 0; 8601 return 0;
8633} 8602}
8634 8603
8635static int 8604static int
8636bnx2_resume(struct pci_dev *pdev) 8605bnx2_resume(struct device *device)
8637{ 8606{
8607 struct pci_dev *pdev = to_pci_dev(device);
8638 struct net_device *dev = pci_get_drvdata(pdev); 8608 struct net_device *dev = pci_get_drvdata(pdev);
8639 struct bnx2 *bp = netdev_priv(dev); 8609 struct bnx2 *bp = netdev_priv(dev);
8640 8610
8641 pci_restore_state(pdev);
8642 if (!netif_running(dev)) 8611 if (!netif_running(dev))
8643 return 0; 8612 return 0;
8644 8613
8645 bnx2_set_power_state(bp, PCI_D0); 8614 bnx2_set_power_state(bp, PCI_D0);
8646 netif_device_attach(dev); 8615 netif_device_attach(dev);
8616 bnx2_request_irq(bp);
8647 bnx2_init_nic(bp, 1); 8617 bnx2_init_nic(bp, 1);
8648 bnx2_netif_start(bp, true); 8618 bnx2_netif_start(bp, true);
8649 return 0; 8619 return 0;
8650} 8620}
8651 8621
8622#ifdef CONFIG_PM_SLEEP
8623static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8624#define BNX2_PM_OPS (&bnx2_pm_ops)
8625
8626#else
8627
8628#define BNX2_PM_OPS NULL
8629
8630#endif /* CONFIG_PM_SLEEP */
8652/** 8631/**
8653 * bnx2_io_error_detected - called when PCI error is detected 8632 * bnx2_io_error_detected - called when PCI error is detected
8654 * @pdev: Pointer to PCI device 8633 * @pdev: Pointer to PCI device
@@ -8694,24 +8673,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8694{ 8673{
8695 struct net_device *dev = pci_get_drvdata(pdev); 8674 struct net_device *dev = pci_get_drvdata(pdev);
8696 struct bnx2 *bp = netdev_priv(dev); 8675 struct bnx2 *bp = netdev_priv(dev);
8697 pci_ers_result_t result; 8676 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8698 int err; 8677 int err = 0;
8699 8678
8700 rtnl_lock(); 8679 rtnl_lock();
8701 if (pci_enable_device(pdev)) { 8680 if (pci_enable_device(pdev)) {
8702 dev_err(&pdev->dev, 8681 dev_err(&pdev->dev,
8703 "Cannot re-enable PCI device after reset\n"); 8682 "Cannot re-enable PCI device after reset\n");
8704 result = PCI_ERS_RESULT_DISCONNECT;
8705 } else { 8683 } else {
8706 pci_set_master(pdev); 8684 pci_set_master(pdev);
8707 pci_restore_state(pdev); 8685 pci_restore_state(pdev);
8708 pci_save_state(pdev); 8686 pci_save_state(pdev);
8709 8687
8710 if (netif_running(dev)) { 8688 if (netif_running(dev))
8711 bnx2_set_power_state(bp, PCI_D0); 8689 err = bnx2_init_nic(bp, 1);
8712 bnx2_init_nic(bp, 1); 8690
8713 } 8691 if (!err)
8714 result = PCI_ERS_RESULT_RECOVERED; 8692 result = PCI_ERS_RESULT_RECOVERED;
8693 }
8694
8695 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8696 bnx2_napi_enable(bp);
8697 dev_close(dev);
8715 } 8698 }
8716 rtnl_unlock(); 8699 rtnl_unlock();
8717 8700
@@ -8748,6 +8731,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
8748 rtnl_unlock(); 8731 rtnl_unlock();
8749} 8732}
8750 8733
8734static void bnx2_shutdown(struct pci_dev *pdev)
8735{
8736 struct net_device *dev = pci_get_drvdata(pdev);
8737 struct bnx2 *bp;
8738
8739 if (!dev)
8740 return;
8741
8742 bp = netdev_priv(dev);
8743 if (!bp)
8744 return;
8745
8746 rtnl_lock();
8747 if (netif_running(dev))
8748 dev_close(bp->dev);
8749
8750 if (system_state == SYSTEM_POWER_OFF)
8751 bnx2_set_power_state(bp, PCI_D3hot);
8752
8753 rtnl_unlock();
8754}
8755
8751static const struct pci_error_handlers bnx2_err_handler = { 8756static const struct pci_error_handlers bnx2_err_handler = {
8752 .error_detected = bnx2_io_error_detected, 8757 .error_detected = bnx2_io_error_detected,
8753 .slot_reset = bnx2_io_slot_reset, 8758 .slot_reset = bnx2_io_slot_reset,
@@ -8759,9 +8764,9 @@ static struct pci_driver bnx2_pci_driver = {
8759 .id_table = bnx2_pci_tbl, 8764 .id_table = bnx2_pci_tbl,
8760 .probe = bnx2_init_one, 8765 .probe = bnx2_init_one,
8761 .remove = bnx2_remove_one, 8766 .remove = bnx2_remove_one,
8762 .suspend = bnx2_suspend, 8767 .driver.pm = BNX2_PM_OPS,
8763 .resume = bnx2_resume,
8764 .err_handler = &bnx2_err_handler, 8768 .err_handler = &bnx2_err_handler,
8769 .shutdown = bnx2_shutdown,
8765}; 8770};
8766 8771
8767module_pci_driver(bnx2_pci_driver); 8772module_pci_driver(bnx2_pci_driver);
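[Note] The bnx2.c changes above retire the legacy pci_driver .suspend/.resume callbacks (which take a pm_message_t and must save and restore PCI state themselves) in favor of dev_pm_ops bound through driver.pm; the PCI core then performs pci_save_state()/pci_restore_state() and the D-state transitions around the driver callbacks. A stripped-down sketch of that wiring (example_* names hypothetical):

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int example_suspend(struct device *device)
	{
		struct pci_dev *pdev = to_pci_dev(device);

		dev_dbg(&pdev->dev, "quiescing\n");	/* stop the device here */
		return 0;	/* no pci_save_state()/pci_set_power_state(): the core does it */
	}

	static int example_resume(struct device *device)
	{
		struct pci_dev *pdev = to_pci_dev(device);

		dev_dbg(&pdev->dev, "reinitializing\n");	/* state already restored */
		return 0;
	}

	#ifdef CONFIG_PM_SLEEP
	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
	#define EXAMPLE_PM_OPS (&example_pm_ops)
	#else
	#define EXAMPLE_PM_OPS NULL
	#endif

	static struct pci_driver example_driver = {
		/* .probe, .remove, .id_table ... */
		.driver.pm = EXAMPLE_PM_OPS,
	};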
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 172efbecfea2..18cb2d23e56b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -1,6 +1,6 @@
1/* bnx2.h: Broadcom NX2 network driver. 1/* bnx2.h: Broadcom NX2 network driver.
2 * 2 *
3 * Copyright (c) 2004-2011 Broadcom Corporation 3 * Copyright (c) 2004-2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 00b88cbfde25..0c338026ce01 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
825#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) 825#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
826 826
827#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ 827#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
828#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ 828#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
829#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) 829#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
830#error "Min DB doorbell stride is 8" 830#error "Min DB doorbell stride is 8"
831#endif 831#endif
832#define DPM_TRIGER_TYPE 0x40
833#define DOORBELL(bp, cid, val) \ 832#define DOORBELL(bp, cid, val) \
834 do { \ 833 do { \
835 writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ 834 writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
836 DPM_TRIGER_TYPE); \
837 } while (0) 835 } while (0)
838 836
839/* TX CSUM helpers */ 837/* TX CSUM helpers */
@@ -1100,13 +1098,27 @@ struct bnx2x_port {
1100extern struct workqueue_struct *bnx2x_wq; 1098extern struct workqueue_struct *bnx2x_wq;
1101 1099
1102#define BNX2X_MAX_NUM_OF_VFS 64 1100#define BNX2X_MAX_NUM_OF_VFS 64
1103#define BNX2X_VF_CID_WND 0 1101#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
1104#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) 1102#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
1105#define BNX2X_CLIENTS_PER_VF 1 1103
1106#define BNX2X_FIRST_VF_CID 256 1104/* We need to reserve doorbell addresses for all VF and queue combinations */
1107#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) 1105#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
1106
1107/* The doorbell is configured to have the same number of CIDs for PFs and for
1108 * VFs. For this reason the PF CID zone is as large as the VF zone.
1109 */
1110#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
1111#define BNX2X_MAX_NUM_VF_QUEUES 64
1108#define BNX2X_VF_ID_INVALID 0xFF 1112#define BNX2X_VF_ID_INVALID 0xFF
1109 1113
1114/* the number of VF CIDS multiplied by the amount of bytes reserved for each
1115 * cid must not exceed the size of the VF doorbell
1116 */
1117#define BNX2X_VF_BAR_SIZE 512
1118#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
1119#error "VF doorbell bar size is 512"
1120#endif
1121
1110/* 1122/*
1111 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is 1123 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
 1112 * controlled by the number of fast-path status blocks supported by the 1124
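[Note] The new constants encode a doorbell-BAR budget: BNX2X_VF_CID_WND = 4 gives 1 << 4 = 16 CIDs per VF, each CID consumes 1 << BNX2X_DB_SHIFT = 8 doorbell bytes (hence the stride change from 7 to 3 earlier in this diff), and 64 VFs * 16 CIDs = 1024 VF CIDs in total. The guard above, restated with those numbers worked out:

	/* per-VF doorbell usage: 16 CIDs * 8 bytes = 128 bytes,
	 * which must fit within the 512-byte VF doorbell BAR */
	#if (BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT)) > BNX2X_VF_BAR_SIZE
	#error "per-VF doorbell usage exceeds the VF BAR"
	#endif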
@@ -1331,7 +1343,7 @@ enum {
1331 BNX2X_SP_RTNL_ENABLE_SRIOV, 1343 BNX2X_SP_RTNL_ENABLE_SRIOV,
1332 BNX2X_SP_RTNL_VFPF_MCAST, 1344 BNX2X_SP_RTNL_VFPF_MCAST,
1333 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 1345 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
1334 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 1346 BNX2X_SP_RTNL_RX_MODE,
1335 BNX2X_SP_RTNL_HYPERVISOR_VLAN, 1347 BNX2X_SP_RTNL_HYPERVISOR_VLAN,
1336 BNX2X_SP_RTNL_TX_STOP, 1348 BNX2X_SP_RTNL_TX_STOP,
1337 BNX2X_SP_RTNL_TX_RESUME, 1349 BNX2X_SP_RTNL_TX_RESUME,
@@ -1650,10 +1662,10 @@ struct bnx2x {
1650 dma_addr_t fw_stats_data_mapping; 1662 dma_addr_t fw_stats_data_mapping;
1651 int fw_stats_data_sz; 1663 int fw_stats_data_sz;
1652 1664
1653 /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB 1665 /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
1654 * context size we need 8 ILT entries. 1666 * context size we need 8 ILT entries.
1655 */ 1667 */
1656#define ILT_MAX_L2_LINES 8 1668#define ILT_MAX_L2_LINES 32
1657 struct hw_context context[ILT_MAX_L2_LINES]; 1669 struct hw_context context[ILT_MAX_L2_LINES];
1658 1670
1659 struct bnx2x_ilt *ilt; 1671 struct bnx2x_ilt *ilt;
@@ -1869,7 +1881,7 @@ extern int num_queues;
1869#define FUNC_FLG_TPA 0x0008 1881#define FUNC_FLG_TPA 0x0008
1870#define FUNC_FLG_SPQ 0x0010 1882#define FUNC_FLG_SPQ 0x0010
1871#define FUNC_FLG_LEADING 0x0020 /* PF only */ 1883#define FUNC_FLG_LEADING 0x0020 /* PF only */
1872 1884#define FUNC_FLG_LEADING_STATS 0x0040
1873struct bnx2x_func_init_params { 1885struct bnx2x_func_init_params {
1874 /* dma */ 1886 /* dma */
1875 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ 1887 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -2069,9 +2081,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
2069void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, 2081void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2070 bool is_pf); 2082 bool is_pf);
2071 2083
2072#define BNX2X_ILT_ZALLOC(x, y, size) \ 2084#define BNX2X_ILT_ZALLOC(x, y, size) \
2073 x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ 2085 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
2074 GFP_KERNEL | __GFP_ZERO)
2075 2086
2076#define BNX2X_ILT_FREE(x, y, size) \ 2087#define BNX2X_ILT_FREE(x, y, size) \
2077 do { \ 2088 do { \
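[Note] Several hunks in this series (here, in bnx2.c, and in bnx2x_cmn.h below) fold dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) into dma_zalloc_coherent(..., GFP_KERNEL). At the time of this series the two were equivalent: dma_zalloc_coherent() was a wrapper that ORed in __GFP_ZERO. The pattern in isolation:

	/* before: zeroing requested explicitly */
	buf = dma_alloc_coherent(&pdev->dev, size, &mapping,
				 GFP_KERNEL | __GFP_ZERO);

	/* after: the zalloc wrapper implies __GFP_ZERO */
	buf = dma_zalloc_coherent(&pdev->dev, size, &mapping, GFP_KERNEL);
	if (!buf)
		goto alloc_mem_err;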
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0cc26110868d..2361bf236ce3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1948,7 +1948,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1948 } 1948 }
1949} 1949}
1950 1950
1951static int bnx2x_init_rss_pf(struct bnx2x *bp) 1951static int bnx2x_init_rss(struct bnx2x *bp)
1952{ 1952{
1953 int i; 1953 int i;
1954 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1954 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1972,8 +1972,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
1972 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); 1972 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1973} 1973}
1974 1974
1975int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 1975int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1976 bool config_hash) 1976 bool config_hash, bool enable)
1977{ 1977{
1978 struct bnx2x_config_rss_params params = {NULL}; 1978 struct bnx2x_config_rss_params params = {NULL};
1979 1979
@@ -1988,17 +1988,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1988 1988
1989 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 1989 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1990 1990
1991 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags); 1991 if (enable) {
1992 1992 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1993 /* RSS configuration */ 1993
1994 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags); 1994 /* RSS configuration */
1995 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 1995 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1996 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags); 1996 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1997 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags); 1997 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1998 if (rss_obj->udp_rss_v4) 1998 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1999 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags); 1999 if (rss_obj->udp_rss_v4)
2000 if (rss_obj->udp_rss_v6) 2000 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2001 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags); 2001 if (rss_obj->udp_rss_v6)
2002 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2003 } else {
2004 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2005 }
2002 2006
2003 /* Hash bits */ 2007 /* Hash bits */
2004 params.rss_result_mask = MULTI_MASK; 2008 params.rss_result_mask = MULTI_MASK;
@@ -2007,11 +2011,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2007 2011
2008 if (config_hash) { 2012 if (config_hash) {
2009 /* RSS keys */ 2013 /* RSS keys */
2010 prandom_bytes(params.rss_key, sizeof(params.rss_key)); 2014 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2011 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); 2015 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2012 } 2016 }
2013 2017
2014 return bnx2x_config_rss(bp, &params); 2018 if (IS_PF(bp))
2019 return bnx2x_config_rss(bp, &params);
2020 else
2021 return bnx2x_vfpf_config_rss(bp, &params);
2015} 2022}
2016 2023
2017static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 2024static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
@@ -2066,7 +2073,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
2066 rparam.mcast_obj = &bp->mcast_obj; 2073 rparam.mcast_obj = &bp->mcast_obj;
2067 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 2074 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2068 2075
2069 /* Add a DEL command... */ 2076 /* Add a DEL command... - Since we're doing a driver cleanup only,
2077 * we take a lock surrounding both the initial send and the CONTs,
2078 * as we don't want a true completion to disrupt us in the middle.
2079 */
2080 netif_addr_lock_bh(bp->dev);
2070 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 2081 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2071 if (rc < 0) 2082 if (rc < 0)
2072 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", 2083 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2078,11 +2089,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
2078 if (rc < 0) { 2089 if (rc < 0) {
2079 BNX2X_ERR("Failed to clean multi-cast object: %d\n", 2090 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2080 rc); 2091 rc);
2092 netif_addr_unlock_bh(bp->dev);
2081 return; 2093 return;
2082 } 2094 }
2083 2095
2084 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2096 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2085 } 2097 }
2098 netif_addr_unlock_bh(bp->dev);
2086} 2099}
2087 2100
2088#ifndef BNX2X_STOP_ON_ERROR 2101#ifndef BNX2X_STOP_ON_ERROR
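[Note] Taking netif_addr_lock_bh() around both the initial DEL and the follow-up CONT commands means a real completion arriving mid-cleanup cannot interleave with this driver-only teardown. Condensed, with the error handling abridged, the locked sequence reads:

	netif_addr_lock_bh(bp->dev);
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	while (rc > 0)	/* positive rc: more bins pending, keep draining */
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	netif_addr_unlock_bh(bp->dev);	/* rc < 0 is logged and bails out above */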
@@ -2438,9 +2451,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
2438 } 2451 }
2439 2452
2440 /* Initialize Rx filter. */ 2453 /* Initialize Rx filter. */
2441 netif_addr_lock_bh(bp->dev); 2454 bnx2x_set_rx_mode_inner(bp);
2442 bnx2x_set_rx_mode(bp->dev);
2443 netif_addr_unlock_bh(bp->dev);
2444 2455
2445 /* re-read iscsi info */ 2456 /* re-read iscsi info */
2446 bnx2x_get_iscsi_info(bp); 2457 bnx2x_get_iscsi_info(bp);
@@ -2647,38 +2658,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2647 2658
2648 /* initialize FW coalescing state machines in RAM */ 2659 /* initialize FW coalescing state machines in RAM */
2649 bnx2x_update_coalesce(bp); 2660 bnx2x_update_coalesce(bp);
2661 }
2650 2662
2651 /* setup the leading queue */ 2663 /* setup the leading queue */
2652 rc = bnx2x_setup_leading(bp); 2664 rc = bnx2x_setup_leading(bp);
2653 if (rc) { 2665 if (rc) {
2654 BNX2X_ERR("Setup leading failed!\n"); 2666 BNX2X_ERR("Setup leading failed!\n");
2655 LOAD_ERROR_EXIT(bp, load_error3); 2667 LOAD_ERROR_EXIT(bp, load_error3);
2656 } 2668 }
2657
2658 /* set up the rest of the queues */
2659 for_each_nondefault_eth_queue(bp, i) {
2660 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2661 if (rc) {
2662 BNX2X_ERR("Queue setup failed\n");
2663 LOAD_ERROR_EXIT(bp, load_error3);
2664 }
2665 }
2666 2669
2667 /* setup rss */ 2670 /* set up the rest of the queues */
2668 rc = bnx2x_init_rss_pf(bp); 2671 for_each_nondefault_eth_queue(bp, i) {
2672 if (IS_PF(bp))
2673 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2674 else /* VF */
2675 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2669 if (rc) { 2676 if (rc) {
2670 BNX2X_ERR("PF RSS init failed\n"); 2677 BNX2X_ERR("Queue %d setup failed\n", i);
2671 LOAD_ERROR_EXIT(bp, load_error3); 2678 LOAD_ERROR_EXIT(bp, load_error3);
2672 } 2679 }
2680 }
2673 2681
2674 } else { /* vf */ 2682 /* setup rss */
2675 for_each_eth_queue(bp, i) { 2683 rc = bnx2x_init_rss(bp);
2676 rc = bnx2x_vfpf_setup_q(bp, i); 2684 if (rc) {
2677 if (rc) { 2685 BNX2X_ERR("PF RSS init failed\n");
2678 BNX2X_ERR("Queue setup failed\n"); 2686 LOAD_ERROR_EXIT(bp, load_error3);
2679 LOAD_ERROR_EXIT(bp, load_error3);
2680 }
2681 }
2682 } 2687 }
2683 2688
2684 /* Now when Clients are configured we are ready to work */ 2689 /* Now when Clients are configured we are ready to work */
@@ -2710,9 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2710 /* Start fast path */ 2715 /* Start fast path */
2711 2716
2712 /* Initialize Rx filter. */ 2717 /* Initialize Rx filter. */
2713 netif_addr_lock_bh(bp->dev); 2718 bnx2x_set_rx_mode_inner(bp);
2714 bnx2x_set_rx_mode(bp->dev);
2715 netif_addr_unlock_bh(bp->dev);
2716 2719
2717 /* Start the Tx */ 2720 /* Start the Tx */
2718 switch (load_mode) { 2721 switch (load_mode) {
@@ -4789,6 +4792,11 @@ int bnx2x_resume(struct pci_dev *pdev)
4789void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, 4792void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4790 u32 cid) 4793 u32 cid)
4791{ 4794{
4795 if (!cxt) {
4796 BNX2X_ERR("bad context pointer %p\n", cxt);
4797 return;
4798 }
4799
4792 /* ustorm cxt validation */ 4800 /* ustorm cxt validation */
4793 cxt->ustorm_ag_context.cdu_usage = 4801 cxt->ustorm_ag_context.cdu_usage =
4794 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), 4802 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c07a6d054cfe..da8fcaa74495 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -51,8 +51,7 @@ extern int int_mode;
51 51
52#define BNX2X_PCI_ALLOC(x, y, size) \ 52#define BNX2X_PCI_ALLOC(x, y, size) \
53 do { \ 53 do { \
54 x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ 54 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
55 GFP_KERNEL | __GFP_ZERO); \
56 if (x == NULL) \ 55 if (x == NULL) \
57 goto alloc_mem_err; \ 56 goto alloc_mem_err; \
58 DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ 57 DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
@@ -106,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
106 * @rss_obj: RSS object to use 105 * @rss_obj: RSS object to use
107 * @ind_table: indirection table to configure 106 * @ind_table: indirection table to configure
108 * @config_hash: re-configure RSS hash keys configuration 107 * @config_hash: re-configure RSS hash keys configuration
108 * @enable: enabled or disabled configuration
109 */ 109 */
110int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 110int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
111 bool config_hash); 111 bool config_hash, bool enable);
112 112
113/** 113/**
114 * bnx2x__init_func_obj - init function object 114 * bnx2x__init_func_obj - init function object
@@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
418 * netif_addr_lock_bh() 418 * netif_addr_lock_bh()
419 */ 419 */
420void bnx2x_set_rx_mode(struct net_device *dev); 420void bnx2x_set_rx_mode(struct net_device *dev);
421void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
421 422
422/** 423/**
423 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. 424 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
@@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
980 981
981static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) 982static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
982{ 983{
983 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash); 984 return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
984} 985}
985 986
986/** 987/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c5f225101684..2612e3c715d4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3281 DP(BNX2X_MSG_ETHTOOL, 3281 DP(BNX2X_MSG_ETHTOOL,
3282 "rss re-configured, UDP 4-tupple %s\n", 3282 "rss re-configured, UDP 4-tupple %s\n",
3283 udp_rss_requested ? "enabled" : "disabled"); 3283 udp_rss_requested ? "enabled" : "disabled");
3284 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); 3284 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
3285 } else if ((info->flow_type == UDP_V6_FLOW) && 3285 } else if ((info->flow_type == UDP_V6_FLOW) &&
3286 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { 3286 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
3287 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; 3287 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
3288 DP(BNX2X_MSG_ETHTOOL, 3288 DP(BNX2X_MSG_ETHTOOL,
3289 "rss re-configured, UDP 4-tupple %s\n", 3289 "rss re-configured, UDP 4-tupple %s\n",
3290 udp_rss_requested ? "enabled" : "disabled"); 3290 udp_rss_requested ? "enabled" : "disabled");
3291 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); 3291 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
3292 } 3292 }
3293 return 0; 3293 return 0;
3294 3294
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab34..664568420c9b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
6501 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 6501 struct bnx2x_phy *phy = &params->phy[INT_PHY];
6502 if (vars->line_speed == SPEED_AUTO_NEG && 6502 if (vars->line_speed == SPEED_AUTO_NEG &&
6503 (CHIP_IS_E1x(bp) || 6503 (CHIP_IS_E1x(bp) ||
6504 CHIP_IS_E2(bp))) 6504 CHIP_IS_E2(bp))) {
6505 bnx2x_set_parallel_detection(phy, params); 6505 bnx2x_set_parallel_detection(phy, params);
6506 if (params->phy[INT_PHY].config_init) 6506 if (params->phy[INT_PHY].config_init)
6507 params->phy[INT_PHY].config_init(phy, 6507 params->phy[INT_PHY].config_init(phy,
6508 params, 6508 params,
6509 vars); 6509 vars);
6510 }
6510 } 6511 }
6511 6512
6512 /* Init external phy*/ 6513 /* Init external phy*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 815f2dea6337..634a793c1c46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6893 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 6893 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6894 6894
6895 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 6895 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6896 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); 6896
6897 if (!CHIP_REV_IS_SLOW(bp)) 6897 if (!CHIP_REV_IS_SLOW(bp))
6898 /* enable hw interrupt from doorbell Q */ 6898 /* enable hw interrupt from doorbell Q */
6899 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6899 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -8063,7 +8063,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8063 8063
8064int bnx2x_setup_leading(struct bnx2x *bp) 8064int bnx2x_setup_leading(struct bnx2x *bp)
8065{ 8065{
8066 return bnx2x_setup_queue(bp, &bp->fp[0], 1); 8066 if (IS_PF(bp))
8067 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8068 else /* VF */
8069 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8067} 8070}
8068 8071
8069/** 8072/**
@@ -8077,8 +8080,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
8077{ 8080{
8078 int rc = 0; 8081 int rc = 0;
8079 8082
8080 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) 8083 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8084 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8081 return -EINVAL; 8085 return -EINVAL;
8086 }
8082 8087
8083 switch (int_mode) { 8088 switch (int_mode) {
8084 case BNX2X_INT_MODE_MSIX: 8089 case BNX2X_INT_MODE_MSIX:
@@ -9647,11 +9652,9 @@ sp_rtnl_not_reset:
9647 } 9652 }
9648 } 9653 }
9649 9654
9650 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 9655 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
9651 &bp->sp_rtnl_state)) { 9656 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
9652 DP(BNX2X_MSG_SP, 9657 bnx2x_set_rx_mode_inner(bp);
9653 "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
9654 bnx2x_vfpf_storm_rx_mode(bp);
9655 } 9658 }
9656 9659
9657 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, 9660 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
@@ -11649,9 +11652,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11649 * second status block for the L2 queue, and a third status block for 11652 * second status block for the L2 queue, and a third status block for
11650 * CNIC if supported. 11653 * CNIC if supported.
11651 */ 11654 */
11652 if (CNIC_SUPPORT(bp)) 11655 if (IS_VF(bp))
11656 bp->min_msix_vec_cnt = 1;
11657 else if (CNIC_SUPPORT(bp))
11653 bp->min_msix_vec_cnt = 3; 11658 bp->min_msix_vec_cnt = 3;
11654 else 11659 else /* PF w/o cnic */
11655 bp->min_msix_vec_cnt = 2; 11660 bp->min_msix_vec_cnt = 2;
11656 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); 11661 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11657 11662
@@ -11868,34 +11873,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
11868void bnx2x_set_rx_mode(struct net_device *dev) 11873void bnx2x_set_rx_mode(struct net_device *dev)
11869{ 11874{
11870 struct bnx2x *bp = netdev_priv(dev); 11875 struct bnx2x *bp = netdev_priv(dev);
11871 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11872 11876
11873 if (bp->state != BNX2X_STATE_OPEN) { 11877 if (bp->state != BNX2X_STATE_OPEN) {
11874 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 11878 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11875 return; 11879 return;
11880 } else {
11881 /* Schedule an SP task to handle rest of change */
11882 DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
11883 smp_mb__before_clear_bit();
11884 set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
11885 smp_mb__after_clear_bit();
11886 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11876 } 11887 }
11888}
11889
11890void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
11891{
11892 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11877 11893
11878 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); 11894 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
11879 11895
11880 if (dev->flags & IFF_PROMISC) 11896 netif_addr_lock_bh(bp->dev);
11897
11898 if (bp->dev->flags & IFF_PROMISC) {
11881 rx_mode = BNX2X_RX_MODE_PROMISC; 11899 rx_mode = BNX2X_RX_MODE_PROMISC;
11882 else if ((dev->flags & IFF_ALLMULTI) || 11900 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
11883 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && 11901 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
11884 CHIP_IS_E1(bp))) 11902 CHIP_IS_E1(bp))) {
11885 rx_mode = BNX2X_RX_MODE_ALLMULTI; 11903 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11886 else { 11904 } else {
11887 if (IS_PF(bp)) { 11905 if (IS_PF(bp)) {
11888 /* some multicasts */ 11906 /* some multicasts */
11889 if (bnx2x_set_mc_list(bp) < 0) 11907 if (bnx2x_set_mc_list(bp) < 0)
11890 rx_mode = BNX2X_RX_MODE_ALLMULTI; 11908 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11891 11909
11910 /* release bh lock, as bnx2x_set_uc_list might sleep */
11911 netif_addr_unlock_bh(bp->dev);
11892 if (bnx2x_set_uc_list(bp) < 0) 11912 if (bnx2x_set_uc_list(bp) < 0)
11893 rx_mode = BNX2X_RX_MODE_PROMISC; 11913 rx_mode = BNX2X_RX_MODE_PROMISC;
11914 netif_addr_lock_bh(bp->dev);
11894 } else { 11915 } else {
11895 /* configuring mcast to a vf involves sleeping (when we 11916 /* configuring mcast to a vf involves sleeping (when we
11896 * wait for the pf's response). Since this function is 11917 * wait for the pf's response).
11897 * called from non sleepable context we must schedule
11898 * a work item for this purpose
11899 */ 11918 */
11900 smp_mb__before_clear_bit(); 11919 smp_mb__before_clear_bit();
11901 set_bit(BNX2X_SP_RTNL_VFPF_MCAST, 11920 set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
@@ -11913,22 +11932,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
11913 /* Schedule the rx_mode command */ 11932 /* Schedule the rx_mode command */
11914 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 11933 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11915 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 11934 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
11935 netif_addr_unlock_bh(bp->dev);
11916 return; 11936 return;
11917 } 11937 }
11918 11938
11919 if (IS_PF(bp)) { 11939 if (IS_PF(bp)) {
11920 bnx2x_set_storm_rx_mode(bp); 11940 bnx2x_set_storm_rx_mode(bp);
11941 netif_addr_unlock_bh(bp->dev);
11921 } else { 11942 } else {
11922 /* configuring rx mode to storms in a vf involves sleeping (when 11943 /* VF will need to request the PF to make this change, and so
11923 * we wait for the pf's response). Since this function is 11944 * the VF needs to release the bottom-half lock prior to the
11924 * called from non sleepable context we must schedule 11945 * request (as it will likely require sleep on the VF side)
11925 * a work item for this purpose
11926 */ 11946 */
11927 smp_mb__before_clear_bit(); 11947 netif_addr_unlock_bh(bp->dev);
11928 set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 11948 bnx2x_vfpf_storm_rx_mode(bp);
11929 &bp->sp_rtnl_state);
11930 smp_mb__after_clear_bit();
11931 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11932 } 11949 }
11933} 11950}
11934 11951
@@ -12550,19 +12567,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
12550 * @dev: pci device 12567 * @dev: pci device
12551 * 12568 *
12552 */ 12569 */
12553static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, 12570static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
12554 int cnic_cnt, bool is_vf)
12555{ 12571{
12556 int pos, index; 12572 int index;
12557 u16 control = 0; 12573 u16 control = 0;
12558 12574
12559 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
12560
12561 /* 12575 /*
12562 * If MSI-X is not supported - return number of SBs needed to support 12576 * If MSI-X is not supported - return number of SBs needed to support
12563 * one fast path queue: one FP queue + SB for CNIC 12577 * one fast path queue: one FP queue + SB for CNIC
12564 */ 12578 */
12565 if (!pos) { 12579 if (!pdev->msix_cap) {
12566 dev_info(&pdev->dev, "no msix capability found\n"); 12580 dev_info(&pdev->dev, "no msix capability found\n");
12567 return 1 + cnic_cnt; 12581 return 1 + cnic_cnt;
12568 } 12582 }
@@ -12575,11 +12589,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
12575 * without the default SB. 12589 * without the default SB.
12576 * For VFs there is no default SB, then we return (index+1). 12590 * For VFs there is no default SB, then we return (index+1).
12577 */ 12591 */
12578 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); 12592 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
12579 12593
12580 index = control & PCI_MSIX_FLAGS_QSIZE; 12594 index = control & PCI_MSIX_FLAGS_QSIZE;
12581 12595
12582 return is_vf ? index + 1 : index; 12596 return index;
12583} 12597}
12584 12598
12585static int set_max_cos_est(int chip_id) 12599static int set_max_cos_est(int chip_id)
@@ -12659,10 +12673,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12659 is_vf = set_is_vf(ent->driver_data); 12673 is_vf = set_is_vf(ent->driver_data);
12660 cnic_cnt = is_vf ? 0 : 1; 12674 cnic_cnt = is_vf ? 0 : 1;
12661 12675
12662 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf); 12676 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
12677
12678 /* add another SB for VF as it has no default SB */
12679 max_non_def_sbs += is_vf ? 1 : 0;
12663 12680
12664 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 12681 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
12665 rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt; 12682 rss_count = max_non_def_sbs - cnic_cnt;
12666 12683
12667 if (rss_count < 1) 12684 if (rss_count < 1)
12668 return -EINVAL; 12685 return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 8e627b886d7b..5ecf267dc4cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6335,6 +6335,7 @@
6335#define PCI_ID_VAL2 0x438 6335#define PCI_ID_VAL2 0x438
6336#define PCI_ID_VAL3 0x43c 6336#define PCI_ID_VAL3 0x43c
6337 6337
6338#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
6338#define GRC_CONFIG_REG_PF_INIT_VF 0x624 6339#define GRC_CONFIG_REG_PF_INIT_VF 0x624
6339#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf 6340#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
6340/* First VF_NUM for PF is encoded in this register. 6341/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8f03c984550f..9fbeee522d2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
159 } 159 }
160} 160}
161 161
162static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
163 struct bnx2x_exe_queue_obj *o)
164{
165 spin_lock_bh(&o->lock);
166
167 __bnx2x_exe_queue_reset_pending(bp, o);
168
169 spin_unlock_bh(&o->lock);
170}
171
172/** 162/**
173 * bnx2x_exe_queue_step - execute one execution chunk atomically 163 * bnx2x_exe_queue_step - execute one execution chunk atomically
174 * 164 *
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
176 * @o: queue 166 * @o: queue
177 * @ramrod_flags: flags 167 * @ramrod_flags: flags
178 * 168 *
179 * (Atomicity is ensured using the exe_queue->lock). 169 * (Should be called while holding the exe_queue->lock).
180 */ 170 */
181static inline int bnx2x_exe_queue_step(struct bnx2x *bp, 171static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
182 struct bnx2x_exe_queue_obj *o, 172 struct bnx2x_exe_queue_obj *o,
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
187 177
188 memset(&spacer, 0, sizeof(spacer)); 178 memset(&spacer, 0, sizeof(spacer));
189 179
190 spin_lock_bh(&o->lock);
191
192 /* Next step should not be performed until the current is finished, 180 /* Next step should not be performed until the current is finished,
193 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to 181 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
194 * properly clear object internals without sending any command to the FW 182 * properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
200 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); 188 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
201 __bnx2x_exe_queue_reset_pending(bp, o); 189 __bnx2x_exe_queue_reset_pending(bp, o);
202 } else { 190 } else {
203 spin_unlock_bh(&o->lock);
204 return 1; 191 return 1;
205 } 192 }
206 } 193 }
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
228 } 215 }
229 216
230 /* Sanity check */ 217 /* Sanity check */
231 if (!cur_len) { 218 if (!cur_len)
232 spin_unlock_bh(&o->lock);
233 return 0; 219 return 0;
234 }
235 220
236 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); 221 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
237 if (rc < 0) 222 if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
245 */ 230 */
246 __bnx2x_exe_queue_reset_pending(bp, o); 231 __bnx2x_exe_queue_reset_pending(bp, o);
247 232
248 spin_unlock_bh(&o->lock);
249 return rc; 233 return rc;
250} 234}
251 235
@@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
432 return true; 416 return true;
433} 417}
434 418
419/**
420 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
421 *
422 * @bp: device handle
423 * @o: vlan_mac object
424 *
425 * @details: Non-blocking implementation; should be called under execution
426 * queue lock.
427 */
428static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
429 struct bnx2x_vlan_mac_obj *o)
430{
431 if (o->head_reader) {
432 DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
433 return -EBUSY;
434 }
435
436 DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
437 return 0;
438}
439
440/**
441 * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
442 *
443 * @bp: device handle
444 * @o: vlan_mac object
445 *
446 * @details Should be called under execution queue lock; notice it might release
447 * and reclaim it during its run.
448 */
449static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
450 struct bnx2x_vlan_mac_obj *o)
451{
452 int rc;
453 unsigned long ramrod_flags = o->saved_ramrod_flags;
454
455 DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
456 ramrod_flags);
457 o->head_exe_request = false;
458 o->saved_ramrod_flags = 0;
459 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
460 if (rc != 0) {
461 BNX2X_ERR("execution of pending commands failed with rc %d\n",
462 rc);
463#ifdef BNX2X_STOP_ON_ERROR
464 bnx2x_panic();
465#endif
466 }
467}
468
469/**
470 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
471 *
472 * @bp: device handle
473 * @o: vlan_mac object
474 * @ramrod_flags: ramrod flags of missed execution
475 *
476 * @details Should be called under execution queue lock.
477 */
478static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
479 struct bnx2x_vlan_mac_obj *o,
480 unsigned long ramrod_flags)
481{
482 o->head_exe_request = true;
483 o->saved_ramrod_flags = ramrod_flags;
484 DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
485 ramrod_flags);
486}
487
488/**
489 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
490 *
491 * @bp: device handle
492 * @o: vlan_mac object
493 *
494 * @details Should be called under execution queue lock. Notice if a pending
495 * execution exists, it would perform it - possibly releasing and
496 * reclaiming the execution queue lock.
497 */
498static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
499 struct bnx2x_vlan_mac_obj *o)
500{
501 /* It's possible a new pending execution was added since this writer
502 * executed. If so, execute again. [Ad infinitum]
503 */
504 while (o->head_exe_request) {
505 DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
506 __bnx2x_vlan_mac_h_exec_pending(bp, o);
507 }
508}
509
510/**
511 * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
512 *
513 * @bp: device handle
514 * @o: vlan_mac object
515 *
516 * @details Notice if a pending execution exists, it would perform it -
517 * possibly releasing and reclaiming the execution queue lock.
518 */
519void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
520 struct bnx2x_vlan_mac_obj *o)
521{
522 spin_lock_bh(&o->exe_queue.lock);
523 __bnx2x_vlan_mac_h_write_unlock(bp, o);
524 spin_unlock_bh(&o->exe_queue.lock);
525}
526
527/**
528 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
529 *
530 * @bp: device handle
531 * @o: vlan_mac object
532 *
533 * @details Should be called under the execution queue lock. May sleep. May
534 * release and reclaim execution queue lock during its run.
535 */
536static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
537 struct bnx2x_vlan_mac_obj *o)
538{
539 /* If we got here, we're holding lock --> no WRITER exists */
540 o->head_reader++;
541 DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
542 o->head_reader);
543
544 return 0;
545}
546
547/**
548 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
549 *
550 * @bp: device handle
551 * @o: vlan_mac object
552 *
553 * @details May sleep. Claims and releases execution queue lock during its run.
554 */
555int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
556 struct bnx2x_vlan_mac_obj *o)
557{
558 int rc;
559
560 spin_lock_bh(&o->exe_queue.lock);
561 rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
562 spin_unlock_bh(&o->exe_queue.lock);
563
564 return rc;
565}
566
567/**
568 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
569 *
570 * @bp: device handle
571 * @o: vlan_mac object
572 *
573 * @details Should be called under execution queue lock. Notice if a pending
574 * execution exists, it would be performed if this was the last
 575 * reader, possibly releasing and reclaiming the execution queue lock.
576 */
577static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
578 struct bnx2x_vlan_mac_obj *o)
579{
580 if (!o->head_reader) {
581 BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
582#ifdef BNX2X_STOP_ON_ERROR
583 bnx2x_panic();
584#endif
585 } else {
586 o->head_reader--;
587 DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
588 o->head_reader);
589 }
590
591 /* It's possible a new pending execution was added, and that this reader
592 * was last - if so we need to execute the command.
593 */
594 if (!o->head_reader && o->head_exe_request) {
595 DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
596
597 /* Writer release will do the trick */
598 __bnx2x_vlan_mac_h_write_unlock(bp, o);
599 }
600}
601
602/**
603 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
604 *
605 * @bp: device handle
606 * @o: vlan_mac object
607 *
608 * @details Notice if a pending execution exists, it would be performed if this
609 * was the last reader. Claims and releases the execution queue lock
610 * during its run.
611 */
612void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
613 struct bnx2x_vlan_mac_obj *o)
614{
615 spin_lock_bh(&o->exe_queue.lock);
616 __bnx2x_vlan_mac_h_read_unlock(bp, o);
617 spin_unlock_bh(&o->exe_queue.lock);
618}
619
435static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, 620static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
436 int n, u8 *base, u8 stride, u8 size) 621 int n, u8 *base, u8 stride, u8 size)
437{ 622{
438 struct bnx2x_vlan_mac_registry_elem *pos; 623 struct bnx2x_vlan_mac_registry_elem *pos;
439 u8 *next = base; 624 u8 *next = base;
440 int counter = 0; 625 int counter = 0;
626 int read_lock;
627
628 DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
629 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
630 if (read_lock != 0)
631 BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
441 632
442 /* traverse list */ 633 /* traverse list */
443 list_for_each_entry(pos, &o->head, link) { 634 list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
449 next += stride + size; 640 next += stride + size;
450 } 641 }
451 } 642 }
643
644 if (read_lock == 0) {
645 DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
646 bnx2x_vlan_mac_h_read_unlock(bp, o);
647 }
648
452 return counter * ETH_ALEN; 649 return counter * ETH_ALEN;
453} 650}
454 651
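[Note] The helpers added above build a reader/writer discipline on top of the execution-queue spinlock: readers increment head_reader while holding the lock; an execution step (the "writer") proceeds only when head_reader is zero, otherwise it parks its ramrod flags in head_exe_request/saved_ramrod_flags, and the last reader out replays the parked step. Compressed to pseudo-C (not literal code, locking noted inline):

	/* reader side (bnx2x_vlan_mac_h_read_lock/unlock) */
	spin_lock_bh(&o->exe_queue.lock);
	o->head_reader++;		/* lock held => no writer is mid-step */
	spin_unlock_bh(&o->exe_queue.lock);
	/* ...safely walk o->head... */
	spin_lock_bh(&o->exe_queue.lock);
	if (--o->head_reader == 0 && o->head_exe_request)
		__bnx2x_vlan_mac_h_exec_pending(bp, o);	/* replay parked step */
	spin_unlock_bh(&o->exe_queue.lock);

	/* writer side (__bnx2x_vlan_mac_execute_step) */
	spin_lock_bh(&o->exe_queue.lock);
	if (__bnx2x_vlan_mac_h_write_trylock(bp, o))	/* -EBUSY: readers present */
		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
	else
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
	spin_unlock_bh(&o->exe_queue.lock);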
@@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1397 return -EBUSY; 1594 return -EBUSY;
1398} 1595}
1399 1596
1597static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1598 struct bnx2x_vlan_mac_obj *o,
1599 unsigned long *ramrod_flags)
1600{
1601 int rc = 0;
1602
1603 spin_lock_bh(&o->exe_queue.lock);
1604
1605 DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1606 rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1607
1608 if (rc != 0) {
1609 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1610
 1611 /* The calling function should not differentiate between this case
1612 * and the case in which there is already a pending ramrod
1613 */
1614 rc = 1;
1615 } else {
1616 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1617 }
1618 spin_unlock_bh(&o->exe_queue.lock);
1619
1620 return rc;
1621}
1622
1400/** 1623/**
1401 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod 1624 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1402 * 1625 *
@@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1414 struct bnx2x_raw_obj *r = &o->raw; 1637 struct bnx2x_raw_obj *r = &o->raw;
1415 int rc; 1638 int rc;
1416 1639
1640 /* Clearing the pending list & raw state should be made
1641 * atomically (as execution flow assumes they represent the same).
1642 */
1643 spin_lock_bh(&o->exe_queue.lock);
1644
1417 /* Reset pending list */ 1645 /* Reset pending list */
1418 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); 1646 __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1419 1647
1420 /* Clear pending */ 1648 /* Clear pending */
1421 r->clear_pending(r); 1649 r->clear_pending(r);
1422 1650
1651 spin_unlock_bh(&o->exe_queue.lock);
1652
1423 /* If ramrod failed this is most likely a SW bug */ 1653 /* If ramrod failed this is most likely a SW bug */
1424 if (cqe->message.error) 1654 if (cqe->message.error)
1425 return -EINVAL; 1655 return -EINVAL;
1426 1656
1427 /* Run the next bulk of pending commands if requested */ 1657 /* Run the next bulk of pending commands if requested */
1428 if (test_bit(RAMROD_CONT, ramrod_flags)) { 1658 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1429 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); 1659 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1660
1430 if (rc < 0) 1661 if (rc < 0)
1431 return rc; 1662 return rc;
1432 } 1663 }
@@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
1719 * @p: 1950 * @p:
1720 * 1951 *
1721 */ 1952 */
1722int bnx2x_config_vlan_mac( 1953int bnx2x_config_vlan_mac(struct bnx2x *bp,
1723 struct bnx2x *bp, 1954 struct bnx2x_vlan_mac_ramrod_params *p)
1724 struct bnx2x_vlan_mac_ramrod_params *p)
1725{ 1955{
1726 int rc = 0; 1956 int rc = 0;
1727 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; 1957 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac(
1752 /* Execute commands if required */ 1982 /* Execute commands if required */
1753 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || 1983 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1754 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { 1984 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1755 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); 1985 rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1986 &p->ramrod_flags);
1756 if (rc < 0) 1987 if (rc < 0)
1757 return rc; 1988 return rc;
1758 } 1989 }
@@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac(
1775 return rc; 2006 return rc;
1776 2007
1777 /* Make a next step */ 2008 /* Make a next step */
1778 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, 2009 rc = __bnx2x_vlan_mac_execute_step(bp,
1779 ramrod_flags); 2010 p->vlan_mac_obj,
2011 &p->ramrod_flags);
1780 if (rc < 0) 2012 if (rc < 0)
1781 return rc; 2013 return rc;
1782 } 2014 }
@@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1806 unsigned long *ramrod_flags) 2038 unsigned long *ramrod_flags)
1807{ 2039{
1808 struct bnx2x_vlan_mac_registry_elem *pos = NULL; 2040 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1809 int rc = 0;
1810 struct bnx2x_vlan_mac_ramrod_params p; 2041 struct bnx2x_vlan_mac_ramrod_params p;
1811 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 2042 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1812 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; 2043 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
2044 int read_lock;
2045 int rc = 0;
1813 2046
1814 /* Clear pending commands first */ 2047 /* Clear pending commands first */
1815 2048
@@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1844 __clear_bit(RAMROD_EXEC, &p.ramrod_flags); 2077 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1845 __clear_bit(RAMROD_CONT, &p.ramrod_flags); 2078 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1846 2079
2080 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2081 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
2082 if (read_lock != 0)
2083 return read_lock;
2084
1847 list_for_each_entry(pos, &o->head, link) { 2085 list_for_each_entry(pos, &o->head, link) {
1848 if (pos->vlan_mac_flags == *vlan_mac_flags) { 2086 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1849 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 2087 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
@@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1851 rc = bnx2x_config_vlan_mac(bp, &p); 2089 rc = bnx2x_config_vlan_mac(bp, &p);
1852 if (rc < 0) { 2090 if (rc < 0) {
1853 BNX2X_ERR("Failed to add a new DEL command\n"); 2091 BNX2X_ERR("Failed to add a new DEL command\n");
2092 bnx2x_vlan_mac_h_read_unlock(bp, o);
1854 return rc; 2093 return rc;
1855 } 2094 }
1856 } 2095 }
1857 } 2096 }
1858 2097
2098 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2099 bnx2x_vlan_mac_h_read_unlock(bp, o);
2100
1859 p.ramrod_flags = *ramrod_flags; 2101 p.ramrod_flags = *ramrod_flags;
1860 __set_bit(RAMROD_CONT, &p.ramrod_flags); 2102 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1861 2103
@@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1887 struct bnx2x_credit_pool_obj *vlans_pool) 2129 struct bnx2x_credit_pool_obj *vlans_pool)
1888{ 2130{
1889 INIT_LIST_HEAD(&o->head); 2131 INIT_LIST_HEAD(&o->head);
2132 o->head_reader = 0;
2133 o->head_exe_request = false;
2134 o->saved_ramrod_flags = 0;
1890 2135
1891 o->macs_pool = macs_pool; 2136 o->macs_pool = macs_pool;
1892 o->vlans_pool = vlans_pool; 2137 o->vlans_pool = vlans_pool;
@@ -4171,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4171 rss_obj->config_rss = bnx2x_setup_rss; 4416 rss_obj->config_rss = bnx2x_setup_rss;
4172} 4417}
4173 4418
4419int validate_vlan_mac(struct bnx2x *bp,
4420 struct bnx2x_vlan_mac_obj *vlan_mac)
4421{
4422 if (!vlan_mac->get_n_elements) {
 4423		BNX2X_ERR("vlan mac object was not initialized\n");
4424 return -EINVAL;
4425 }
4426 return 0;
4427}
4428
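validate_vlan_mac() leans on the get_n_elements callback as an initialization sentinel: only objects that passed through an init function have their ops populated. The call shape used by the sriov changes further down:

	rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
	if (rc)
		return rc;	/* object never initialized; don't touch its ops */
	ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);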
4174/********************** Queue state object ***********************************/ 4429/********************** Queue state object ***********************************/
4175 4430
4176/** 4431/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 798dfe996733..658f4e33abf9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj {
285 * entries. 285 * entries.
286 */ 286 */
287 struct list_head head; 287 struct list_head head;
288 /* Implement a simple reader/writer lock on the head list.
289 * all these fields should only be accessed under the exe_queue lock
290 */
291 u8 head_reader; /* Num. of readers accessing head list */
292 bool head_exe_request; /* Pending execution request. */
293 unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
288 294
289 /* TODO: Add it's initialization in the init functions */ 295 /* TODO: Add it's initialization in the init functions */
290 struct bnx2x_exe_queue_obj exe_queue; 296 struct bnx2x_exe_queue_obj exe_queue;
@@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1302 struct bnx2x_credit_pool_obj *macs_pool, 1308 struct bnx2x_credit_pool_obj *macs_pool,
1303 struct bnx2x_credit_pool_obj *vlans_pool); 1309 struct bnx2x_credit_pool_obj *vlans_pool);
1304 1310
1311int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
1312 struct bnx2x_vlan_mac_obj *o);
1313void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
1314 struct bnx2x_vlan_mac_obj *o);
1315int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
1316 struct bnx2x_vlan_mac_obj *o);
1317void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
1318 struct bnx2x_vlan_mac_obj *o);
1305int bnx2x_config_vlan_mac(struct bnx2x *bp, 1319int bnx2x_config_vlan_mac(struct bnx2x *bp,
1306 struct bnx2x_vlan_mac_ramrod_params *p); 1320 struct bnx2x_vlan_mac_ramrod_params *p);
1307 1321
1308int bnx2x_vlan_mac_move(struct bnx2x *bp, 1322int bnx2x_vlan_mac_move(struct bnx2x *bp,
1309 struct bnx2x_vlan_mac_ramrod_params *p, 1323 struct bnx2x_vlan_mac_ramrod_params *p,
@@ -1393,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
1393void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, 1407void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
1394 u8 *ind_table); 1408 u8 *ind_table);
1395 1409
1410int validate_vlan_mac(struct bnx2x *bp,
1411 struct bnx2x_vlan_mac_obj *vlan_mac);
1396#endif /* BNX2X_SP_VERBS */ 1412#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e8706e19f96f..b26eb83069b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
170 BNX2X_VFOP_QTEARDOWN_DONE 170 BNX2X_VFOP_QTEARDOWN_DONE
171}; 171};
172 172
173enum bnx2x_vfop_rss_state {
174 BNX2X_VFOP_RSS_CONFIG,
175 BNX2X_VFOP_RSS_DONE
176};
177
173#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) 178#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
174 179
175void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 180void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
265 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); 270 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
266 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); 271 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
267 272
268 if (vfq_is_leading(q)) {
269 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
270 __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
271 }
272
273 /* Setup-op rx parameters */ 273 /* Setup-op rx parameters */
274 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { 274 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
275 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; 275 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
398 BNX2X_Q_LOGICAL_STATE_STOPPED) { 398 BNX2X_Q_LOGICAL_STATE_STOPPED) {
399 DP(BNX2X_MSG_IOV, 399 DP(BNX2X_MSG_IOV,
400 "Entered qdtor but queue was already stopped. Aborting gracefully\n"); 400 "Entered qdtor but queue was already stopped. Aborting gracefully\n");
401 goto op_done; 401
402 /* next state */
403 vfop->state = BNX2X_VFOP_QDTOR_DONE;
404
405 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
402 } 406 }
403 407
404 /* next state */ 408 /* next state */
@@ -432,8 +436,10 @@ op_err:
432op_done: 436op_done:
433 case BNX2X_VFOP_QDTOR_DONE: 437 case BNX2X_VFOP_QDTOR_DONE:
434 /* invalidate the context */ 438 /* invalidate the context */
435 qdtor->cxt->ustorm_ag_context.cdu_usage = 0; 439 if (qdtor->cxt) {
436 qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; 440 qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
441 qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
442 }
437 bnx2x_vfop_end(bp, vf, vfop); 443 bnx2x_vfop_end(bp, vf, vfop);
438 return; 444 return;
439 default: 445 default:
@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
465 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, 471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
466 cmd->block); 472 cmd->block);
467 } 473 }
468 DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid); 474 DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
475 vf->abs_vfid, vfop->rc);
469 return -ENOMEM; 476 return -ENOMEM;
470} 477}
471 478
@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
474{ 481{
475 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 482 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
476 if (vf) { 483 if (vf) {
484 /* the first igu entry belonging to VFs of this PF */
485 if (!BP_VFDB(bp)->first_vf_igu_entry)
486 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
487
488 /* the first igu entry belonging to this VF */
477 if (!vf_sb_count(vf)) 489 if (!vf_sb_count(vf))
478 vf->igu_base_id = igu_sb_id; 490 vf->igu_base_id = igu_sb_id;
491
479 ++vf_sb_count(vf); 492 ++vf_sb_count(vf);
493 ++vf->sb_count;
480 } 494 }
495 BP_VFDB(bp)->vf_sbs_pool++;
481} 496}
482 497
483/* VFOP MAC/VLAN helpers */ 498/* VFOP MAC/VLAN helpers */
@@ -491,12 +506,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp,
491 * and a valid credit counter 506 * and a valid credit counter
492 */ 507 */
493 if (!vfop->rc && args->credit) { 508 if (!vfop->rc && args->credit) {
494 int cnt = 0;
495 struct list_head *pos; 509 struct list_head *pos;
510 int read_lock;
511 int cnt = 0;
512
513 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
514 if (read_lock)
515 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
496 516
497 list_for_each(pos, &obj->head) 517 list_for_each(pos, &obj->head)
498 cnt++; 518 cnt++;
499 519
520 if (!read_lock)
521 bnx2x_vlan_mac_h_read_unlock(bp, obj);
522
500 atomic_set(args->credit, cnt); 523 atomic_set(args->credit, cnt);
501 } 524 }
502} 525}
@@ -692,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
692 int qid, bool drv_only) 715 int qid, bool drv_only)
693{ 716{
694 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 717 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
718 int rc;
695 719
696 if (vfop) { 720 if (vfop) {
697 struct bnx2x_vfop_args_filters filters = { 721 struct bnx2x_vfop_args_filters filters = {
@@ -711,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
711 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); 735 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
712 736
713 /* set object */ 737 /* set object */
738 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
739 if (rc)
740 return rc;
714 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 741 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
715 742
716 /* set extra args */ 743 /* set extra args */
@@ -731,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
731 int qid, bool drv_only) 758 int qid, bool drv_only)
732{ 759{
733 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 760 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
761 int rc;
734 762
735 if (vfop) { 763 if (vfop) {
736 struct bnx2x_vfop_args_filters filters = { 764 struct bnx2x_vfop_args_filters filters = {
@@ -753,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
753 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); 781 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
754 782
755 /* set object */ 783 /* set object */
784 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
785 if (rc)
786 return rc;
756 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 787 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
757 788
758 /* set extra args */ 789 /* set extra args */
@@ -773,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
773 int qid, u16 vid, bool add) 804 int qid, u16 vid, bool add)
774{ 805{
775 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 806 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
807 int rc;
776 808
777 if (vfop) { 809 if (vfop) {
778 struct bnx2x_vfop_args_filters filters = { 810 struct bnx2x_vfop_args_filters filters = {
@@ -793,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
793 ramrod->user_req.u.vlan.vlan = vid; 825 ramrod->user_req.u.vlan.vlan = vid;
794 826
795 /* set object */ 827 /* set object */
828 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
829 if (rc)
830 return rc;
796 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 831 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
797 832
798 /* set extra args */ 833 /* set extra args */
@@ -812,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
812 int qid, bool drv_only) 847 int qid, bool drv_only)
813{ 848{
814 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 849 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
850 int rc;
815 851
816 if (vfop) { 852 if (vfop) {
817 struct bnx2x_vfop_args_filters filters = { 853 struct bnx2x_vfop_args_filters filters = {
@@ -831,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
831 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 867 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
832 868
833 /* set object */ 869 /* set object */
870 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
871 if (rc)
872 return rc;
834 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 873 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
835 874
836 /* set extra args */ 875 /* set extra args */
@@ -851,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
851 int qid, bool drv_only) 890 int qid, bool drv_only)
852{ 891{
853 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 892 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
893 int rc;
854 894
855 if (vfop) { 895 if (vfop) {
856 struct bnx2x_vfop_args_filters filters = { 896 struct bnx2x_vfop_args_filters filters = {
@@ -870,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
870 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 910 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
871 911
872 /* set object */ 912 /* set object */
913 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
914 if (rc)
915 return rc;
873 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 916 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
874 917
875 /* set extra args */ 918 /* set extra args */
@@ -980,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
980 case BNX2X_VFOP_QFLR_CLR_VLAN: 1023 case BNX2X_VFOP_QFLR_CLR_VLAN:
981 /* vlan-clear-all: driver-only, don't consume credit */ 1024 /* vlan-clear-all: driver-only, don't consume credit */
982 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; 1025 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
983 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); 1026 if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
1027 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
1028 true);
984 if (vfop->rc) 1029 if (vfop->rc)
985 goto op_err; 1030 goto op_err;
986 return; 1031 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
987 1032
988 case BNX2X_VFOP_QFLR_CLR_MAC: 1033 case BNX2X_VFOP_QFLR_CLR_MAC:
989 /* mac-clear-all: driver only consume credit */ 1034 /* mac-clear-all: driver only consume credit */
990 vfop->state = BNX2X_VFOP_QFLR_TERMINATE; 1035 vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
991 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); 1036 if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
1037 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
1038 true);
992 DP(BNX2X_MSG_IOV, 1039 DP(BNX2X_MSG_IOV,
993 "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", 1040 "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
994 vf->abs_vfid, vfop->rc); 1041 vf->abs_vfid, vfop->rc);
995 if (vfop->rc) 1042 if (vfop->rc)
996 goto op_err; 1043 goto op_err;
997 return; 1044 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
998 1045
999 case BNX2X_VFOP_QFLR_TERMINATE: 1046 case BNX2X_VFOP_QFLR_TERMINATE:
1000 qstate = &vfop->op_p->qctor.qstate; 1047 qstate = &vfop->op_p->qctor.qstate;
@@ -1291,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1291{ 1338{
1292 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1339 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1293 1340
 1341	/* for non-leading queues skip directly to the qdown state */
1294 if (vfop) { 1342 if (vfop) {
1295 vfop->args.qx.qid = qid; 1343 vfop->args.qx.qid = qid;
1296 bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE, 1344 bnx2x_vfop_opset(qid == LEADING_IDX ?
1297 bnx2x_vfop_qdown, cmd->done); 1345 BNX2X_VFOP_QTEARDOWN_RXMODE :
1346 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
1347 cmd->done);
1298 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, 1348 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1299 cmd->block); 1349 cmd->block);
1300 } 1350 }
@@ -1447,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1447 * both known 1497 * both known
1448 */ 1498 */
1449static void 1499static void
1450bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) 1500bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1451{ 1501{
1502 struct vf_pf_resc_request *resc = &vf->alloc_resc;
1452 u16 vlan_count = 0; 1503 u16 vlan_count = 0;
1453 1504
1454 /* will be set only during VF-ACQUIRE */ 1505 /* will be set only during VF-ACQUIRE */
1455 resc->num_rxqs = 0; 1506 resc->num_rxqs = 0;
1456 resc->num_txqs = 0; 1507 resc->num_txqs = 0;
1457 1508
1458 /* no credit calculcis for macs (just yet) */ 1509 /* no credit calculations for macs (just yet) */
1459 resc->num_mac_filters = 1; 1510 resc->num_mac_filters = 1;
1460 1511
1461 /* divvy up vlan rules */ 1512 /* divvy up vlan rules */
@@ -1467,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
1467 resc->num_mc_filters = 0; 1518 resc->num_mc_filters = 0;
1468 1519
1469 /* num_sbs already set */ 1520 /* num_sbs already set */
1521 resc->num_sbs = vf->sb_count;
1470} 1522}
1471 1523
1472/* FLR routines: */ 1524/* FLR routines: */
1473static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 1525static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1474{ 1526{
1475 /* reset the state variables */ 1527 /* reset the state variables */
1476 bnx2x_iov_static_resc(bp, &vf->alloc_resc); 1528 bnx2x_iov_static_resc(bp, vf);
1477 vf->state = VF_FREE; 1529 vf->state = VF_FREE;
1478} 1530}
1479 1531
@@ -1693,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1693 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 1745 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
 1694	 * the PF doorbell size, although the two are independent.	 */ 1746
1695 */ 1747 */
1696 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 1748 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1697 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
1698 1749
1699 /* No security checks for now - 1750 /* No security checks for now -
1700 * configure single rule (out of 16) mask = 0x1, value = 0x0, 1751 * configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1761,7 +1812,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1761{ 1812{
1762 int sb_id; 1813 int sb_id;
1763 u32 val; 1814 u32 val;
1764 u8 fid; 1815 u8 fid, current_pf = 0;
1765 1816
1766 /* IGU in normal mode - read CAM */ 1817 /* IGU in normal mode - read CAM */
1767 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1818 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1769,16 +1820,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1769 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1820 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1770 continue; 1821 continue;
1771 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1822 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1772 if (!(fid & IGU_FID_ENCODE_IS_PF)) 1823 if (fid & IGU_FID_ENCODE_IS_PF)
1824 current_pf = fid & IGU_FID_PF_NUM_MASK;
1825 else if (current_pf == BP_ABS_FUNC(bp))
1773 bnx2x_vf_set_igu_info(bp, sb_id, 1826 bnx2x_vf_set_igu_info(bp, sb_id,
1774 (fid & IGU_FID_VF_NUM_MASK)); 1827 (fid & IGU_FID_VF_NUM_MASK));
1775
1776 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1828 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1777 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1829 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1778 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) : 1830 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1779 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 1831 (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1780 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1832 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1781 } 1833 }
1834 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1782} 1835}
1783 1836
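The rewritten CAM walk relies on an ordering property of the IGU mapping memory: a PF's own entry precedes the entries of its VFs, so the loop latches the most recent PF number and attributes the VF entries that follow to it, claiming only those owned by this PF. The ownership test, in outline:

	/* sketch of the attribution logic inside the CAM loop */
	if (fid & IGU_FID_ENCODE_IS_PF)
		current_pf = fid & IGU_FID_PF_NUM_MASK;	/* new owner from here on */
	else if (current_pf == BP_ABS_FUNC(bp))
		bnx2x_vf_set_igu_info(bp, sb_id,	/* one of ours: claim the SB */
				      fid & IGU_FID_VF_NUM_MASK);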
1784static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1837static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1844,23 +1897,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1844 return 0; 1897 return 0;
1845} 1898}
1846 1899
1847static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
1848{
1849 int i;
1850 u8 queue_count = 0;
1851
1852 if (IS_SRIOV(bp))
1853 for_each_vf(bp, i)
1854 queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1855
1856 return queue_count;
1857}
1858
1859/* must be called after PF bars are mapped */ 1900/* must be called after PF bars are mapped */
1860int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1901int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1861 int num_vfs_param) 1902 int num_vfs_param)
1862{ 1903{
1863 int err, i, qcount; 1904 int err, i;
1864 struct bnx2x_sriov *iov; 1905 struct bnx2x_sriov *iov;
1865 struct pci_dev *dev = bp->pdev; 1906 struct pci_dev *dev = bp->pdev;
1866 1907
@@ -1958,12 +1999,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1958 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1999 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1959 bnx2x_get_vf_igu_cam_info(bp); 2000 bnx2x_get_vf_igu_cam_info(bp);
1960 2001
1961 /* get the total queue count and allocate the global queue arrays */
1962 qcount = bnx2x_iov_get_max_queue_count(bp);
1963
1964 /* allocate the queue arrays for all VFs */ 2002 /* allocate the queue arrays for all VFs */
1965 bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), 2003 bp->vfdb->vfqs = kzalloc(
1966 GFP_KERNEL); 2004 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
2005 GFP_KERNEL);
2006
2007 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
2008
1967 if (!bp->vfdb->vfqs) { 2009 if (!bp->vfdb->vfqs) {
1968 BNX2X_ERR("failed to allocate vf queue array\n"); 2010 BNX2X_ERR("failed to allocate vf queue array\n");
1969 err = -ENOMEM; 2011 err = -ENOMEM;
@@ -2084,49 +2126,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2084 q_type); 2126 q_type);
2085 2127
2086 DP(BNX2X_MSG_IOV, 2128 DP(BNX2X_MSG_IOV,
2087 "initialized vf %d's queue object. func id set to %d\n", 2129 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
2088 vf->abs_vfid, q->sp_obj.func_id); 2130 vf->abs_vfid, q->sp_obj.func_id, q->cid);
2089
2090 /* mac/vlan objects are per queue, but only those
2091 * that belong to the leading queue are initialized
2092 */
2093 if (vfq_is_leading(q)) {
2094 /* mac */
2095 bnx2x_init_mac_obj(bp, &q->mac_obj,
2096 cl_id, q->cid, func_id,
2097 bnx2x_vf_sp(bp, vf, mac_rdata),
2098 bnx2x_vf_sp_map(bp, vf, mac_rdata),
2099 BNX2X_FILTER_MAC_PENDING,
2100 &vf->filter_state,
2101 BNX2X_OBJ_TYPE_RX_TX,
2102 &bp->macs_pool);
2103 /* vlan */
2104 bnx2x_init_vlan_obj(bp, &q->vlan_obj,
2105 cl_id, q->cid, func_id,
2106 bnx2x_vf_sp(bp, vf, vlan_rdata),
2107 bnx2x_vf_sp_map(bp, vf, vlan_rdata),
2108 BNX2X_FILTER_VLAN_PENDING,
2109 &vf->filter_state,
2110 BNX2X_OBJ_TYPE_RX_TX,
2111 &bp->vlans_pool);
2112
2113 /* mcast */
2114 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
2115 q->cid, func_id, func_id,
2116 bnx2x_vf_sp(bp, vf, mcast_rdata),
2117 bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2118 BNX2X_FILTER_MCAST_PENDING,
2119 &vf->filter_state,
2120 BNX2X_OBJ_TYPE_RX_TX);
2121
2122 vf->leading_rss = cl_id;
2123 }
2124} 2131}
2125 2132
2126/* called by bnx2x_nic_load */ 2133/* called by bnx2x_nic_load */
2127int bnx2x_iov_nic_init(struct bnx2x *bp) 2134int bnx2x_iov_nic_init(struct bnx2x *bp)
2128{ 2135{
2129 int vfid, qcount, i; 2136 int vfid;
2130 2137
2131 if (!IS_SRIOV(bp)) { 2138 if (!IS_SRIOV(bp)) {
2132 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2139 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2155,7 +2162,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
2155 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2162 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
2156 2163
2157 /* init statically provisioned resources */ 2164 /* init statically provisioned resources */
2158 bnx2x_iov_static_resc(bp, &vf->alloc_resc); 2165 bnx2x_iov_static_resc(bp, vf);
2159 2166
2160 /* queues are initialized during VF-ACQUIRE */ 2167 /* queues are initialized during VF-ACQUIRE */
2161 2168
@@ -2191,13 +2198,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
2191 } 2198 }
2192 2199
2193 /* Final VF init */ 2200 /* Final VF init */
2194 qcount = 0; 2201 for_each_vf(bp, vfid) {
2195 for_each_vf(bp, i) { 2202 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2196 struct bnx2x_virtf *vf = BP_VF(bp, i);
2197 2203
2198 /* fill in the BDF and bars */ 2204 /* fill in the BDF and bars */
2199 vf->bus = bnx2x_vf_bus(bp, i); 2205 vf->bus = bnx2x_vf_bus(bp, vfid);
2200 vf->devfn = bnx2x_vf_devfn(bp, i); 2206 vf->devfn = bnx2x_vf_devfn(bp, vfid);
2201 bnx2x_vf_set_bars(bp, vf); 2207 bnx2x_vf_set_bars(bp, vf);
2202 2208
2203 DP(BNX2X_MSG_IOV, 2209 DP(BNX2X_MSG_IOV,
@@ -2206,10 +2212,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
2206 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2212 (unsigned)vf->bars[0].bar, vf->bars[0].size,
2207 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2213 (unsigned)vf->bars[1].bar, vf->bars[1].size,
2208 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2214 (unsigned)vf->bars[2].bar, vf->bars[2].size);
2209
2210 /* set local queue arrays */
2211 vf->vfqs = &bp->vfdb->vfqs[qcount];
2212 qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
2213 } 2215 }
2214 2216
2215 return 0; 2217 return 0;
@@ -2515,6 +2517,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2515 for_each_vfq(vf, j) { 2517 for_each_vfq(vf, j) {
2516 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 2518 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2517 2519
2520 dma_addr_t q_stats_addr =
2521 vf->fw_stat_map + j * vf->stats_stride;
2522
 2518			/* collect stats from active queues only */ 2523
2519 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 2524 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2520 BNX2X_Q_LOGICAL_STATE_STOPPED) 2525 BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2522,13 +2527,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2522 2527
2523 /* create stats query entry for this queue */ 2528 /* create stats query entry for this queue */
2524 cur_query_entry->kind = STATS_TYPE_QUEUE; 2529 cur_query_entry->kind = STATS_TYPE_QUEUE;
2525 cur_query_entry->index = vfq_cl_id(vf, rxq); 2530 cur_query_entry->index = vfq_stat_id(vf, rxq);
2526 cur_query_entry->funcID = 2531 cur_query_entry->funcID =
2527 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 2532 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2528 cur_query_entry->address.hi = 2533 cur_query_entry->address.hi =
2529 cpu_to_le32(U64_HI(vf->fw_stat_map)); 2534 cpu_to_le32(U64_HI(q_stats_addr));
2530 cur_query_entry->address.lo = 2535 cur_query_entry->address.lo =
2531 cpu_to_le32(U64_LO(vf->fw_stat_map)); 2536 cpu_to_le32(U64_LO(q_stats_addr));
2532 DP(BNX2X_MSG_IOV, 2537 DP(BNX2X_MSG_IOV,
2533 "added address %x %x for vf %d queue %d client %d\n", 2538 "added address %x %x for vf %d queue %d client %d\n",
2534 cur_query_entry->address.hi, 2539 cur_query_entry->address.hi,
@@ -2537,6 +2542,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2537 cur_query_entry++; 2542 cur_query_entry++;
2538 cur_data_offset += sizeof(struct per_queue_stats); 2543 cur_data_offset += sizeof(struct per_queue_stats);
2539 stats_count++; 2544 stats_count++;
2545
2546 /* all stats are coalesced to the leading queue */
2547 if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
2548 break;
2540 } 2549 }
2541 } 2550 }
2542 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 2551 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
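Each queue's statistics slot is now derived from the stride the VF advertises (the new VFs pass sizeof(struct per_queue_stats)), instead of every query entry pointing at the base of fw_stat_map; a VF flagged VF_CFG_STATS_COALESCE reports everything on its leading queue, hence the early break after one entry. The addressing in isolation (sketch):

	/* DMA slot for queue j inside the VF's stats buffer */
	static dma_addr_t vf_q_stats_addr(struct bnx2x_virtf *vf, int j)
	{
		/* j == 0 yields fw_stat_map itself, which is all a
		 * stats-coalescing VF ever uses */
		return vf->fw_stat_map + j * vf->stats_stride;
	}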
@@ -2555,6 +2564,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
2555 for_each_vf(bp, i) { 2564 for_each_vf(bp, i) {
2556 struct bnx2x_virtf *vf = BP_VF(bp, i); 2565 struct bnx2x_virtf *vf = BP_VF(bp, i);
2557 2566
2567 if (!vf) {
2568 BNX2X_ERR("VF was null! skipping...\n");
2569 continue;
2570 }
2571
2558 if (!list_empty(&vf->op_list_head) && 2572 if (!list_empty(&vf->op_list_head) &&
2559 atomic_read(&vf->op_in_progress)) { 2573 atomic_read(&vf->op_in_progress)) {
2560 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); 2574 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2702,7 +2716,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2702 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2716 struct bnx2x_vf_queue *q = vfq_get(vf, i);
2703 2717
2704 if (!q) { 2718 if (!q) {
2705 DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); 2719 BNX2X_ERR("q number %d was not allocated\n", i);
2706 return -EINVAL; 2720 return -EINVAL;
2707 } 2721 }
2708 2722
@@ -2930,6 +2944,43 @@ op_done:
2930 bnx2x_vfop_end(bp, vf, vfop); 2944 bnx2x_vfop_end(bp, vf, vfop);
2931} 2945}
2932 2946
2947static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
2948{
2949 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2950 enum bnx2x_vfop_rss_state state;
2951
2952 if (!vfop) {
2953 BNX2X_ERR("vfop was null\n");
2954 return;
2955 }
2956
2957 state = vfop->state;
2958 bnx2x_vfop_reset_wq(vf);
2959
2960 if (vfop->rc < 0)
2961 goto op_err;
2962
2963 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2964
2965 switch (state) {
2966 case BNX2X_VFOP_RSS_CONFIG:
2967 /* next state */
2968 vfop->state = BNX2X_VFOP_RSS_DONE;
2969 bnx2x_config_rss(bp, &vfop->op_p->rss);
2970 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
2971op_err:
2972 BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
2973op_done:
2974 case BNX2X_VFOP_RSS_DONE:
2975 bnx2x_vfop_end(bp, vf, vfop);
2976 return;
2977 default:
2978 bnx2x_vfop_default(state);
2979 }
2980op_pending:
2981 return;
2982}
2983
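bnx2x_vfop_rss() follows the driver's standard vfop shape: snapshot the state, reset the work queue, advance vfop->state before issuing the asynchronous operation, then let bnx2x_vfop_finalize() either re-queue the op or fall through to the DONE case. The skeleton with the RSS specifics stripped out (a sketch, not a buildable vfop; issue_async_config is hypothetical):

	switch (state) {
	case STATE_CONFIG:
		vfop->state = STATE_DONE;		/* choose the next state first */
		vfop->rc = issue_async_config(bp);	/* hypothetical async call */
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
		/* falls through only on synchronous completion */
	case STATE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}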
2933int bnx2x_vfop_release_cmd(struct bnx2x *bp, 2984int bnx2x_vfop_release_cmd(struct bnx2x *bp,
2934 struct bnx2x_virtf *vf, 2985 struct bnx2x_virtf *vf,
2935 struct bnx2x_vfop_cmd *cmd) 2986 struct bnx2x_vfop_cmd *cmd)
@@ -2944,6 +2995,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
2944 return -ENOMEM; 2995 return -ENOMEM;
2945} 2996}
2946 2997
2998int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
2999 struct bnx2x_virtf *vf,
3000 struct bnx2x_vfop_cmd *cmd)
3001{
3002 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3003
3004 if (vfop) {
3005 bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
3006 cmd->done);
3007 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
3008 cmd->block);
3009 }
3010 return -ENOMEM;
3011}
3012
2947/* VF release ~ VF close + VF release-resources 3013/* VF release ~ VF close + VF release-resources
2948 * Release is the ultimate SW shutdown and is called whenever an 3014 * Release is the ultimate SW shutdown and is called whenever an
2949 * irrecoverable error is encountered. 3015 * irrecoverable error is encountered.
@@ -2955,6 +3021,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
2955 .block = block, 3021 .block = block,
2956 }; 3022 };
2957 int rc; 3023 int rc;
3024
3025 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2958 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 3026 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2959 3027
2960 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 3028 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -2983,6 +3051,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
2983void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3051void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2984 enum channel_tlvs tlv) 3052 enum channel_tlvs tlv)
2985{ 3053{
3054 /* we don't lock the channel for unsupported tlvs */
3055 if (!bnx2x_tlv_supported(tlv)) {
3056 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
3057 return;
3058 }
3059
2986 /* lock the channel */ 3060 /* lock the channel */
2987 mutex_lock(&vf->op_mutex); 3061 mutex_lock(&vf->op_mutex);
2988 3062
@@ -2997,19 +3071,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2997void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3071void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2998 enum channel_tlvs expected_tlv) 3072 enum channel_tlvs expected_tlv)
2999{ 3073{
3074 enum channel_tlvs current_tlv;
3075
3076 if (!vf) {
3077 BNX2X_ERR("VF was %p\n", vf);
3078 return;
3079 }
3080
3081 current_tlv = vf->op_current;
3082
3083 /* we don't unlock the channel for unsupported tlvs */
3084 if (!bnx2x_tlv_supported(expected_tlv))
3085 return;
3086
3000 WARN(expected_tlv != vf->op_current, 3087 WARN(expected_tlv != vf->op_current,
3001 "lock mismatch: expected %d found %d", expected_tlv, 3088 "lock mismatch: expected %d found %d", expected_tlv,
3002 vf->op_current); 3089 vf->op_current);
3003 3090
 3091	/* clear the locking op */
3092 vf->op_current = CHANNEL_TLV_NONE;
3093
 3004	/* unlock the channel */ 3094
3005 mutex_unlock(&vf->op_mutex); 3095 mutex_unlock(&vf->op_mutex);
3006 3096
3007 /* log the unlock */ 3097 /* log the unlock */
3008 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 3098 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
3009 vf->abs_vfid, vf->op_current); 3099 vf->abs_vfid, vf->op_current);
3010
3011 /* record the locking op */
3012 vf->op_current = CHANNEL_TLV_NONE;
3013} 3100}
3014 3101
3015int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 3102int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3040,11 +3127,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3040 return bnx2x_enable_sriov(bp); 3127 return bnx2x_enable_sriov(bp);
3041 } 3128 }
3042} 3129}
3130#define IGU_ENTRY_SIZE 4
3043 3131
3044int bnx2x_enable_sriov(struct bnx2x *bp) 3132int bnx2x_enable_sriov(struct bnx2x *bp)
3045{ 3133{
3046 int rc = 0, req_vfs = bp->requested_nr_virtfn; 3134 int rc = 0, req_vfs = bp->requested_nr_virtfn;
3135 int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
3136 u32 igu_entry, address;
3137 u16 num_vf_queues;
3138
3139 if (req_vfs == 0)
3140 return 0;
3141
3142 first_vf = bp->vfdb->sriov.first_vf_in_pf;
3143
3144 /* statically distribute vf sb pool between VFs */
3145 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
3146 BP_VFDB(bp)->vf_sbs_pool / req_vfs);
3147
3148 /* zero previous values learned from igu cam */
3149 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
3150 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3151
3152 vf->sb_count = 0;
3153 vf_sb_count(BP_VF(bp, vf_idx)) = 0;
3154 }
3155 bp->vfdb->vf_sbs_pool = 0;
3156
3157 /* prepare IGU cam */
3158 sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
3159 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
3160 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3161 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
3162 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
3163 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
3164 IGU_REG_MAPPING_MEMORY_VALID;
3165 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
3166 sb_idx, vf_idx);
3167 REG_WR(bp, address, igu_entry);
3168 sb_idx++;
3169 address += IGU_ENTRY_SIZE;
3170 }
3171 }
3172
3173 /* Reinitialize vf database according to igu cam */
3174 bnx2x_get_vf_igu_cam_info(bp);
3175
3176 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
3177 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
3178
3179 qcount = 0;
3180 for_each_vf(bp, vf_idx) {
3181 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3182
3183 /* set local queue arrays */
3184 vf->vfqs = &bp->vfdb->vfqs[qcount];
3185 qcount += vf_sb_count(vf);
3186 }
3047 3187
3188 /* prepare msix vectors in VF configuration space */
3189 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3190 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3191 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3192 num_vf_queues);
3193 }
3194 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3195
 3196	/* enable sriov. This will probe all the VFs, and consequently cause
3197 * the "acquire" messages to appear on the VF PF channel.
3198 */
3199 DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
3200 pci_disable_sriov(bp->pdev);
3048 rc = pci_enable_sriov(bp->pdev, req_vfs); 3201 rc = pci_enable_sriov(bp->pdev, req_vfs);
3049 if (rc) { 3202 if (rc) {
3050 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3203 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
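Two details above are worth spelling out. The static split of the SB pool is plain integer division capped per VF: with, say, a 64-entry pool and 8 requested VFs, each VF gets min(BNX2X_VF_MAX_QUEUES, 64 / 8) = 8 status blocks (the figures are illustrative, not hardware constants). And programming the VF's MSI-X count uses the chip's pretend mechanism, under which GRC writes land in the pretended function's configuration space, so the PF must always pretend back to itself when done:

	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));	/* act as the VF */
	REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, num_vf_queues);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));		/* restore; the driver
								 * batches this after
								 * its loop */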
@@ -3072,9 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
3072 pci_disable_sriov(bp->pdev); 3225 pci_disable_sriov(bp->pdev);
3073} 3226}
3074 3227
3075static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3228int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
3076 struct bnx2x_virtf **vf, 3229 struct pf_vf_bulletin_content **bulletin)
3077 struct pf_vf_bulletin_content **bulletin)
3078{ 3230{
3079 if (bp->state != BNX2X_STATE_OPEN) { 3231 if (bp->state != BNX2X_STATE_OPEN) {
3080 BNX2X_ERR("vf ndo called though PF is down\n"); 3232 BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3097,7 +3249,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
3097 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3249 *bulletin = BP_VF_BULLETIN(bp, vfidx);
3098 3250
3099 if (!*vf) { 3251 if (!*vf) {
3100 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", 3252 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
3253 vfidx);
3254 return -EINVAL;
3255 }
3256
3257 if (!(*vf)->vfqs) {
3258 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
3101 vfidx); 3259 vfidx);
3102 return -EINVAL; 3260 return -EINVAL;
3103 } 3261 }
@@ -3125,8 +3283,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3125 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3283 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3126 if (rc) 3284 if (rc)
3127 return rc; 3285 return rc;
3128 mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3286 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3129 vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3287 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3130 if (!mac_obj || !vlan_obj) { 3288 if (!mac_obj || !vlan_obj) {
3131 BNX2X_ERR("VF partially initialized\n"); 3289 BNX2X_ERR("VF partially initialized\n");
3132 return -EINVAL; 3290 return -EINVAL;
@@ -3138,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3138 ivi->spoofchk = 1; /*always enabled */ 3296 ivi->spoofchk = 1; /*always enabled */
3139 if (vf->state == VF_ENABLED) { 3297 if (vf->state == VF_ENABLED) {
3140 /* mac and vlan are in vlan_mac objects */ 3298 /* mac and vlan are in vlan_mac objects */
3141 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3299 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
3142 0, ETH_ALEN); 3300 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3143 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, 3301 0, ETH_ALEN);
3144 0, VLAN_HLEN); 3302 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
3303 vlan_obj->get_n_elements(bp, vlan_obj, 1,
3304 (u8 *)&ivi->vlan, 0,
3305 VLAN_HLEN);
3145 } else { 3306 } else {
3146 /* mac */ 3307 /* mac */
3147 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3308 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3209,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3209 return rc; 3370 return rc;
3210 } 3371 }
3211 3372
3212 /* is vf initialized and queue set up? */
3213 q_logical_state = 3373 q_logical_state =
3214 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); 3374 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
3215 if (vf->state == VF_ENABLED && 3375 if (vf->state == VF_ENABLED &&
3216 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3376 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3217 /* configure the mac in device on this vf's queue */ 3377 /* configure the mac in device on this vf's queue */
3218 unsigned long ramrod_flags = 0; 3378 unsigned long ramrod_flags = 0;
3219 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3379 struct bnx2x_vlan_mac_obj *mac_obj =
3380 &bnx2x_leading_vfq(vf, mac_obj);
3381
3382 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3383 if (rc)
3384 return rc;
3220 3385
3221 /* must lock vfpf channel to protect against vf flows */ 3386 /* must lock vfpf channel to protect against vf flows */
3222 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3387 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3276,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3276 3441
3277 /* is vf initialized and queue set up? */ 3442 /* is vf initialized and queue set up? */
3278 q_logical_state = 3443 q_logical_state =
3279 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); 3444 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
3280 if (vf->state == VF_ENABLED && 3445 if (vf->state == VF_ENABLED &&
3281 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3446 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3282 /* configure the vlan in device on this vf's queue */ 3447 /* configure the vlan in device on this vf's queue */
3283 unsigned long ramrod_flags = 0; 3448 unsigned long ramrod_flags = 0;
3284 unsigned long vlan_mac_flags = 0; 3449 unsigned long vlan_mac_flags = 0;
3285 struct bnx2x_vlan_mac_obj *vlan_obj = 3450 struct bnx2x_vlan_mac_obj *vlan_obj =
3286 &bnx2x_vfq(vf, 0, vlan_obj); 3451 &bnx2x_leading_vfq(vf, vlan_obj);
3287 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 3452 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3288 struct bnx2x_queue_state_params q_params = {NULL}; 3453 struct bnx2x_queue_state_params q_params = {NULL};
3289 struct bnx2x_queue_update_params *update_params; 3454 struct bnx2x_queue_update_params *update_params;
3290 3455
3456 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3457 if (rc)
3458 return rc;
3291 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3459 memset(&ramrod_param, 0, sizeof(ramrod_param));
3292 3460
3293 /* must lock vfpf channel to protect against vf flows */ 3461 /* must lock vfpf channel to protect against vf flows */
@@ -3307,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3307 */ 3475 */
3308 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3476 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3309 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3477 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3310 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj); 3478 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3311 update_params = &q_params.params.update; 3479 update_params = &q_params.params.update;
3312 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3480 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3313 &update_params->update_flags); 3481 &update_params->update_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d143a7cdbbbe..2a8c1dc65d9c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
81 u32 cid; 81 u32 cid;
82 u16 index; 82 u16 index;
83 u16 sb_idx; 83 u16 sb_idx;
84 bool is_leading;
84}; 85};
85 86
86/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: 87/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -194,6 +195,7 @@ struct bnx2x_virtf {
194#define VF_CFG_INT_SIMD 0x0008 195#define VF_CFG_INT_SIMD 0x0008
195#define VF_CACHE_LINE 0x0010 196#define VF_CACHE_LINE 0x0010
196#define VF_CFG_VLAN 0x0020 197#define VF_CFG_VLAN 0x0020
198#define VF_CFG_STATS_COALESCE 0x0040
197 199
198 u8 state; 200 u8 state;
199#define VF_FREE 0 /* VF ready to be acquired holds no resc */ 201#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -213,6 +215,7 @@ struct bnx2x_virtf {
213 215
214 /* dma */ 216 /* dma */
215 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ 217 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
218 u16 stats_stride;
216 dma_addr_t spq_map; 219 dma_addr_t spq_map;
217 dma_addr_t bulletin_map; 220 dma_addr_t bulletin_map;
218 221
@@ -239,7 +242,10 @@ struct bnx2x_virtf {
239 u8 igu_base_id; /* base igu status block id */ 242 u8 igu_base_id; /* base igu status block id */
240 243
241 struct bnx2x_vf_queue *vfqs; 244 struct bnx2x_vf_queue *vfqs;
242#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) 245#define LEADING_IDX 0
246#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
247#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
248#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
243 249
244 u8 index; /* index in the vf array */ 250 u8 index; /* index in the vf array */
245 u8 abs_vfid; 251 u8 abs_vfid;
@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
358 struct client_init_ramrod_data init_data; 364 struct client_init_ramrod_data init_data;
359 struct client_update_ramrod_data update_data; 365 struct client_update_ramrod_data update_data;
360 } q_data; 366 } q_data;
367
368 union {
369 struct eth_rss_update_ramrod_data e2;
370 } rss_rdata;
361}; 371};
362 372
363struct hw_dma { 373struct hw_dma {
@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
403 413
404#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) 414#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
405 u32 flrd_vfs[FLRD_VFS_DWORDS]; 415 u32 flrd_vfs[FLRD_VFS_DWORDS];
416
417 /* the number of msix vectors belonging to this PF designated for VFs */
418 u16 vf_sbs_pool;
419 u16 first_vf_igu_entry;
406}; 420};
407 421
408/* queue access */ 422/* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
411 return &(vf->vfqs[index]); 425 return &(vf->vfqs[index]);
412} 426}
413 427
414static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
415{
416 return (vfq->index == 0);
417}
418
419/* FW ids */ 428/* FW ids */
420static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) 429static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
421{ 430{
@@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
434 443
435static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 444static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
436{ 445{
437 return vfq_cl_id(vf, q); 446 if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
447 return vf->leading_rss;
448 else
449 return vfq_cl_id(vf, q);
438} 450}
439 451
440static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 452static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
691 struct bnx2x_virtf *vf, 703 struct bnx2x_virtf *vf,
692 struct bnx2x_vfop_cmd *cmd); 704 struct bnx2x_vfop_cmd *cmd);
693 705
706int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
707 struct bnx2x_virtf *vf,
708 struct bnx2x_vfop_cmd *cmd);
709
694/* VF release ~ VF close + VF release-resources 710/* VF release ~ VF close + VF release-resources
695 * 711 *
696 * Release is the ultimate SW shutdown and is called whenever an 712 * Release is the ultimate SW shutdown and is called whenever an
@@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp);
730int bnx2x_vfpf_release(struct bnx2x *bp); 746int bnx2x_vfpf_release(struct bnx2x *bp);
731int bnx2x_vfpf_init(struct bnx2x *bp); 747int bnx2x_vfpf_init(struct bnx2x *bp);
732void bnx2x_vfpf_close_vf(struct bnx2x *bp); 748void bnx2x_vfpf_close_vf(struct bnx2x *bp);
733int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx); 749int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
750 bool is_leading);
734int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); 751int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
735int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); 752int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
753int bnx2x_vfpf_config_rss(struct bnx2x *bp,
754 struct bnx2x_config_rss_params *params);
736int bnx2x_vfpf_set_mcast(struct net_device *dev); 755int bnx2x_vfpf_set_mcast(struct net_device *dev);
737int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); 756int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
738 757
@@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
758void bnx2x_disable_sriov(struct bnx2x *bp); 777void bnx2x_disable_sriov(struct bnx2x *bp);
759static inline int bnx2x_vf_headroom(struct bnx2x *bp) 778static inline int bnx2x_vf_headroom(struct bnx2x *bp)
760{ 779{
761 return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; 780 return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
762} 781}
763void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); 782void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
764int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); 783int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
@@ -793,7 +812,7 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
793static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } 812static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
794static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } 813static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
795static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} 814static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
796static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; } 815static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
797static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } 816static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
798static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, 817static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
799 u8 vf_qid, bool set) {return 0; } 818 u8 vf_qid, bool set) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 2088063151d6..6cfb88732452 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
257 257
258 /* humble our request */ 258 /* humble our request */
259 req->resc_request.num_txqs = 259 req->resc_request.num_txqs =
260 bp->acquire_resp.resc.num_txqs; 260 min(req->resc_request.num_txqs,
261 bp->acquire_resp.resc.num_txqs);
261 req->resc_request.num_rxqs = 262 req->resc_request.num_rxqs =
262 bp->acquire_resp.resc.num_rxqs; 263 min(req->resc_request.num_rxqs,
264 bp->acquire_resp.resc.num_rxqs);
263 req->resc_request.num_sbs = 265 req->resc_request.num_sbs =
264 bp->acquire_resp.resc.num_sbs; 266 min(req->resc_request.num_sbs,
267 bp->acquire_resp.resc.num_sbs);
265 req->resc_request.num_mac_filters = 268 req->resc_request.num_mac_filters =
266 bp->acquire_resp.resc.num_mac_filters; 269 min(req->resc_request.num_mac_filters,
270 bp->acquire_resp.resc.num_mac_filters);
267 req->resc_request.num_vlan_filters = 271 req->resc_request.num_vlan_filters =
268 bp->acquire_resp.resc.num_vlan_filters; 272 min(req->resc_request.num_vlan_filters,
273 bp->acquire_resp.resc.num_vlan_filters);
269 req->resc_request.num_mc_filters = 274 req->resc_request.num_mc_filters =
270 bp->acquire_resp.resc.num_mc_filters; 275 min(req->resc_request.num_mc_filters,
276 bp->acquire_resp.resc.num_mc_filters);
271 277
272 /* Clear response buffer */ 278 /* Clear response buffer */
273 memset(&bp->vf2pf_mbox->resp, 0, 279 memset(&bp->vf2pf_mbox->resp, 0,
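On this resource-mismatch retry the VF now clamps each field of the re-request to the PF's counter-offer instead of adopting the PF's numbers outright, so a second pass can only shrink the ask: a VF that wanted 4 rxqs but was offered 8 re-requests 4, never 8. Per field the pattern is simply:

	/* sketch: retry with min(originally wanted, offered) */
	req->resc_request.num_rxqs = min(req->resc_request.num_rxqs,
					 bp->acquire_resp.resc.num_rxqs);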
@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
293 bp->common.flash_size = 0; 299 bp->common.flash_size = 0;
294 bp->flags |= 300 bp->flags |=
295 NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; 301 NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
296 bp->igu_sb_cnt = 1; 302 bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
297 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; 303 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
298 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, 304 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
299 sizeof(bp->fw_ver)); 305 sizeof(bp->fw_ver));
@@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
373 req->stats_addr = bp->fw_stats_data_mapping + 379 req->stats_addr = bp->fw_stats_data_mapping +
374 offsetof(struct bnx2x_fw_stats_data, queue_stats); 380 offsetof(struct bnx2x_fw_stats_data, queue_stats);
375 381
382 req->stats_stride = sizeof(struct per_queue_stats);
383
376 /* add list termination tlv */ 384 /* add list termination tlv */
377 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, 385 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
378 sizeof(struct channel_list_end_tlv)); 386 sizeof(struct channel_list_end_tlv));
@@ -452,12 +460,60 @@ free_irq:
 	bnx2x_free_irq(bp);
 }
 
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				   struct bnx2x_vf_queue *q)
+{
+	u8 cl_id = vfq_cl_id(vf, q);
+	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+	/* mac */
+	bnx2x_init_mac_obj(bp, &q->mac_obj,
+			   cl_id, q->cid, func_id,
+			   bnx2x_vf_sp(bp, vf, mac_rdata),
+			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &vf->filter_state,
+			   BNX2X_OBJ_TYPE_RX_TX,
+			   &bp->macs_pool);
+	/* vlan */
+	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+			    cl_id, q->cid, func_id,
+			    bnx2x_vf_sp(bp, vf, vlan_rdata),
+			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+			    BNX2X_FILTER_VLAN_PENDING,
+			    &vf->filter_state,
+			    BNX2X_OBJ_TYPE_RX_TX,
+			    &bp->vlans_pool);
+
+	/* mcast */
+	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+			     q->cid, func_id, func_id,
+			     bnx2x_vf_sp(bp, vf, mcast_rdata),
+			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING,
+			     &vf->filter_state,
+			     BNX2X_OBJ_TYPE_RX_TX);
+
+	/* rss */
+	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+				  func_id, func_id,
+				  bnx2x_vf_sp(bp, vf, rss_rdata),
+				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING,
+				  &vf->filter_state,
+				  BNX2X_OBJ_TYPE_RX_TX);
+
+	vf->leading_rss = cl_id;
+	q->is_leading = true;
+}
+
 /* ask the pf to open a queue for the vf */
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool is_leading)
 {
 	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
-	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+	u8 fp_idx = fp->index;
 	u16 tpa_agg_size = 0, flags = 0;
 	int rc;
 
@@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
 		tpa_agg_size = TPA_AGG_SIZE;
 	}
 
+	if (is_leading)
+		flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
 	/* calculate queue flags */
 	flags |= VFPF_QUEUE_FLG_STATS;
 	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
@@ -646,6 +705,71 @@ out:
 	return 0;
 }
 
+/* request pf to config rss table for vf queues*/
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *params)
+{
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+	int rc = 0;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+			sizeof(*req));
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+	req->rss_key_size = T_ETH_RSS_KEY;
+	req->rss_result_mask = params->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+		req->rss_flags |= VFPF_RSS_SET_SRCH;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+		req->rss_flags |= VFPF_RSS_IPV4;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+		req->rss_flags |= VFPF_RSS_IPV4_TCP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+		req->rss_flags |= VFPF_RSS_IPV4_UDP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+		req->rss_flags |= VFPF_RSS_IPV6;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+		req->rss_flags |= VFPF_RSS_IPV6_TCP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+		req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
+			  resp->hdr.status);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return 0;
+}
+
 int bnx2x_vfpf_set_mcast(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 
 	/* fill in pfdev info */
 	resp->pfdev_info.chip_num = bp->common.chip_id;
-	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+	resp->pfdev_info.db_size = bp->db_size;
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
 				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	/* record ghost addresses from vf message */
 	vf->spq_map = init->spq_addr;
 	vf->fw_stat_map = init->stats_addr;
+	vf->stats_stride = init->stats_stride;
 	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
 
+	/* set VF multiqueue statistics collection mode */
+	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+		vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
 	/* response */
 	bnx2x_vf_mbx_resp(bp, vf);
 }
@@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
 		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
 	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
 		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
 
 	/* outer vlan removal is set according to PF's multi function mode */
 	if (IS_MF_SD(bp))
@@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	struct bnx2x_queue_init_params *init_p;
 	struct bnx2x_queue_setup_params *setup_p;
 
+	if (bnx2x_vfq_is_leading(q))
+		bnx2x_leading_vfq_init(bp, vf, q);
+
 	/* re-init the VF operation context */
 	memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
 	setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	bnx2x_vf_mbx_resp(bp, vf);
 }
 
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vf_mbx_resp,
+		.block = false,
+	};
+	struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+
+	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+			  vf->index);
+		vf->op_rc = -EINVAL;
+		goto mbx_resp;
+	}
+
+	/* set vfop params according to rss tlv */
+	memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+	       T_ETH_INDIRECTION_TABLE_SIZE);
+	memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
+	       sizeof(rss_tlv->rss_key));
+	vf_op_params->rss_obj = &vf->rss_conf_obj;
+	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+		__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+		__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+		__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+		__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+		__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+		__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+		__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+
+	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+		BNX2X_ERR("about to hit a FW assert. aborting...\n");
+		vf->op_rc = -EINVAL;
+		goto mbx_resp;
+	}
+
+	vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+
+mbx_resp:
+	if (vf->op_rc)
+		bnx2x_vf_mbx_resp(bp, vf);
+}
+
 /* dispatch request */
 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				 struct bnx2x_vf_mbx *mbx)
@@ -1588,6 +1784,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		case CHANNEL_TLV_RELEASE:
 			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
 			break;
+		case CHANNEL_TLV_UPDATE_RSS:
+			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+			break;
 		}
 
 	} else {
@@ -1607,7 +1806,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		/* test whether we can respond to the VF (do we have an address
 		 * for it?)
 		 */
-		if (vf->state == VF_ACQUIRED) {
+		if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
 			/* mbx_resp uses the op_rc of the VF */
 			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f3ad174a3a63..1179fe06d0c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -51,6 +51,7 @@ struct hw_sb_info {
 #define VFPF_QUEUE_FLG_COS		0x0080
 #define VFPF_QUEUE_FLG_HC		0x0100
 #define VFPF_QUEUE_FLG_DHC		0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS	0x0400
 
 #define VFPF_QUEUE_DROP_IP_CS_ERR	(1 << 0)
 #define VFPF_QUEUE_DROP_TCP_CS_ERR	(1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
 	u8 padding[3];
 };
 
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+	struct vfpf_first_tlv	first_tlv;
+	u32			rss_flags;
+#define VFPF_RSS_MODE_DISABLED	(1 << 0)
+#define VFPF_RSS_MODE_REGULAR	(1 << 1)
+#define VFPF_RSS_SET_SRCH	(1 << 2)
+#define VFPF_RSS_IPV4		(1 << 3)
+#define VFPF_RSS_IPV4_TCP	(1 << 4)
+#define VFPF_RSS_IPV4_UDP	(1 << 5)
+#define VFPF_RSS_IPV6		(1 << 6)
+#define VFPF_RSS_IPV6_TCP	(1 << 7)
+#define VFPF_RSS_IPV6_UDP	(1 << 8)
+	u8			rss_result_mask;
+	u8			ind_table_size;
+	u8			rss_key_size;
+	u8			padding;
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	u32			rss_key[T_ETH_RSS_KEY];	/* hash values */
+};
+
 /* acquire response tlv - carries the allocated resources */
 struct pfvf_acquire_resp_tlv {
 	struct pfvf_tlv hdr;
@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
 	} resc;
 };
 
+#define VFPF_INIT_FLG_STATS_COALESCE	(1 << 0) /* when set the VFs queues
+						  * stats will be coalesced on
+						  * the leading RSS queue
+						  */
+
 /* Init VF */
 struct vfpf_init_tlv {
 	struct vfpf_first_tlv first_tlv;
 	aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
 	aligned_u64 spq_addr;
 	aligned_u64 stats_addr;
+	u16 stats_stride;
+	u32 flags;
+	u32 padding[2];
 };
 
 /* Setup Queue */
@@ -293,13 +323,14 @@ union vfpf_tlvs {
 	struct vfpf_q_op_tlv		q_op;
 	struct vfpf_setup_q_tlv		setup_q;
 	struct vfpf_set_q_filters_tlv	set_q_filters;
 	struct vfpf_release_tlv		release;
+	struct vfpf_rss_tlv		update_rss;
 	struct channel_list_end_tlv	list_end;
 	struct tlv_buffer_size		tlv_buf_size;
 };
 
 union pfvf_tlvs {
 	struct pfvf_general_resp_tlv	general_resp;
 	struct pfvf_acquire_resp_tlv	acquire_resp;
 	struct channel_list_end_tlv	list_end;
 	struct tlv_buffer_size		tlv_buf_size;
@@ -355,14 +386,18 @@ enum channel_tlvs {
 	CHANNEL_TLV_INIT,
 	CHANNEL_TLV_SETUP_Q,
 	CHANNEL_TLV_SET_Q_FILTERS,
+	CHANNEL_TLV_ACTIVATE_Q,
+	CHANNEL_TLV_DEACTIVATE_Q,
 	CHANNEL_TLV_TEARDOWN_Q,
 	CHANNEL_TLV_CLOSE,
 	CHANNEL_TLV_RELEASE,
+	CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
 	CHANNEL_TLV_PF_RELEASE_VF,
 	CHANNEL_TLV_LIST_END,
 	CHANNEL_TLV_FLR,
 	CHANNEL_TLV_PF_SET_MAC,
 	CHANNEL_TLV_PF_SET_VLAN,
+	CHANNEL_TLV_UPDATE_RSS,
 	CHANNEL_TLV_MAX
 };
 
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d78d4cf140ed..8142480d9770 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1184,6 +1184,7 @@ error:
 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int ctx_blk_size = cp->ethdev->ctx_blk_size;
 	int total_mem, blks, i;
 
@@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
 
 	cp->ctx_blks = blks;
 	cp->ctx_blk_size = ctx_blk_size;
-	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
+	if (!CHIP_IS_E1(bp))
 		cp->ctx_align = 0;
 	else
 		cp->ctx_align = ctx_blk_size;
@@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_eth_dev *ethdev = cp->ethdev;
 	u32 start_cid = ethdev->starting_cid;
 	int i, j, n, ret, pages;
@@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	cp->iscsi_start_cid = start_cid;
 	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
 
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		cp->max_cid_space += dev->max_fcoe_conn;
 		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
 		if (!cp->fcoe_init_cid)
@@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	if (ret)
 		goto error;
 
-	if (CNIC_SUPPORTS_FCOE(cp)) {
+	if (CNIC_SUPPORTS_FCOE(bp)) {
 		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
 		if (ret)
 			goto error;
@@ -1382,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
 			       u32 type, union l5cm_specific_data *l5_data)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct l5cm_spe kwqe;
 	struct kwqe_16 *kwq[1];
 	u16 type_16;
@@ -1389,10 +1392,10 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
 
 	kwqe.hdr.conn_and_cmd_data =
 		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
-			     BNX2X_HW_CID(cp, cid)));
+			     BNX2X_HW_CID(bp, cid)));
 
 	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
 		   SPE_HDR_FUNCTION_ID;
 
 	kwqe.hdr.type = cpu_to_le16(type_16);
@@ -1427,13 +1430,34 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
 	rcu_read_unlock();
 }
 
+static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
+				       int en_tcp_dack)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+	u16 tstorm_flags = 0;
+
+	if (time_stamps) {
+		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+	}
+	if (en_tcp_dack)
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
+
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
+}
+
 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
 	int hq_bds, pages;
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 
 	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
 	cp->num_ccells = req1->num_ccells_per_conn;
@@ -1506,15 +1530,18 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
 		  hq_bds);
 
+	cnic_bnx2x_set_tcp_options(dev,
+			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
+			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
+
 	return 0;
 }
 
 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
 {
 	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
-	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 	struct iscsi_kcqe kcqe;
 	struct kcqe *cqes[1];
 
@@ -1653,6 +1680,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 				u32 num)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct iscsi_kwqe_conn_offload1 *req1 =
 		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
 	struct iscsi_kwqe_conn_offload2 *req2 =
@@ -1661,11 +1689,11 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
 	u32 cid = ctx->cid;
-	u32 hw_cid = BNX2X_HW_CID(cp, cid);
+	u32 hw_cid = BNX2X_HW_CID(bp, cid);
 	struct iscsi_context *ictx;
 	struct regpair context_addr;
 	int i, j, n = 2, n_max;
-	u8 port = CNIC_PORT(cp);
+	u8 port = BP_PORT(bp);
 
 	ctx->ctx_flags = 0;
 	if (!req2->num_additional_wqes)
@@ -1719,8 +1747,8 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
 	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
 		ETH_P_8021Q;
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
-	    cp->port_mode == CHIP_2_PORT_MODE) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
+	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
 
 		port = 0;
 	}
@@ -1841,6 +1869,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	struct iscsi_kwqe_conn_offload1 *req1;
 	struct iscsi_kwqe_conn_offload2 *req2;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_context *ctx;
 	struct iscsi_kcqe kcqe;
 	struct kcqe *cqes[1];
@@ -1894,7 +1923,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	}
 
 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
-	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
+	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
 
 done:
 	cqes[0] = (struct kcqe *) &kcqe;
@@ -1930,6 +1959,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 	union l5cm_specific_data l5_data;
 	int ret;
@@ -1938,7 +1968,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
 	init_waitqueue_head(&ctx->waitq);
 	ctx->wait_cond = 0;
 	memset(&l5_data, 0, sizeof(l5_data));
-	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
 
 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
 				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
@@ -2035,9 +2065,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
 	xstorm_buf->pseudo_header_checksum =
 		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
 
-	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
-		tstorm_buf->params |=
-			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
 	if (kwqe3->ka_timeout) {
 		tstorm_buf->ka_enable = 1;
 		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
@@ -2049,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
 
 static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
 {
-	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 	u8 *mac = dev->mac_addr;
 
 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@@ -2084,25 +2110,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
 		  mac[0]);
 }
 
-static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
-	u16 tstorm_flags = 0;
-
-	if (tcp_ts) {
-		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
-		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
-	}
-
-	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
-		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
-
-	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
-		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
-}
-
 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
 			      u32 num, int *work)
 {
@@ -2176,10 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
 	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
 
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
-		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
-
-	cnic_bnx2x_set_tcp_timestamp(dev,
-		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
 
 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
 				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
@@ -2248,11 +2252,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
 	struct fcoe_stat_ramrod_params *fcoe_stat;
 	union l5cm_specific_data l5_data;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int ret;
 	u32 cid;
 
 	req = (struct fcoe_kwqe_stat *) kwqe;
-	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
 
 	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
 	if (!fcoe_stat)
@@ -2271,6 +2276,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
 {
 	int ret;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	u32 cid;
 	struct fcoe_init_ramrod_params *fcoe_init;
 	struct fcoe_kwqe_init1 *req1;
@@ -2315,7 +2321,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
 	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
 	cp->kcq2.sw_prod_idx = 0;
 
-	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	*work = 3;
@@ -2328,6 +2334,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	int ret = 0;
 	u32 cid = -1, l5_cid;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct fcoe_kwqe_conn_offload1 *req1;
 	struct fcoe_kwqe_conn_offload2 *req2;
 	struct fcoe_kwqe_conn_offload3 *req3;
@@ -2370,7 +2377,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 
 	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
 	if (fctx) {
-		u32 hw_cid = BNX2X_HW_CID(cp, cid);
+		u32 hw_cid = BNX2X_HW_CID(bp, cid);
 		u32 val;
 
 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
@@ -2394,7 +2401,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
 	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
 
-	cid = BNX2X_HW_CID(cp, cid);
+	cid = BNX2X_HW_CID(bp, cid);
 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	if (!ret)
@@ -2552,13 +2559,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
 	struct fcoe_kwqe_destroy *req;
 	union l5cm_specific_data l5_data;
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int ret;
 	u32 cid;
 
 	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
 
 	req = (struct fcoe_kwqe_destroy *) kwqe;
-	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
 
 	memset(&l5_data, 0, sizeof(l5_data));
 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
@@ -2715,7 +2723,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
 					struct kwqe *wqes[], u32 num_wqes)
 {
-	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	int i, work, ret;
 	u32 opcode;
 	struct kwqe *kwqe;
@@ -2723,7 +2731,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
 		return -EAGAIN;		/* bnx2 is down */
 
-	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
 		return -EINVAL;
 
 	for (i = 0; i < num_wqes; ) {
@@ -3039,8 +3047,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
 				      u16 index, u8 op, u8 update)
 {
-	struct cnic_local *cp = dev->cnic_priv;
-	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
 		       COMMAND_REG_INT_ACK);
 	struct igu_ack_register igu_ack;
 
@@ -3603,6 +3611,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
 	csk1->rcv_buf = DEF_RCV_BUF;
 	csk1->snd_buf = DEF_SND_BUF;
 	csk1->seed = DEF_SEED;
+	csk1->tcp_flags = 0;
 
 	*csk = csk1;
 	return 0;
@@ -4020,15 +4029,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 		cnic_cm_upcall(cp, csk, opcode);
 		break;
 
-	case L5CM_RAMROD_CMD_ID_CLOSE:
-		if (l4kcqe->status != 0) {
-			netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
-				    "status 0x%x\n", l4kcqe->status);
+	case L5CM_RAMROD_CMD_ID_CLOSE: {
+		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
+
+		if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
+			netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+				    l4kcqe->status, l5kcqe->completion_status);
 			opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
 			/* Fall through */
 		} else {
 			break;
 		}
+	}
 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4213,13 +4225,12 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
 
 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
 {
-	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
-	u32 port = CNIC_PORT(cp);
+	u32 pfid = bp->pfid;
+	u32 port = BP_PORT(bp);
 
 	cnic_init_bnx2x_mac(dev);
-	cnic_bnx2x_set_tcp_timestamp(dev, 1);
+	cnic_bnx2x_set_tcp_options(dev, 0, 1);
 
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
@@ -4897,6 +4908,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 				    struct client_init_ramrod_data *data)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
@@ -4925,7 +4937,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
 	start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
 
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+	if (BNX2X_CHIP_IS_E2_PLUS(bp))
 		pbd_e2->parsing_data = (UNICAST_ADDRESS <<
 					ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
 	else
@@ -4962,6 +4974,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 				    struct client_init_ramrod_data *data)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
 				BNX2_PAGE_SIZE);
@@ -4970,7 +4983,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
-	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
 	u32 val;
 	dma_addr_t ring_map = udev->l2_ring_map;
 
@@ -4979,7 +4992,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	data->general.activate_flg = 1;
 	data->general.sp_client_id = cli;
 	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
-	data->general.func_id = cp->pfid;
+	data->general.func_id = bp->pfid;
 
 	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
@@ -5029,13 +5042,13 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
-	u32 pfid = cp->pfid;
+	u32 pfid = bp->pfid;
 
 	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
 			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
 	cp->kcq1.sw_prod_idx = 0;
 
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 
 		cp->kcq1.hw_prod_idx_ptr =
@@ -5051,7 +5064,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 			&sb->sb.running_index[SM_RX_ID];
 	}
 
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 
 		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -5073,12 +5086,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	u32 pfid;
 
 	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
-	cp->port_mode = bp->common.chip_port_mode;
-	cp->pfid = bp->pfid;
 	cp->func = bp->pf_num;
 
 	func = CNIC_FUNC(cp);
-	pfid = cp->pfid;
+	pfid = bp->pfid;
 
 	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
 			       cp->iscsi_start_cid, 0);
@@ -5086,7 +5097,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	if (ret)
 		return -ENOMEM;
 
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
 				       cp->fcoe_start_cid, 0);
 
@@ -5168,12 +5179,12 @@ static void cnic_init_rings(struct cnic_dev *dev)
 	rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
 	barrier();
 
-	cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+	cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
 
 	off = BAR_USTRORM_INTMEM +
-		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
+		(BNX2X_CHIP_IS_E2_PLUS(bp) ?
 		 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
-		 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
+		 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
 
 	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
 		CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
@@ -5271,6 +5282,13 @@ static int cnic_register_netdev(struct cnic_dev *dev)
 	if (err)
 		netdev_err(dev->netdev, "register_cnic failed\n");
 
+	/* Read iSCSI config again. On some bnx2x device, iSCSI config
+	 * can change after firmware is downloaded.
+	 */
+	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
+	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+		dev->max_iscsi_conn = 0;
+
 	return err;
 }
 
@@ -5353,7 +5371,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
 
 	cnic_free_irq(dev);
 
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		idx_off = offsetof(struct hc_status_block_e2, index_values) +
 			  (hc_index * sizeof(u16));
 
@@ -5370,7 +5388,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
 
 	*cp->kcq1.hw_prod_idx_ptr = 0;
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
-		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
+		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
 	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
 	cnic_free_resc(dev);
 }
@@ -5544,7 +5562,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 
 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-	if (CNIC_SUPPORTS_FCOE(cp)) {
+	if (CNIC_SUPPORTS_FCOE(bp)) {
 		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
 		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
 	}
@@ -5564,7 +5582,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
 	cp->enable_int = cnic_enable_bnx2x_int;
 	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
-	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
 		cp->ack_int = cnic_ack_bnx2x_e2_msix;
 		cp->arm_int = cnic_arm_bnx2x_e2_msix;
 	} else {
@@ -5628,7 +5646,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 
 	dev = cnic_from_netdev(netdev);
 
-	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+	if (!dev && event == NETDEV_REGISTER) {
 		/* Check for the hot-plug device */
 		dev = is_cnic_dev(netdev);
 		if (dev) {
@@ -5644,7 +5662,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 	else if (event == NETDEV_UNREGISTER)
 		cnic_ulp_exit(dev);
 
-	if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+	if (event == NETDEV_UP) {
 		if (cnic_register_netdev(dev) != 0) {
 			cnic_put(dev);
 			goto done;
@@ -5693,21 +5711,8 @@ static struct notifier_block cnic_netdev_notifier = {
 
 static void cnic_release(void)
 {
-	struct cnic_dev *dev;
 	struct cnic_uio_dev *udev;
 
-	while (!list_empty(&cnic_dev_list)) {
-		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
-		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
-			cnic_ulp_stop(dev);
-			cnic_stop_hw(dev);
-		}
-
-		cnic_ulp_exit(dev);
-		cnic_unregister_netdev(dev);
-		list_del_init(&dev->list);
-		cnic_free_dev(dev);
-	}
 	while (!list_empty(&cnic_udev_list)) {
 		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
 				  list);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 62c670619ae6..0121a5d55192 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -303,8 +303,6 @@ struct cnic_local {
 
 	u32 chip_id;
 	int func;
-	u32 pfid;
-	u8 port_mode;
 
 	u32 shmem_base;
 
@@ -364,47 +362,7 @@ struct bnx2x_bd_chain_next {
 
 #define BNX2X_FCOE_L5_CID_BASE		MAX_ISCSI_TBL_SZ
 
-#define BNX2X_CHIP_NUM_57710		0x164e
-#define BNX2X_CHIP_NUM_57711		0x164f
-#define BNX2X_CHIP_NUM_57711E		0x1650
-#define BNX2X_CHIP_NUM_57712		0x1662
-#define BNX2X_CHIP_NUM_57712E		0x1663
-#define BNX2X_CHIP_NUM_57713		0x1651
-#define BNX2X_CHIP_NUM_57713E		0x1652
-#define BNX2X_CHIP_NUM_57800		0x168a
-#define BNX2X_CHIP_NUM_57810		0x168e
-#define BNX2X_CHIP_NUM_57840		0x168d
-
-#define BNX2X_CHIP_NUM(x)		(x >> 16)
-#define BNX2X_CHIP_IS_57710(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
-#define BNX2X_CHIP_IS_57711(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
-#define BNX2X_CHIP_IS_57711E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
-#define BNX2X_CHIP_IS_E1H(x)		\
-	(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
-#define BNX2X_CHIP_IS_57712(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
-#define BNX2X_CHIP_IS_57712E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
-#define BNX2X_CHIP_IS_57713(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
-#define BNX2X_CHIP_IS_57713E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
-#define BNX2X_CHIP_IS_57800(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
-#define BNX2X_CHIP_IS_57810(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
-#define BNX2X_CHIP_IS_57840(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
-#define BNX2X_CHIP_IS_E2(x)		\
-	(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
-	 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
-#define BNX2X_CHIP_IS_E3(x)		\
-	(BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
-	 BNX2X_CHIP_IS_57840(x))
-#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
+#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
 
 #define BNX2X_RX_DESC_CNT		(BNX2_PAGE_SIZE / \
 					 sizeof(struct eth_rx_bd))
@@ -439,31 +397,26 @@ struct bnx2x_bd_chain_next {
 #define ETH_MAX_RX_CLIENTS_E2		ETH_MAX_RX_CLIENTS_E1H
 #endif
 
-#define CNIC_PORT(cp)			((cp)->pfid & 1)
 #define CNIC_FUNC(cp)			((cp)->func)
-#define CNIC_PATH(cp)			(!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
-					 0 : (CNIC_FUNC(cp) & 1))
-#define CNIC_E1HVN(cp)			((cp)->pfid >> 1)
 
-#define BNX2X_HW_CID(cp, x)		((CNIC_PORT(cp) << 23) | \
-					 (CNIC_E1HVN(cp) << 17) | (x))
+#define BNX2X_HW_CID(bp, x)		((BP_PORT(bp) << 23) | \
+					 (BP_VN(bp) << 17) | (x))
 
 #define BNX2X_SW_CID(x)			(x & 0x1ffff)
 
-#define BNX2X_CL_QZONE_ID(cp, cli)	\
-		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
-		 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+#define BNX2X_CL_QZONE_ID(bp, cli)	\
+		(BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
+		 cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
 
 #ifndef MAX_STAT_COUNTER_ID
 #define MAX_STAT_COUNTER_ID		\
-	(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
-	 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+	(CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
+	 ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
 	 MAX_STAT_COUNTER_ID_E1))
 #endif
 
 #define CNIC_SUPPORTS_FCOE(cp)		\
-	(BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
-	 !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+	(BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
 
 #define CNIC_RAMROD_TMO			(HZ / 4)
 
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index ede3db35d757..95a8e4b11c9f 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
 
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
 	u16 flags;
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
 #define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ec9bb9ad4bb3..0658b43e148c 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
1/* cnic_if.h: Broadcom CNIC core network driver. 1/* cnic_if.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2012 Broadcom Corporation 3 * Copyright (c) 2006-2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
14 14
15#include "bnx2x/bnx2x_mfw_req.h" 15#include "bnx2x/bnx2x_mfw_req.h"
16 16
17#define CNIC_MODULE_VERSION "2.5.16" 17#define CNIC_MODULE_VERSION "2.5.18"
18#define CNIC_MODULE_RELDATE "Dec 05, 2012" 18#define CNIC_MODULE_RELDATE "Sept 01, 2013"
19 19
20#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
21#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
@@ -238,8 +238,8 @@ struct cnic_sock {
238 u16 src_port; 238 u16 src_port;
239 u16 dst_port; 239 u16 dst_port;
240 u16 vlan_id; 240 u16 vlan_id;
241 unsigned char old_ha[6]; 241 unsigned char old_ha[ETH_ALEN];
242 unsigned char ha[6]; 242 unsigned char ha[ETH_ALEN];
243 u32 mtu; 243 u32 mtu;
244 u32 cid; 244 u32 cid;
245 u32 l5_cid; 245 u32 l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
308#define CNIC_F_BNX2_CLASS 3 308#define CNIC_F_BNX2_CLASS 3
309#define CNIC_F_BNX2X_CLASS 4 309#define CNIC_F_BNX2X_CLASS 4
310 atomic_t ref_count; 310 atomic_t ref_count;
311 u8 mac_addr[6]; 311 u8 mac_addr[ETH_ALEN];
312 312
313 int max_iscsi_conn; 313 int max_iscsi_conn;
314 int max_fcoe_conn; 314 int max_fcoe_conn;
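Replacing the bare 6 with ETH_ALEN (from <linux/if_ether.h>) ties the buffer sizes to the Ethernet address length rather than a magic number, so sized copies stay in sync with the arrays. A trivial standalone illustration:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6  /* mirrors <linux/if_ether.h> for this standalone sketch */

    int main(void)
    {
            unsigned char ha[ETH_ALEN];
            const unsigned char old_ha[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

            memcpy(ha, old_ha, ETH_ALEN);   /* length named after the array size */
            printf("%02x\n", ha[5]);        /* cc */
            return 0;
    }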
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0da2214ef1b9..5701f3d1a169 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
94 94
95#define DRV_MODULE_NAME "tg3" 95#define DRV_MODULE_NAME "tg3"
96#define TG3_MAJ_NUM 3 96#define TG3_MAJ_NUM 3
97#define TG3_MIN_NUM 132 97#define TG3_MIN_NUM 133
98#define DRV_MODULE_VERSION \ 98#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE "May 21, 2013" 100#define DRV_MODULE_RELDATE "Jul 29, 2013"
101 101
102#define RESET_KIND_SHUTDOWN 0 102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1 103#define RESET_KIND_INIT 1
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
3030 return false; 3030 return false;
3031} 3031}
3032 3032
3033static bool tg3_phy_led_bug(struct tg3 *tp)
3034{
3035 switch (tg3_asic_rev(tp)) {
3036 case ASIC_REV_5719:
3037 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3038 !tp->pci_fn)
3039 return true;
3040 return false;
3041 }
3042
3043 return false;
3044}
3045
3033static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3046static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3034{ 3047{
3035 u32 val; 3048 u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3077 } 3090 }
3078 return; 3091 return;
3079 } else if (do_low_power) { 3092 } else if (do_low_power) {
3080 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3093 if (!tg3_phy_led_bug(tp))
3081 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3094 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3095 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3082 3096
3083 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3097 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3084 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3098 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -4226,8 +4240,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
4226 4240
4227static void tg3_power_down(struct tg3 *tp) 4241static void tg3_power_down(struct tg3 *tp)
4228{ 4242{
4229 tg3_power_down_prepare(tp);
4230
4231 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4243 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4232 pci_set_power_state(tp->pdev, PCI_D3hot); 4244 pci_set_power_state(tp->pdev, PCI_D3hot);
4233} 4245}
@@ -6095,10 +6107,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
6095/* tp->lock must be held */ 6107/* tp->lock must be held */
6096static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6108static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6097{ 6109{
6098 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP); 6110 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6111
6112 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6099 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6113 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6100 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6114 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6101 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME); 6115 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6102} 6116}
6103 6117
6104static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6118static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
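tg3_refclk_write() switches from storing bare STOP/RESUME values to a read-modify-write of TG3_EAV_REF_CLCK_CTL. That matters because of the PTP changes below: the timesync watchdog bits (TG3_EAV_CTL_TSYNC_GPIO_MASK, TG3_EAV_CTL_TSYNC_WDOG0) live in the same register, so a blind write while a one-shot output is armed would silently disarm it.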
@@ -6214,6 +6228,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6214static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6228static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6215 struct ptp_clock_request *rq, int on) 6229 struct ptp_clock_request *rq, int on)
6216{ 6230{
6231 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6232 u32 clock_ctl;
6233 int rval = 0;
6234
6235 switch (rq->type) {
6236 case PTP_CLK_REQ_PEROUT:
6237 if (rq->perout.index != 0)
6238 return -EINVAL;
6239
6240 tg3_full_lock(tp, 0);
6241 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6242 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6243
6244 if (on) {
6245 u64 nsec;
6246
6247 nsec = rq->perout.start.sec * 1000000000ULL +
6248 rq->perout.start.nsec;
6249
6250 if (rq->perout.period.sec || rq->perout.period.nsec) {
6251 netdev_warn(tp->dev,
6252 "Device supports only a one-shot timesync output, period must be 0\n");
6253 rval = -EINVAL;
6254 goto err_out;
6255 }
6256
6257 if (nsec & (1ULL << 63)) {
6258 netdev_warn(tp->dev,
6259 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6260 rval = -EINVAL;
6261 goto err_out;
6262 }
6263
6264 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6265 tw32(TG3_EAV_WATCHDOG0_MSB,
6266 TG3_EAV_WATCHDOG0_EN |
6267 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6268
6269 tw32(TG3_EAV_REF_CLCK_CTL,
6270 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6271 } else {
6272 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6273 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6274 }
6275
6276err_out:
6277 tg3_full_unlock(tp);
6278 return rval;
6279
6280 default:
6281 break;
6282 }
6283
6217 return -EOPNOTSUPP; 6284 return -EOPNOTSUPP;
6218} 6285}
6219 6286
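The new tg3_ptp_enable() implements exactly one programmable output: a one-shot pulse at an absolute PHC time, rejecting any nonzero period and any start value that does not fit in 63 bits. A hedged userspace sketch of arming it through the standard PTP character device; "/dev/ptp0" and the start time are assumptions, not values from the driver:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int main(void)
    {
            struct ptp_perout_request req;
            int fd = open("/dev/ptp0", O_RDWR);     /* the PHC tg3 registered */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&req, 0, sizeof(req));
            req.index = 0;          /* n_per_out is now 1, so only index 0 */
            req.start.sec = 1000;   /* absolute PHC time of the pulse */
            req.start.nsec = 0;
            /* req.period stays zero: the driver returns -EINVAL otherwise */

            if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
                    perror("PTP_PEROUT_REQUEST");
            return 0;
    }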
@@ -6223,7 +6290,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
6223 .max_adj = 250000000, 6290 .max_adj = 250000000,
6224 .n_alarm = 0, 6291 .n_alarm = 0,
6225 .n_ext_ts = 0, 6292 .n_ext_ts = 0,
6226 .n_per_out = 0, 6293 .n_per_out = 1,
6227 .pps = 0, 6294 .pps = 0,
6228 .adjfreq = tg3_ptp_adjfreq, 6295 .adjfreq = tg3_ptp_adjfreq,
6229 .adjtime = tg3_ptp_adjtime, 6296 .adjtime = tg3_ptp_adjtime,
@@ -8538,10 +8605,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
8538 if (!i && tg3_flag(tp, ENABLE_RSS)) 8605 if (!i && tg3_flag(tp, ENABLE_RSS))
8539 continue; 8606 continue;
8540 8607
8541 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8608 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8542 TG3_RX_RCB_RING_BYTES(tp), 8609 TG3_RX_RCB_RING_BYTES(tp),
8543 &tnapi->rx_rcb_mapping, 8610 &tnapi->rx_rcb_mapping,
8544 GFP_KERNEL | __GFP_ZERO); 8611 GFP_KERNEL);
8545 if (!tnapi->rx_rcb) 8612 if (!tnapi->rx_rcb)
8546 goto err_out; 8613 goto err_out;
8547 } 8614 }
@@ -8590,10 +8657,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8590{ 8657{
8591 int i; 8658 int i;
8592 8659
8593 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8660 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8594 sizeof(struct tg3_hw_stats), 8661 sizeof(struct tg3_hw_stats),
8595 &tp->stats_mapping, 8662 &tp->stats_mapping, GFP_KERNEL);
8596 GFP_KERNEL | __GFP_ZERO);
8597 if (!tp->hw_stats) 8663 if (!tp->hw_stats)
8598 goto err_out; 8664 goto err_out;
8599 8665
@@ -8601,10 +8667,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8601 struct tg3_napi *tnapi = &tp->napi[i]; 8667 struct tg3_napi *tnapi = &tp->napi[i];
8602 struct tg3_hw_status *sblk; 8668 struct tg3_hw_status *sblk;
8603 8669
8604 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8670 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8605 TG3_HW_STATUS_SIZE, 8671 TG3_HW_STATUS_SIZE,
8606 &tnapi->status_mapping, 8672 &tnapi->status_mapping,
8607 GFP_KERNEL | __GFP_ZERO); 8673 GFP_KERNEL);
8608 if (!tnapi->hw_status) 8674 if (!tnapi->hw_status)
8609 goto err_out; 8675 goto err_out;
8610 8676
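These three hunks are a mechanical conversion: at the time of this merge dma_zalloc_coherent() is roughly a one-line wrapper that ORs in __GFP_ZERO, so moving the zeroing intent into the function name changes no behavior:

    /* approximate definition from <linux/dma-mapping.h> */
    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
    {
            return dma_alloc_coherent(dev, size, dma_handle,
                                      flag | __GFP_ZERO);
    }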
@@ -10367,6 +10433,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10367 if (tg3_flag(tp, 5755_PLUS)) 10433 if (tg3_flag(tp, 5755_PLUS))
10368 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10434 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10369 10435
10436 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10437 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10438
10370 if (tg3_flag(tp, ENABLE_RSS)) 10439 if (tg3_flag(tp, ENABLE_RSS))
10371 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10440 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10372 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10441 RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11571,7 @@ static int tg3_close(struct net_device *dev)
11502 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); 11571 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11503 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); 11572 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11504 11573
11505 tg3_power_down(tp); 11574 tg3_power_down_prepare(tp);
11506 11575
11507 tg3_carrier_off(tp); 11576 tg3_carrier_off(tp);
11508 11577
@@ -11724,9 +11793,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11724 if (tg3_flag(tp, NO_NVRAM)) 11793 if (tg3_flag(tp, NO_NVRAM))
11725 return -EINVAL; 11794 return -EINVAL;
11726 11795
11727 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11728 return -EAGAIN;
11729
11730 offset = eeprom->offset; 11796 offset = eeprom->offset;
11731 len = eeprom->len; 11797 len = eeprom->len;
11732 eeprom->len = 0; 11798 eeprom->len = 0;
@@ -11784,9 +11850,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11784 u8 *buf; 11850 u8 *buf;
11785 __be32 start, end; 11851 __be32 start, end;
11786 11852
11787 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11788 return -EAGAIN;
11789
11790 if (tg3_flag(tp, NO_NVRAM) || 11853 if (tg3_flag(tp, NO_NVRAM) ||
11791 eeprom->magic != TG3_EEPROM_MAGIC) 11854 eeprom->magic != TG3_EEPROM_MAGIC)
11792 return -EINVAL; 11855 return -EINVAL;
@@ -13515,7 +13578,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13515 tg3_phy_start(tp); 13578 tg3_phy_start(tp);
13516 } 13579 }
13517 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13580 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13518 tg3_power_down(tp); 13581 tg3_power_down_prepare(tp);
13519 13582
13520} 13583}
13521 13584
@@ -15917,7 +15980,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15917 */ 15980 */
15918 if (tg3_flag(tp, 5780_CLASS)) { 15981 if (tg3_flag(tp, 5780_CLASS)) {
15919 tg3_flag_set(tp, 40BIT_DMA_BUG); 15982 tg3_flag_set(tp, 40BIT_DMA_BUG);
15920 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 15983 tp->msi_cap = tp->pdev->msi_cap;
15921 } else { 15984 } else {
15922 struct pci_dev *bridge = NULL; 15985 struct pci_dev *bridge = NULL;
15923 15986
@@ -17547,11 +17610,6 @@ static int tg3_init_one(struct pci_dev *pdev,
17547 tg3_asic_rev(tp) == ASIC_REV_5762) 17610 tg3_asic_rev(tp) == ASIC_REV_5762)
17548 tg3_flag_set(tp, PTP_CAPABLE); 17611 tg3_flag_set(tp, PTP_CAPABLE);
17549 17612
17550 if (tg3_flag(tp, 5717_PLUS)) {
17551 /* Resume a low-power mode */
17552 tg3_frob_aux_power(tp, false);
17553 }
17554
17555 tg3_timer_init(tp); 17613 tg3_timer_init(tp);
17556 17614
17557 tg3_carrier_off(tp); 17615 tg3_carrier_off(tp);
@@ -17755,6 +17813,23 @@ out:
17755 17813
17756static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); 17814static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17757 17815
17816static void tg3_shutdown(struct pci_dev *pdev)
17817{
17818 struct net_device *dev = pci_get_drvdata(pdev);
17819 struct tg3 *tp = netdev_priv(dev);
17820
17821 rtnl_lock();
17822 netif_device_detach(dev);
17823
17824 if (netif_running(dev))
17825 dev_close(dev);
17826
17827 if (system_state == SYSTEM_POWER_OFF)
17828 tg3_power_down(tp);
17829
17830 rtnl_unlock();
17831}
17832
17758/** 17833/**
17759 * tg3_io_error_detected - called when PCI error is detected 17834 * tg3_io_error_detected - called when PCI error is detected
17760 * @pdev: Pointer to PCI device 17835 * @pdev: Pointer to PCI device
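Net effect of the tg3 power rework visible in these hunks: tg3_close() and the self-test path now call only tg3_power_down_prepare(), while the full power-down (WoL arming plus the PCI_D3hot transition in tg3_power_down()) moves into the new .shutdown hook, where it runs only when system_state indicates a real power-off. The netif_device_detach()/dev_close() sequence under rtnl_lock() quiesces the interface first, so on a plain reboot the device is left powered rather than parked in D3.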
@@ -17914,6 +17989,7 @@ static struct pci_driver tg3_driver = {
17914 .remove = tg3_remove_one, 17989 .remove = tg3_remove_one,
17915 .err_handler = &tg3_err_handler, 17990 .err_handler = &tg3_err_handler,
17916 .driver.pm = &tg3_pm_ops, 17991 .driver.pm = &tg3_pm_ops,
17992 .shutdown = tg3_shutdown,
17917}; 17993};
17918 17994
17919module_pci_driver(tg3_driver); 17995module_pci_driver(tg3_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cd63d1189aae..ddb8be1298ea 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -532,6 +532,7 @@
532#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 532#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000
533#define RX_MODE_RSS_ENABLE 0x00800000 533#define RX_MODE_RSS_ENABLE 0x00800000
534#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 534#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000
535#define RX_MODE_IPV4_FRAG_FIX 0x02000000
535#define MAC_RX_STATUS 0x0000046c 536#define MAC_RX_STATUS 0x0000046c
536#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 537#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
537#define RX_STATUS_XOFF_RCVD 0x00000002 538#define RX_STATUS_XOFF_RCVD 0x00000002
@@ -1818,12 +1819,21 @@
1818#define TG3_EAV_REF_CLCK_CTL 0x00006908 1819#define TG3_EAV_REF_CLCK_CTL 0x00006908
1819#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002 1820#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002
1820#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004 1821#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004
1822#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16)
1823#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17)
1824
1825#define TG3_EAV_WATCHDOG0_LSB 0x00006918
1826#define TG3_EAV_WATCHDOG0_MSB 0x0000691c
1827#define TG3_EAV_WATCHDOG0_EN (1 << 31)
1828#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff
1829
1821#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928 1830#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928
1822#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31) 1831#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31)
1823#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30) 1832#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30)
1824 1833
1825#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff 1834#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff
1826/* 0x690c --> 0x7000 unused */ 1835
1836/* 0x692c --> 0x7000 unused */
1827 1837
1828/* NVRAM Control registers */ 1838/* NVRAM Control registers */
1829#define NVRAM_CMD 0x00007000 1839#define NVRAM_CMD 0x00007000
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 57cd1bff59f1..3c07064b2bc4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1419 bna_bfi_rx_enet_start(rx); 1419 bna_bfi_rx_enet_start(rx);
1420} 1420}
1421 1421
1422void 1422static void
1423bna_rx_sm_stop_wait_entry(struct bna_rx *rx) 1423bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1424{ 1424{
1425} 1425}
@@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1472 bna_rxf_start(&rx->rxf); 1472 bna_rxf_start(&rx->rxf);
1473} 1473}
1474 1474
1475void 1475static void
1476bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) 1476bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1477{ 1477{
1478} 1478}
@@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1528 } 1528 }
1529} 1529}
1530 1530
1531void 1531static void
1532bna_rx_sm_started_entry(struct bna_rx *rx) 1532bna_rx_sm_started_entry(struct bna_rx *rx)
1533{ 1533{
1534 struct bna_rxp *rxp; 1534 struct bna_rxp *rxp;
@@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1593 } 1593 }
1594} 1594}
1595 1595
1596void 1596static void
1597bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) 1597bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1598{ 1598{
1599} 1599}
1600 1600
1601void 1601static void
1602bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) 1602bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1603{ 1603{
1604 switch (event) { 1604 switch (event) {
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index c37f706d9992..43405f654b4a 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
37 37
38extern char bfa_version[]; 38extern char bfa_version[];
39 39
40#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin" 40#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
41#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin" 41#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ 42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
43 43
44#pragma pack(1) 44#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index bb5d63fb2e6d..ce75de9bae9e 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
304/* Detect MAC & PHY and perform ethernet interface initialization */ 304/* Detect MAC & PHY and perform ethernet interface initialization */
305static int __init at91ether_probe(struct platform_device *pdev) 305static int __init at91ether_probe(struct platform_device *pdev)
306{ 306{
307 struct macb_platform_data *board_data = pdev->dev.platform_data; 307 struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
308 struct resource *regs; 308 struct resource *regs;
309 struct net_device *dev; 309 struct net_device *dev;
310 struct phy_device *phydev; 310 struct phy_device *phydev;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index e866608d7d91..92578690f6de 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -27,6 +27,7 @@
27#include <linux/phy.h> 27#include <linux/phy.h>
28#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/of_device.h> 29#include <linux/of_device.h>
30#include <linux/of_mdio.h>
30#include <linux/of_net.h> 31#include <linux/of_net.h>
31#include <linux/pinctrl/consumer.h> 32#include <linux/pinctrl/consumer.h>
32 33
@@ -124,7 +125,7 @@ void macb_get_hwaddr(struct macb *bp)
124 u8 addr[6]; 125 u8 addr[6];
125 int i; 126 int i;
126 127
127 pdata = bp->pdev->dev.platform_data; 128 pdata = dev_get_platdata(&bp->pdev->dev);
128 129
129 /* Check all 4 address register for vaild address */ 130 /* Check all 4 address register for vaild address */
130 for (i = 0; i < 4; i++) { 131 for (i = 0; i < 4; i++) {
@@ -275,7 +276,7 @@ static int macb_mii_probe(struct net_device *dev)
275 phydev = phy_find_first(bp->mii_bus); 276 phydev = phy_find_first(bp->mii_bus);
276 if (!phydev) { 277 if (!phydev) {
277 netdev_err(dev, "no PHY found\n"); 278 netdev_err(dev, "no PHY found\n");
278 return -1; 279 return -ENXIO;
279 } 280 }
280 281
281 pdata = dev_get_platdata(&bp->pdev->dev); 282 pdata = dev_get_platdata(&bp->pdev->dev);
@@ -314,6 +315,7 @@ static int macb_mii_probe(struct net_device *dev)
314int macb_mii_init(struct macb *bp) 315int macb_mii_init(struct macb *bp)
315{ 316{
316 struct macb_platform_data *pdata; 317 struct macb_platform_data *pdata;
318 struct device_node *np;
317 int err = -ENXIO, i; 319 int err = -ENXIO, i;
318 320
319 /* Enable management port */ 321 /* Enable management port */
@@ -333,10 +335,7 @@ int macb_mii_init(struct macb *bp)
333 bp->pdev->name, bp->pdev->id); 335 bp->pdev->name, bp->pdev->id);
334 bp->mii_bus->priv = bp; 336 bp->mii_bus->priv = bp;
335 bp->mii_bus->parent = &bp->dev->dev; 337 bp->mii_bus->parent = &bp->dev->dev;
336 pdata = bp->pdev->dev.platform_data; 338 pdata = dev_get_platdata(&bp->pdev->dev);
337
338 if (pdata)
339 bp->mii_bus->phy_mask = pdata->phy_mask;
340 339
341 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 340 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
342 if (!bp->mii_bus->irq) { 341 if (!bp->mii_bus->irq) {
@@ -344,17 +343,45 @@ int macb_mii_init(struct macb *bp)
344 goto err_out_free_mdiobus; 343 goto err_out_free_mdiobus;
345 } 344 }
346 345
347 for (i = 0; i < PHY_MAX_ADDR; i++)
348 bp->mii_bus->irq[i] = PHY_POLL;
349
350 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 346 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
351 347
352 if (mdiobus_register(bp->mii_bus)) 348 np = bp->pdev->dev.of_node;
349 if (np) {
350 /* try dt phy registration */
351 err = of_mdiobus_register(bp->mii_bus, np);
352
 353 /* fall back to standard PHY registration if no PHY was
 354 found during DT PHY registration */
355 if (!err && !phy_find_first(bp->mii_bus)) {
356 for (i = 0; i < PHY_MAX_ADDR; i++) {
357 struct phy_device *phydev;
358
359 phydev = mdiobus_scan(bp->mii_bus, i);
360 if (IS_ERR(phydev)) {
361 err = PTR_ERR(phydev);
362 break;
363 }
364 }
365
366 if (err)
367 goto err_out_unregister_bus;
368 }
369 } else {
370 for (i = 0; i < PHY_MAX_ADDR; i++)
371 bp->mii_bus->irq[i] = PHY_POLL;
372
373 if (pdata)
374 bp->mii_bus->phy_mask = pdata->phy_mask;
375
376 err = mdiobus_register(bp->mii_bus);
377 }
378
379 if (err)
353 goto err_out_free_mdio_irq; 380 goto err_out_free_mdio_irq;
354 381
355 if (macb_mii_probe(bp->dev) != 0) { 382 err = macb_mii_probe(bp->dev);
383 if (err)
356 goto err_out_unregister_bus; 384 goto err_out_unregister_bus;
357 }
358 385
359 return 0; 386 return 0;
360 387
@@ -1824,7 +1851,7 @@ static int __init macb_probe(struct platform_device *pdev)
1824 1851
1825 err = of_get_phy_mode(pdev->dev.of_node); 1852 err = of_get_phy_mode(pdev->dev.of_node);
1826 if (err < 0) { 1853 if (err < 0) {
1827 pdata = pdev->dev.platform_data; 1854 pdata = dev_get_platdata(&pdev->dev);
1828 if (pdata && pdata->is_rmii) 1855 if (pdata && pdata->is_rmii)
1829 bp->phy_interface = PHY_INTERFACE_MODE_RMII; 1856 bp->phy_interface = PHY_INTERFACE_MODE_RMII;
1830 else 1857 else
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c9..78d6d6b970e1 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
353 /* Receive errors */ 353 /* Receive errors */
354 unsigned long rx_watchdog; 354 unsigned long rx_watchdog;
355 unsigned long rx_da_filter_fail; 355 unsigned long rx_da_filter_fail;
356 unsigned long rx_sa_filter_fail;
357 unsigned long rx_payload_error; 356 unsigned long rx_payload_error;
358 unsigned long rx_ip_header_error; 357 unsigned long rx_ip_header_error;
359 /* Tx/Rx IRQ errors */ 358 /* Tx/Rx IRQ errors */
360 unsigned long tx_undeflow;
361 unsigned long tx_process_stopped; 359 unsigned long tx_process_stopped;
362 unsigned long rx_buf_unav; 360 unsigned long rx_buf_unav;
363 unsigned long rx_process_stopped; 361 unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
393 char rx_pause; 391 char rx_pause;
394 char tx_pause; 392 char tx_pause;
395 int wolopts; 393 int wolopts;
394 struct work_struct tx_timeout_work;
396}; 395};
397 396
398/* XGMAC Configuration Settings */ 397/* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
409#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) 408#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
410#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) 409#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
411 410
411#define tx_dma_ring_space(p) \
412 dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
413
412/* XGMAC Descriptor Access Helpers */ 414/* XGMAC Descriptor Access Helpers */
413static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) 415static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
414{ 416{
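tx_dma_ring_space() is just CIRC_SPACE() applied to the tx head/tail indices; with a power-of-two ring size it reduces to mask arithmetic that deliberately keeps one slot free. A standalone model (the ring size here is illustrative):

    #include <assert.h>

    #define DMA_TX_RING_SZ 256  /* illustrative; must be a power of two */

    static unsigned int ring_space(unsigned int head, unsigned int tail)
    {
            return (tail - head - 1) & (DMA_TX_RING_SZ - 1); /* CIRC_SPACE() */
    }

    int main(void)
    {
            assert(ring_space(0, 0) == DMA_TX_RING_SZ - 1); /* empty ring */
            assert(ring_space(255, 0) == 0);                /* full ring */
            return 0;
    }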
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
421 423
422static inline int desc_get_buf_len(struct xgmac_dma_desc *p) 424static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
423{ 425{
424 u32 len = cpu_to_le32(p->flags); 426 u32 len = le32_to_cpu(p->buf_size);
425 return (len & DESC_BUFFER1_SZ_MASK) + 427 return (len & DESC_BUFFER1_SZ_MASK) +
426 ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); 428 ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
427} 429}
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
464 p->flags = cpu_to_le32(tmpflags); 466 p->flags = cpu_to_le32(tmpflags);
465} 467}
466 468
469static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
470{
471 u32 tmpflags = le32_to_cpu(p->flags);
472 tmpflags &= TXDESC_END_RING;
473 p->flags = cpu_to_le32(tmpflags);
474}
475
467static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) 476static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
468{ 477{
469 return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; 478 return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
470} 479}
471 480
481static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
482{
483 return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
484}
485
472static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) 486static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
473{ 487{
474 return le32_to_cpu(p->buf1_addr); 488 return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
609{ 623{
610 u32 data; 624 u32 data;
611 625
612 data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); 626 if (addr) {
613 writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); 627 data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
614 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; 628 writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
615 writel(data, ioaddr + XGMAC_ADDR_LOW(num)); 629 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
630 writel(data, ioaddr + XGMAC_ADDR_LOW(num));
631 } else {
632 writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
633 writel(0, ioaddr + XGMAC_ADDR_LOW(num));
634 }
616} 635}
617 636
618static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, 637static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
683 if (unlikely(skb == NULL)) 702 if (unlikely(skb == NULL))
684 break; 703 break;
685 704
686 priv->rx_skbuff[entry] = skb;
687 paddr = dma_map_single(priv->device, skb->data, 705 paddr = dma_map_single(priv->device, skb->data,
688 bufsz, DMA_FROM_DEVICE); 706 priv->dma_buf_sz - NET_IP_ALIGN,
707 DMA_FROM_DEVICE);
708 if (dma_mapping_error(priv->device, paddr)) {
709 dev_kfree_skb_any(skb);
710 break;
711 }
712 priv->rx_skbuff[entry] = skb;
689 desc_set_buf_addr(p, paddr, priv->dma_buf_sz); 713 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
690 } 714 }
691 715
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
782 return; 806 return;
783 807
784 for (i = 0; i < DMA_RX_RING_SZ; i++) { 808 for (i = 0; i < DMA_RX_RING_SZ; i++) {
785 if (priv->rx_skbuff[i] == NULL) 809 struct sk_buff *skb = priv->rx_skbuff[i];
810 if (skb == NULL)
786 continue; 811 continue;
787 812
788 p = priv->dma_rx + i; 813 p = priv->dma_rx + i;
789 dma_unmap_single(priv->device, desc_get_buf_addr(p), 814 dma_unmap_single(priv->device, desc_get_buf_addr(p),
790 priv->dma_buf_sz, DMA_FROM_DEVICE); 815 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
791 dev_kfree_skb_any(priv->rx_skbuff[i]); 816 dev_kfree_skb_any(skb);
792 priv->rx_skbuff[i] = NULL; 817 priv->rx_skbuff[i] = NULL;
793 } 818 }
794} 819}
795 820
796static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) 821static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
797{ 822{
798 int i, f; 823 int i;
799 struct xgmac_dma_desc *p; 824 struct xgmac_dma_desc *p;
800 825
801 if (!priv->tx_skbuff) 826 if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
806 continue; 831 continue;
807 832
808 p = priv->dma_tx + i; 833 p = priv->dma_tx + i;
809 dma_unmap_single(priv->device, desc_get_buf_addr(p), 834 if (desc_get_tx_fs(p))
810 desc_get_buf_len(p), DMA_TO_DEVICE); 835 dma_unmap_single(priv->device, desc_get_buf_addr(p),
811 836 desc_get_buf_len(p), DMA_TO_DEVICE);
812 for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { 837 else
813 p = priv->dma_tx + i++;
814 dma_unmap_page(priv->device, desc_get_buf_addr(p), 838 dma_unmap_page(priv->device, desc_get_buf_addr(p),
815 desc_get_buf_len(p), DMA_TO_DEVICE); 839 desc_get_buf_len(p), DMA_TO_DEVICE);
816 }
817 840
818 dev_kfree_skb_any(priv->tx_skbuff[i]); 841 if (desc_get_tx_ls(p))
842 dev_kfree_skb_any(priv->tx_skbuff[i]);
819 priv->tx_skbuff[i] = NULL; 843 priv->tx_skbuff[i] = NULL;
820 } 844 }
821} 845}
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
852 */ 876 */
853static void xgmac_tx_complete(struct xgmac_priv *priv) 877static void xgmac_tx_complete(struct xgmac_priv *priv)
854{ 878{
855 int i;
856
857 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { 879 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
858 unsigned int entry = priv->tx_tail; 880 unsigned int entry = priv->tx_tail;
859 struct sk_buff *skb = priv->tx_skbuff[entry]; 881 struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
863 if (desc_get_owner(p)) 885 if (desc_get_owner(p))
864 break; 886 break;
865 887
866 /* Verify tx error by looking at the last segment */
867 if (desc_get_tx_ls(p))
868 desc_get_tx_status(priv, p);
869
870 netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", 888 netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
871 priv->tx_head, priv->tx_tail); 889 priv->tx_head, priv->tx_tail);
872 890
873 dma_unmap_single(priv->device, desc_get_buf_addr(p), 891 if (desc_get_tx_fs(p))
874 desc_get_buf_len(p), DMA_TO_DEVICE); 892 dma_unmap_single(priv->device, desc_get_buf_addr(p),
875 893 desc_get_buf_len(p), DMA_TO_DEVICE);
876 priv->tx_skbuff[entry] = NULL; 894 else
877 priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
878
879 if (!skb) {
880 continue;
881 }
882
883 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
884 entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
885 DMA_TX_RING_SZ);
886 p = priv->dma_tx + priv->tx_tail;
887
888 dma_unmap_page(priv->device, desc_get_buf_addr(p), 895 dma_unmap_page(priv->device, desc_get_buf_addr(p),
889 desc_get_buf_len(p), DMA_TO_DEVICE); 896 desc_get_buf_len(p), DMA_TO_DEVICE);
897
898 /* Check tx error on the last segment */
899 if (desc_get_tx_ls(p)) {
900 desc_get_tx_status(priv, p);
901 dev_kfree_skb(skb);
890 } 902 }
891 903
892 dev_kfree_skb(skb); 904 priv->tx_skbuff[entry] = NULL;
905 priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
893 } 906 }
894 907
895 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > 908 /* Ensure tx_tail is visible to xgmac_xmit */
896 MAX_SKB_FRAGS) 909 smp_mb();
910 if (unlikely(netif_queue_stopped(priv->dev) &&
911 (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
897 netif_wake_queue(priv->dev); 912 netif_wake_queue(priv->dev);
898} 913}
899 914
900/** 915static void xgmac_tx_timeout_work(struct work_struct *work)
901 * xgmac_tx_err:
902 * @priv: pointer to the private device structure
903 * Description: it cleans the descriptors and restarts the transmission
904 * in case of errors.
905 */
906static void xgmac_tx_err(struct xgmac_priv *priv)
907{ 916{
908 u32 reg, value, inten; 917 u32 reg, value;
918 struct xgmac_priv *priv =
919 container_of(work, struct xgmac_priv, tx_timeout_work);
909 920
910 netif_stop_queue(priv->dev); 921 napi_disable(&priv->napi);
911 922
912 inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
913 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 923 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
914 924
925 netif_tx_lock(priv->dev);
926
915 reg = readl(priv->base + XGMAC_DMA_CONTROL); 927 reg = readl(priv->base + XGMAC_DMA_CONTROL);
916 writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); 928 writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
917 do { 929 do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
927 939
928 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, 940 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
929 priv->base + XGMAC_DMA_STATUS); 941 priv->base + XGMAC_DMA_STATUS);
930 writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
931 942
943 netif_tx_unlock(priv->dev);
932 netif_wake_queue(priv->dev); 944 netif_wake_queue(priv->dev);
945
946 napi_enable(&priv->napi);
947
948 /* Enable interrupts */
949 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
950 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
933} 951}
934 952
935static int xgmac_hw_init(struct net_device *dev) 953static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
957 DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; 975 DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
958 writel(value, ioaddr + XGMAC_DMA_BUS_MODE); 976 writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
959 977
960 /* Enable interrupts */ 978 writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
961 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
962 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
963 979
964 /* Mask power mgt interrupt */ 980 /* Mask power mgt interrupt */
965 writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); 981 writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
1027 napi_enable(&priv->napi); 1043 napi_enable(&priv->napi);
1028 netif_start_queue(dev); 1044 netif_start_queue(dev);
1029 1045
1046 /* Enable interrupts */
1047 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
1048 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
1049
1030 return 0; 1050 return 0;
1031} 1051}
1032 1052
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1087 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); 1107 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
1088 if (dma_mapping_error(priv->device, paddr)) { 1108 if (dma_mapping_error(priv->device, paddr)) {
1089 dev_kfree_skb(skb); 1109 dev_kfree_skb(skb);
1090 return -EIO; 1110 return NETDEV_TX_OK;
1091 } 1111 }
1092 priv->tx_skbuff[entry] = skb; 1112 priv->tx_skbuff[entry] = skb;
1093 desc_set_buf_addr_and_size(desc, paddr, len); 1113 desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1099 1119
1100 paddr = skb_frag_dma_map(priv->device, frag, 0, len, 1120 paddr = skb_frag_dma_map(priv->device, frag, 0, len,
1101 DMA_TO_DEVICE); 1121 DMA_TO_DEVICE);
1102 if (dma_mapping_error(priv->device, paddr)) { 1122 if (dma_mapping_error(priv->device, paddr))
1103 dev_kfree_skb(skb); 1123 goto dma_err;
1104 return -EIO;
1105 }
1106 1124
1107 entry = dma_ring_incr(entry, DMA_TX_RING_SZ); 1125 entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1108 desc = priv->dma_tx + entry; 1126 desc = priv->dma_tx + entry;
1109 priv->tx_skbuff[entry] = NULL; 1127 priv->tx_skbuff[entry] = skb;
1110 1128
1111 desc_set_buf_addr_and_size(desc, paddr, len); 1129 desc_set_buf_addr_and_size(desc, paddr, len);
1112 if (i < (nfrags - 1)) 1130 if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1124 wmb(); 1142 wmb();
1125 desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); 1143 desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
1126 1144
1145 writel(1, priv->base + XGMAC_DMA_TX_POLL);
1146
1127 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); 1147 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1128 1148
1129 writel(1, priv->base + XGMAC_DMA_TX_POLL); 1149 /* Ensure tx_head update is visible to tx completion */
1130 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < 1150 smp_mb();
1131 MAX_SKB_FRAGS) 1151 if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
1132 netif_stop_queue(dev); 1152 netif_stop_queue(dev);
1153 /* Ensure netif_stop_queue is visible to tx completion */
1154 smp_mb();
1155 if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
1156 netif_start_queue(dev);
1157 }
1158 return NETDEV_TX_OK;
1133 1159
1160dma_err:
1161 entry = priv->tx_head;
1162 for ( ; i > 0; i--) {
1163 entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1164 desc = priv->dma_tx + entry;
1165 priv->tx_skbuff[entry] = NULL;
1166 dma_unmap_page(priv->device, desc_get_buf_addr(desc),
1167 desc_get_buf_len(desc), DMA_TO_DEVICE);
1168 desc_clear_tx_owner(desc);
1169 }
1170 desc = first;
1171 dma_unmap_single(priv->device, desc_get_buf_addr(desc),
1172 desc_get_buf_len(desc), DMA_TO_DEVICE);
1173 dev_kfree_skb(skb);
1134 return NETDEV_TX_OK; 1174 return NETDEV_TX_OK;
1135} 1175}
1136 1176
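The smp_mb() calls added here pair with the ones in xgmac_tx_complete() to form the standard lockless stop/wake protocol: each side publishes its index before testing the other's, so at least one side always observes the peer's update and a wakeup cannot be lost. Condensed from the two hunks (the xmit path additionally re-tests and restarts the queue right after stopping it):

    /* producer, xgmac_xmit() */
    priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
    smp_mb();                   /* publish tx_head before the space test */
    if (tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)
            netif_stop_queue(dev);

    /* consumer, xgmac_tx_complete() */
    priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
    smp_mb();                   /* publish tx_tail before the stopped test */
    if (netif_queue_stopped(priv->dev) &&
        tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
            netif_wake_queue(priv->dev);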
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1174 1214
1175 skb_put(skb, frame_len); 1215 skb_put(skb, frame_len);
1176 dma_unmap_single(priv->device, desc_get_buf_addr(p), 1216 dma_unmap_single(priv->device, desc_get_buf_addr(p),
1177 frame_len, DMA_FROM_DEVICE); 1217 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
1178 1218
1179 skb->protocol = eth_type_trans(skb, priv->dev); 1219 skb->protocol = eth_type_trans(skb, priv->dev);
1180 skb->ip_summed = ip_checksum; 1220 skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
1225static void xgmac_tx_timeout(struct net_device *dev) 1265static void xgmac_tx_timeout(struct net_device *dev)
1226{ 1266{
1227 struct xgmac_priv *priv = netdev_priv(dev); 1267 struct xgmac_priv *priv = netdev_priv(dev);
1228 1268 schedule_work(&priv->tx_timeout_work);
1229 /* Clear Tx resources and restart transmitting again */
1230 xgmac_tx_err(priv);
1231} 1269}
1232 1270
1233/** 1271/**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1286 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { 1324 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
1287 use_hash = true; 1325 use_hash = true;
1288 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; 1326 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1327 } else {
1328 use_hash = false;
1289 } 1329 }
1290 netdev_for_each_mc_addr(ha, dev) { 1330 netdev_for_each_mc_addr(ha, dev) {
1291 if (use_hash) { 1331 if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1302 } 1342 }
1303 1343
1304out: 1344out:
1345 for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
1346 xgmac_set_mac_addr(ioaddr, NULL, reg);
1305 for (i = 0; i < XGMAC_NUM_HASH; i++) 1347 for (i = 0; i < XGMAC_NUM_HASH; i++)
1306 writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); 1348 writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1307 1349
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1366static irqreturn_t xgmac_interrupt(int irq, void *dev_id) 1408static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1367{ 1409{
1368 u32 intr_status; 1410 u32 intr_status;
1369 bool tx_err = false;
1370 struct net_device *dev = (struct net_device *)dev_id; 1411 struct net_device *dev = (struct net_device *)dev_id;
1371 struct xgmac_priv *priv = netdev_priv(dev); 1412 struct xgmac_priv *priv = netdev_priv(dev);
1372 struct xgmac_extra_stats *x = &priv->xstats; 1413 struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1396 if (intr_status & DMA_STATUS_TPS) { 1437 if (intr_status & DMA_STATUS_TPS) {
1397 netdev_err(priv->dev, "transmit process stopped\n"); 1438 netdev_err(priv->dev, "transmit process stopped\n");
1398 x->tx_process_stopped++; 1439 x->tx_process_stopped++;
1399 tx_err = true; 1440 schedule_work(&priv->tx_timeout_work);
1400 } 1441 }
1401 if (intr_status & DMA_STATUS_FBI) { 1442 if (intr_status & DMA_STATUS_FBI) {
1402 netdev_err(priv->dev, "fatal bus error\n"); 1443 netdev_err(priv->dev, "fatal bus error\n");
1403 x->fatal_bus_error++; 1444 x->fatal_bus_error++;
1404 tx_err = true;
1405 } 1445 }
1406
1407 if (tx_err)
1408 xgmac_tx_err(priv);
1409 } 1446 }
1410 1447
1411 /* TX/RX NORMAL interrupts */ 1448 /* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
1569 XGMAC_STAT(rx_payload_error), 1606 XGMAC_STAT(rx_payload_error),
1570 XGMAC_STAT(rx_ip_header_error), 1607 XGMAC_STAT(rx_ip_header_error),
1571 XGMAC_STAT(rx_da_filter_fail), 1608 XGMAC_STAT(rx_da_filter_fail),
1572 XGMAC_STAT(rx_sa_filter_fail),
1573 XGMAC_STAT(fatal_bus_error), 1609 XGMAC_STAT(fatal_bus_error),
1574 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), 1610 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
1575 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), 1611 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
1708 ndev->netdev_ops = &xgmac_netdev_ops; 1744 ndev->netdev_ops = &xgmac_netdev_ops;
1709 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); 1745 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
1710 spin_lock_init(&priv->stats_lock); 1746 spin_lock_init(&priv->stats_lock);
1747 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
1711 1748
1712 priv->device = &pdev->dev; 1749 priv->device = &pdev->dev;
1713 priv->dev = ndev; 1750 priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
1759 if (device_can_wakeup(priv->device)) 1796 if (device_can_wakeup(priv->device))
1760 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ 1797 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1761 1798
1762 ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; 1799 ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
1763 if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) 1800 if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
1764 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1801 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1765 NETIF_F_RXCSUM; 1802 NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 038df4b96139..79ac77cf62d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3982,7 +3982,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
3982 struct inet6_ifaddr *ifa = data; 3982 struct inet6_ifaddr *ifa = data;
3983 struct net_device *event_dev; 3983 struct net_device *event_dev;
3984 int ret = NOTIFY_DONE; 3984 int ret = NOTIFY_DONE;
3985 int cnt;
3986 struct bonding *bond = netdev_priv(ifa->idev->dev); 3985 struct bonding *bond = netdev_priv(ifa->idev->dev);
3987 struct slave *slave; 3986 struct slave *slave;
3988 struct pci_dev *first_pdev = NULL; 3987 struct pci_dev *first_pdev = NULL;
@@ -3996,7 +3995,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
3996 * in all of them only once. 3995 * in all of them only once.
3997 */ 3996 */
3998 read_lock(&bond->lock); 3997 read_lock(&bond->lock);
3999 bond_for_each_slave(bond, slave, cnt) { 3998 bond_for_each_slave(bond, slave) {
4000 if (!first_pdev) { 3999 if (!first_pdev) {
4001 ret = clip_add(slave->dev, ifa, event); 4000 ret = clip_add(slave->dev, ifa, event);
4002 /* If clip_add is success then only initialize 4001 /* If clip_add is success then only initialize
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e3d4ec836f8b..ec88de4ac162 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -814,7 +814,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
814 814
815 if (pdev == NULL) 815 if (pdev == NULL)
816 return -ENODEV; 816 return -ENODEV;
817 data = pdev->dev.platform_data; 817 data = dev_get_platdata(&pdev->dev);
818 818
819 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 819 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
820 irq = platform_get_irq(pdev, 0); 820 irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index 9d4974bba247..239e1e46545d 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o 4 enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
5 enic_ethtool.o enic_api.o
5 6
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index afe9b1662b8c..e9f7c656ddda 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,12 +32,12 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "2.1.1.39" 35#define DRV_VERSION "2.1.1.50"
36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
39 39
40#define ENIC_WQ_MAX 1 40#define ENIC_WQ_MAX 8
41#define ENIC_RQ_MAX 8 41#define ENIC_RQ_MAX 8
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -96,6 +96,7 @@ struct enic {
96#ifdef CONFIG_PCI_IOV 96#ifdef CONFIG_PCI_IOV
97 u16 num_vfs; 97 u16 num_vfs;
98#endif 98#endif
99 spinlock_t enic_api_lock;
99 struct enic_port_profile *pp; 100 struct enic_port_profile *pp;
100 101
101 /* work queue cache line section */ 102 /* work queue cache line section */
@@ -127,9 +128,57 @@ static inline struct device *enic_get_dev(struct enic *enic)
127 return &(enic->pdev->dev); 128 return &(enic->pdev->dev);
128} 129}
129 130
131static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
132{
133 return rq;
134}
135
136static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
137{
138 return enic->rq_count + wq;
139}
140
141static inline unsigned int enic_legacy_io_intr(void)
142{
143 return 0;
144}
145
146static inline unsigned int enic_legacy_err_intr(void)
147{
148 return 1;
149}
150
151static inline unsigned int enic_legacy_notify_intr(void)
152{
153 return 2;
154}
155
156static inline unsigned int enic_msix_rq_intr(struct enic *enic,
157 unsigned int rq)
158{
159 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
160}
161
162static inline unsigned int enic_msix_wq_intr(struct enic *enic,
163 unsigned int wq)
164{
165 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
166}
167
168static inline unsigned int enic_msix_err_intr(struct enic *enic)
169{
170 return enic->rq_count + enic->wq_count;
171}
172
173static inline unsigned int enic_msix_notify_intr(struct enic *enic)
174{
175 return enic->rq_count + enic->wq_count + 1;
176}
177
130void enic_reset_addr_lists(struct enic *enic); 178void enic_reset_addr_lists(struct enic *enic);
131int enic_sriov_enabled(struct enic *enic); 179int enic_sriov_enabled(struct enic *enic);
132int enic_is_valid_vf(struct enic *enic, int vf); 180int enic_is_valid_vf(struct enic *enic, int vf);
133int enic_is_dynamic(struct enic *enic); 181int enic_is_dynamic(struct enic *enic);
182void enic_set_ethtool_ops(struct net_device *netdev);
134 183
135#endif /* _ENIC_H_ */ 184#endif /* _ENIC_H_ */
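The new inline helpers pin down the MSI-X vector layout: RQ completion vectors come first, WQ completion vectors follow, then one error vector and one notify vector. With the raised maxima above (ENIC_WQ_MAX and ENIC_RQ_MAX both 8), a fully populated adapter would use vectors 0-7 for RQs, 8-15 for WQs, 16 for errors and 17 for notifications, which is exactly ENIC_INTR_MAX = ENIC_CQ_MAX + 2 = 18; the interrupt_offset of each completion queue is presumably set up to match this ordering. The legacy-INTx helpers hard-code the equivalent 0/1/2 slots.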
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
new file mode 100644
index 000000000000..e13efbdaa2ed
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -0,0 +1,48 @@
1/**
2 * Copyright 2013 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/netdevice.h>
20#include <linux/spinlock.h>
21
22#include "vnic_dev.h"
23#include "vnic_devcmd.h"
24
25#include "enic_res.h"
26#include "enic.h"
27#include "enic_api.h"
28
29int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
30 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
31{
32 int err;
33 struct enic *enic = netdev_priv(netdev);
34 struct vnic_dev *vdev = enic->vdev;
35
36 spin_lock(&enic->enic_api_lock);
37 spin_lock(&enic->devcmd_lock);
38
39 vnic_dev_cmd_proxy_by_index_start(vdev, vf);
40 err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
41 vnic_dev_cmd_proxy_end(vdev);
42
43 spin_unlock(&enic->devcmd_lock);
44 spin_unlock(&enic->enic_api_lock);
45
46 return err;
47}
48EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index);
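The EXPORT_SYMBOL makes the proxy callable from a separate module; the function serializes against both the enic API and devcmd paths before issuing the command on the VF's behalf. A hypothetical caller, with an arbitrary command and wait value chosen purely for illustration:

    #include <linux/netdevice.h>

    #include "enic_api.h"
    #include "vnic_devcmd.h"

    /* Proxy a devcmd to VF 0 through the PF's net_device. */
    static int example_proxy_call(struct net_device *netdev)
    {
            u64 a0 = 0, a1 = 0;

            return enic_api_devcmd_proxy_by_index(netdev, 0 /* vf */,
                                                  CMD_HANG_NOTIFY, &a0, &a1,
                                                  1000 /* wait */);
    }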
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
new file mode 100644
index 000000000000..6b9f9255af28
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -0,0 +1,30 @@
1/**
2 * Copyright 2013 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef __ENIC_API_H__
20#define __ENIC_API_H__
21
22#include <linux/netdevice.h>
23
24#include "vnic_dev.h"
25#include "vnic_devcmd.h"
26
27int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
28 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait);
29
30#endif
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 08bded051b93..129b14a4efb0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -20,6 +20,7 @@
20#define _ENIC_DEV_H_ 20#define _ENIC_DEV_H_
21 21
22#include "vnic_dev.h" 22#include "vnic_dev.h"
23#include "vnic_vic.h"
23 24
24/* 25/*
25 * Calls the devcmd function given by argument vnicdevcmdfn. 26 * Calls the devcmd function given by argument vnicdevcmdfn.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
new file mode 100644
index 000000000000..47e3562f4866
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -0,0 +1,257 @@
1/**
2 * Copyright 2013 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/netdevice.h>
20#include <linux/ethtool.h>
21
22#include "enic_res.h"
23#include "enic.h"
24#include "enic_dev.h"
25
26struct enic_stat {
27 char name[ETH_GSTRING_LEN];
28 unsigned int index;
29};
30
31#define ENIC_TX_STAT(stat) { \
32 .name = #stat, \
33 .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
34}
35
36#define ENIC_RX_STAT(stat) { \
37 .name = #stat, \
38 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
39}
40
41static const struct enic_stat enic_tx_stats[] = {
42 ENIC_TX_STAT(tx_frames_ok),
43 ENIC_TX_STAT(tx_unicast_frames_ok),
44 ENIC_TX_STAT(tx_multicast_frames_ok),
45 ENIC_TX_STAT(tx_broadcast_frames_ok),
46 ENIC_TX_STAT(tx_bytes_ok),
47 ENIC_TX_STAT(tx_unicast_bytes_ok),
48 ENIC_TX_STAT(tx_multicast_bytes_ok),
49 ENIC_TX_STAT(tx_broadcast_bytes_ok),
50 ENIC_TX_STAT(tx_drops),
51 ENIC_TX_STAT(tx_errors),
52 ENIC_TX_STAT(tx_tso),
53};
54
55static const struct enic_stat enic_rx_stats[] = {
56 ENIC_RX_STAT(rx_frames_ok),
57 ENIC_RX_STAT(rx_frames_total),
58 ENIC_RX_STAT(rx_unicast_frames_ok),
59 ENIC_RX_STAT(rx_multicast_frames_ok),
60 ENIC_RX_STAT(rx_broadcast_frames_ok),
61 ENIC_RX_STAT(rx_bytes_ok),
62 ENIC_RX_STAT(rx_unicast_bytes_ok),
63 ENIC_RX_STAT(rx_multicast_bytes_ok),
64 ENIC_RX_STAT(rx_broadcast_bytes_ok),
65 ENIC_RX_STAT(rx_drop),
66 ENIC_RX_STAT(rx_no_bufs),
67 ENIC_RX_STAT(rx_errors),
68 ENIC_RX_STAT(rx_rss),
69 ENIC_RX_STAT(rx_crc_errors),
70 ENIC_RX_STAT(rx_frames_64),
71 ENIC_RX_STAT(rx_frames_127),
72 ENIC_RX_STAT(rx_frames_255),
73 ENIC_RX_STAT(rx_frames_511),
74 ENIC_RX_STAT(rx_frames_1023),
75 ENIC_RX_STAT(rx_frames_1518),
76 ENIC_RX_STAT(rx_frames_to_max),
77};
78
79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
81
82static int enic_get_settings(struct net_device *netdev,
83 struct ethtool_cmd *ecmd)
84{
85 struct enic *enic = netdev_priv(netdev);
86
87 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
88 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
89 ecmd->port = PORT_FIBRE;
90 ecmd->transceiver = XCVR_EXTERNAL;
91
92 if (netif_carrier_ok(netdev)) {
93 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
94 ecmd->duplex = DUPLEX_FULL;
95 } else {
96 ethtool_cmd_speed_set(ecmd, -1);
97 ecmd->duplex = -1;
98 }
99
100 ecmd->autoneg = AUTONEG_DISABLE;
101
102 return 0;
103}
104
105static void enic_get_drvinfo(struct net_device *netdev,
106 struct ethtool_drvinfo *drvinfo)
107{
108 struct enic *enic = netdev_priv(netdev);
109 struct vnic_devcmd_fw_info *fw_info;
110
111 enic_dev_fw_info(enic, &fw_info);
112
113 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
114 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
115 strlcpy(drvinfo->fw_version, fw_info->fw_version,
116 sizeof(drvinfo->fw_version));
117 strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
118 sizeof(drvinfo->bus_info));
119}
120
121static void enic_get_strings(struct net_device *netdev, u32 stringset,
122 u8 *data)
123{
124 unsigned int i;
125
126 switch (stringset) {
127 case ETH_SS_STATS:
128 for (i = 0; i < enic_n_tx_stats; i++) {
129 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
130 data += ETH_GSTRING_LEN;
131 }
132 for (i = 0; i < enic_n_rx_stats; i++) {
133 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
134 data += ETH_GSTRING_LEN;
135 }
136 break;
137 }
138}
139
140static int enic_get_sset_count(struct net_device *netdev, int sset)
141{
142 switch (sset) {
143 case ETH_SS_STATS:
144 return enic_n_tx_stats + enic_n_rx_stats;
145 default:
146 return -EOPNOTSUPP;
147 }
148}
149
150static void enic_get_ethtool_stats(struct net_device *netdev,
151 struct ethtool_stats *stats, u64 *data)
152{
153 struct enic *enic = netdev_priv(netdev);
154 struct vnic_stats *vstats;
155 unsigned int i;
156
157 enic_dev_stats_dump(enic, &vstats);
158
159 for (i = 0; i < enic_n_tx_stats; i++)
160 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
161 for (i = 0; i < enic_n_rx_stats; i++)
162 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
163}
164
165static u32 enic_get_msglevel(struct net_device *netdev)
166{
167 struct enic *enic = netdev_priv(netdev);
168 return enic->msg_enable;
169}
170
171static void enic_set_msglevel(struct net_device *netdev, u32 value)
172{
173 struct enic *enic = netdev_priv(netdev);
174 enic->msg_enable = value;
175}
176
177static int enic_get_coalesce(struct net_device *netdev,
178 struct ethtool_coalesce *ecmd)
179{
180 struct enic *enic = netdev_priv(netdev);
181
182 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
183 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
184
185 return 0;
186}
187
188static int enic_set_coalesce(struct net_device *netdev,
189 struct ethtool_coalesce *ecmd)
190{
191 struct enic *enic = netdev_priv(netdev);
192 u32 tx_coalesce_usecs;
193 u32 rx_coalesce_usecs;
194 unsigned int i, intr;
195
196 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
197 vnic_dev_get_intr_coal_timer_max(enic->vdev));
198 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
199 vnic_dev_get_intr_coal_timer_max(enic->vdev));
200
201 switch (vnic_dev_get_intr_mode(enic->vdev)) {
202 case VNIC_DEV_INTR_MODE_INTX:
203 if (tx_coalesce_usecs != rx_coalesce_usecs)
204 return -EINVAL;
205
206 intr = enic_legacy_io_intr();
207 vnic_intr_coalescing_timer_set(&enic->intr[intr],
208 tx_coalesce_usecs);
209 break;
210 case VNIC_DEV_INTR_MODE_MSI:
211 if (tx_coalesce_usecs != rx_coalesce_usecs)
212 return -EINVAL;
213
214 vnic_intr_coalescing_timer_set(&enic->intr[0],
215 tx_coalesce_usecs);
216 break;
217 case VNIC_DEV_INTR_MODE_MSIX:
218 for (i = 0; i < enic->wq_count; i++) {
219 intr = enic_msix_wq_intr(enic, i);
220 vnic_intr_coalescing_timer_set(&enic->intr[intr],
221 tx_coalesce_usecs);
222 }
223
224 for (i = 0; i < enic->rq_count; i++) {
225 intr = enic_msix_rq_intr(enic, i);
226 vnic_intr_coalescing_timer_set(&enic->intr[intr],
227 rx_coalesce_usecs);
228 }
229
230 break;
231 default:
232 break;
233 }
234
235 enic->tx_coalesce_usecs = tx_coalesce_usecs;
236 enic->rx_coalesce_usecs = rx_coalesce_usecs;
237
238 return 0;
239}
240
241static const struct ethtool_ops enic_ethtool_ops = {
242 .get_settings = enic_get_settings,
243 .get_drvinfo = enic_get_drvinfo,
244 .get_msglevel = enic_get_msglevel,
245 .set_msglevel = enic_set_msglevel,
246 .get_link = ethtool_op_get_link,
247 .get_strings = enic_get_strings,
248 .get_sset_count = enic_get_sset_count,
249 .get_ethtool_stats = enic_get_ethtool_stats,
250 .get_coalesce = enic_get_coalesce,
251 .set_coalesce = enic_set_coalesce,
252};
253
254void enic_set_ethtool_ops(struct net_device *netdev)
255{
256 SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
257}
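The table-driven stats above avoid per-counter glue code: each ENIC_*_STAT entry records the counter's name and its u64 slot within the firmware stats block, computed with offsetof. A standalone userspace sketch of the same idiom follows; the struct and field names here are illustrative, not the driver's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a firmware stats block of u64 counters. */
struct demo_tx_stats {
	uint64_t frames_ok;
	uint64_t bytes_ok;
	uint64_t drops;
};

struct demo_stat {
	const char *name;
	unsigned int index;	/* u64 slot inside the stats block */
};

#define DEMO_TX_STAT(f) \
	{ .name = #f, .index = offsetof(struct demo_tx_stats, f) / sizeof(uint64_t) }

static const struct demo_stat demo_stats[] = {
	DEMO_TX_STAT(frames_ok),
	DEMO_TX_STAT(bytes_ok),
	DEMO_TX_STAT(drops),
};

int main(void)
{
	struct demo_tx_stats hw = { 100, 6400, 2 };
	size_t i;

	/* Same walk as enic_get_ethtool_stats(): view the block as a
	 * u64 array and pick each named slot. */
	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)((uint64_t *)&hw)[demo_stats[i].index]);
	return 0;
}

Keeping the index as a slot count rather than a byte offset lets the dump loop treat the whole block as a plain u64 array.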
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 992ec2ee64d9..7b756cf9474a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -31,7 +31,6 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
-#include <linux/ethtool.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -73,57 +72,6 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, enic_id_table);
 
-struct enic_stat {
-	char name[ETH_GSTRING_LEN];
-	unsigned int offset;
-};
-
-#define ENIC_TX_STAT(stat) \
-	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
-#define ENIC_RX_STAT(stat) \
-	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
-
-static const struct enic_stat enic_tx_stats[] = {
-	ENIC_TX_STAT(tx_frames_ok),
-	ENIC_TX_STAT(tx_unicast_frames_ok),
-	ENIC_TX_STAT(tx_multicast_frames_ok),
-	ENIC_TX_STAT(tx_broadcast_frames_ok),
-	ENIC_TX_STAT(tx_bytes_ok),
-	ENIC_TX_STAT(tx_unicast_bytes_ok),
-	ENIC_TX_STAT(tx_multicast_bytes_ok),
-	ENIC_TX_STAT(tx_broadcast_bytes_ok),
-	ENIC_TX_STAT(tx_drops),
-	ENIC_TX_STAT(tx_errors),
-	ENIC_TX_STAT(tx_tso),
-};
-
-static const struct enic_stat enic_rx_stats[] = {
-	ENIC_RX_STAT(rx_frames_ok),
-	ENIC_RX_STAT(rx_frames_total),
-	ENIC_RX_STAT(rx_unicast_frames_ok),
-	ENIC_RX_STAT(rx_multicast_frames_ok),
-	ENIC_RX_STAT(rx_broadcast_frames_ok),
-	ENIC_RX_STAT(rx_bytes_ok),
-	ENIC_RX_STAT(rx_unicast_bytes_ok),
-	ENIC_RX_STAT(rx_multicast_bytes_ok),
-	ENIC_RX_STAT(rx_broadcast_bytes_ok),
-	ENIC_RX_STAT(rx_drop),
-	ENIC_RX_STAT(rx_no_bufs),
-	ENIC_RX_STAT(rx_errors),
-	ENIC_RX_STAT(rx_rss),
-	ENIC_RX_STAT(rx_crc_errors),
-	ENIC_RX_STAT(rx_frames_64),
-	ENIC_RX_STAT(rx_frames_127),
-	ENIC_RX_STAT(rx_frames_255),
-	ENIC_RX_STAT(rx_frames_511),
-	ENIC_RX_STAT(rx_frames_1023),
-	ENIC_RX_STAT(rx_frames_1518),
-	ENIC_RX_STAT(rx_frames_to_max),
-};
-
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-
 int enic_is_dynamic(struct enic *enic)
 {
 	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
 #endif
 }
 
-static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
-{
-	return rq;
-}
-
-static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
-{
-	return enic->rq_count + wq;
-}
-
-static inline unsigned int enic_legacy_io_intr(void)
-{
-	return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
-	return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
-	return 2;
-}
-
-static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
-{
-	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
-{
-	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
-	return enic->rq_count + enic->wq_count;
-}
-
-static inline unsigned int enic_msix_notify_intr(struct enic *enic)
-{
-	return enic->rq_count + enic->wq_count + 1;
-}
-
-static int enic_get_settings(struct net_device *netdev,
-	struct ethtool_cmd *ecmd)
-{
-	struct enic *enic = netdev_priv(netdev);
-
-	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
-	ecmd->port = PORT_FIBRE;
-	ecmd->transceiver = XCVR_EXTERNAL;
-
-	if (netif_carrier_ok(netdev)) {
-		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
-		ecmd->duplex = DUPLEX_FULL;
-	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
-	}
-
-	ecmd->autoneg = AUTONEG_DISABLE;
-
-	return 0;
-}
-
-static void enic_get_drvinfo(struct net_device *netdev,
-	struct ethtool_drvinfo *drvinfo)
-{
-	struct enic *enic = netdev_priv(netdev);
-	struct vnic_devcmd_fw_info *fw_info;
-
-	enic_dev_fw_info(enic, &fw_info);
-
-	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
-	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-	strlcpy(drvinfo->fw_version, fw_info->fw_version,
-		sizeof(drvinfo->fw_version));
-	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
-		sizeof(drvinfo->bus_info));
-}
-
-static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
-	unsigned int i;
-
-	switch (stringset) {
-	case ETH_SS_STATS:
-		for (i = 0; i < enic_n_tx_stats; i++) {
-			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
-			data += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < enic_n_rx_stats; i++) {
-			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
-			data += ETH_GSTRING_LEN;
-		}
-		break;
-	}
-}
-
-static int enic_get_sset_count(struct net_device *netdev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return enic_n_tx_stats + enic_n_rx_stats;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void enic_get_ethtool_stats(struct net_device *netdev,
-	struct ethtool_stats *stats, u64 *data)
-{
-	struct enic *enic = netdev_priv(netdev);
-	struct vnic_stats *vstats;
-	unsigned int i;
-
-	enic_dev_stats_dump(enic, &vstats);
-
-	for (i = 0; i < enic_n_tx_stats; i++)
-		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
-	for (i = 0; i < enic_n_rx_stats; i++)
-		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
-}
-
-static u32 enic_get_msglevel(struct net_device *netdev)
-{
-	struct enic *enic = netdev_priv(netdev);
-	return enic->msg_enable;
-}
-
-static void enic_set_msglevel(struct net_device *netdev, u32 value)
-{
-	struct enic *enic = netdev_priv(netdev);
-	enic->msg_enable = value;
-}
-
-static int enic_get_coalesce(struct net_device *netdev,
-	struct ethtool_coalesce *ecmd)
-{
-	struct enic *enic = netdev_priv(netdev);
-
-	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
-	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
-
-	return 0;
-}
-
-static int enic_set_coalesce(struct net_device *netdev,
-	struct ethtool_coalesce *ecmd)
-{
-	struct enic *enic = netdev_priv(netdev);
-	u32 tx_coalesce_usecs;
-	u32 rx_coalesce_usecs;
-	unsigned int i, intr;
-
-	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
-		vnic_dev_get_intr_coal_timer_max(enic->vdev));
-	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
-		vnic_dev_get_intr_coal_timer_max(enic->vdev));
-
-	switch (vnic_dev_get_intr_mode(enic->vdev)) {
-	case VNIC_DEV_INTR_MODE_INTX:
-		if (tx_coalesce_usecs != rx_coalesce_usecs)
-			return -EINVAL;
-
-		intr = enic_legacy_io_intr();
-		vnic_intr_coalescing_timer_set(&enic->intr[intr],
-			tx_coalesce_usecs);
-		break;
-	case VNIC_DEV_INTR_MODE_MSI:
-		if (tx_coalesce_usecs != rx_coalesce_usecs)
-			return -EINVAL;
-
-		vnic_intr_coalescing_timer_set(&enic->intr[0],
-			tx_coalesce_usecs);
-		break;
-	case VNIC_DEV_INTR_MODE_MSIX:
-		for (i = 0; i < enic->wq_count; i++) {
-			intr = enic_msix_wq_intr(enic, i);
-			vnic_intr_coalescing_timer_set(&enic->intr[intr],
-				tx_coalesce_usecs);
-		}
-
-		for (i = 0; i < enic->rq_count; i++) {
-			intr = enic_msix_rq_intr(enic, i);
-			vnic_intr_coalescing_timer_set(&enic->intr[intr],
-				rx_coalesce_usecs);
-		}
-
-		break;
-	default:
-		break;
-	}
-
-	enic->tx_coalesce_usecs = tx_coalesce_usecs;
-	enic->rx_coalesce_usecs = rx_coalesce_usecs;
-
-	return 0;
-}
-
-static const struct ethtool_ops enic_ethtool_ops = {
-	.get_settings = enic_get_settings,
-	.get_drvinfo = enic_get_drvinfo,
-	.get_msglevel = enic_get_msglevel,
-	.set_msglevel = enic_set_msglevel,
-	.get_link = ethtool_op_get_link,
-	.get_strings = enic_get_strings,
-	.get_sset_count = enic_get_sset_count,
-	.get_ethtool_stats = enic_get_ethtool_stats,
-	.get_coalesce = enic_get_coalesce,
-	.set_coalesce = enic_set_coalesce,
-};
-
 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 {
 	struct enic *enic = vnic_dev_priv(wq->vdev);
@@ -396,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 			completed_index, enic_wq_free_buf,
 			opaque);
 
-	if (netif_queue_stopped(enic->netdev) &&
+	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
 	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
 	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
-		netif_wake_queue(enic->netdev);
+		netif_wake_subqueue(enic->netdev, q_number);
 
 	spin_unlock(&enic->wq_lock[q_number]);
 
@@ -560,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
 static irqreturn_t enic_isr_msix_wq(int irq, void *data)
 {
 	struct enic *enic = data;
-	unsigned int cq = enic_cq_wq(enic, 0);
-	unsigned int intr = enic_msix_wq_intr(enic, 0);
+	unsigned int cq;
+	unsigned int intr;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int wq_work_done;
+	unsigned int wq_irq;
+
+	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+	cq = enic_cq_wq(enic, wq_irq);
+	intr = enic_msix_wq_intr(enic, wq_irq);
 
 	wq_work_done = vnic_cq_service(&enic->cq[cq],
 		wq_work_to_do, enic_wq_service, NULL);
@@ -779,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 					      struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct vnic_wq *wq = &enic->wq[0];
+	struct vnic_wq *wq;
 	unsigned long flags;
+	unsigned int txq_map;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
+	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+	wq = &enic->wq[txq_map];
+
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely.  In the off chance it's going to take
 	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -799,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	spin_lock_irqsave(&enic->wq_lock[0], flags);
+	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
-		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	enic_queue_wq_skb(enic, wq, skb);
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 
-	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
 	return NETDEV_TX_OK;
 }
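Transmit becomes multiqueue-aware here: the skb's queue mapping selects the WQ, with a modulo to keep the index inside the configured WQ count. A tiny sketch of the same selection (names illustrative):

#include <stdio.h>

/* Mirrors txq_map = skb_get_queue_mapping(skb) % enic->wq_count:
 * the stack's chosen queue folded into the configured WQ range. */
static unsigned int demo_pick_txq(unsigned int queue_mapping,
				  unsigned int wq_count)
{
	return queue_mapping % wq_count;
}

int main(void)
{
	/* With 4 WQs, a queue mapping of 6 folds back onto WQ 2. */
	printf("txq = %u\n", demo_pick_txq(6, 4));
	return 0;
}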
@@ -1293,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 
 	skb_put(skb, bytes_written);
 	skb->protocol = eth_type_trans(skb, netdev);
+	skb_record_rx_queue(skb, q_number);
+	if (netdev->features & NETIF_F_RXHASH) {
+		skb->rxhash = rss_hash;
+		if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+				NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+				NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
+			skb->l4_rxhash = true;
+	}
 
 	if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
 		skb->csum = htons(checksum);
@@ -1637,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
 
 	enic_set_rx_mode(netdev);
 
-	netif_wake_queue(netdev);
+	netif_tx_wake_all_queues(netdev);
 
 	for (i = 0; i < enic->rq_count; i++)
 		napi_enable(&enic->napi[i]);
@@ -2001,6 +1750,7 @@ static void enic_reset(struct work_struct *work)
 
 	rtnl_lock();
 
+	spin_lock(&enic->enic_api_lock);
 	enic_dev_hang_notify(enic);
 	enic_stop(enic->netdev);
 	enic_dev_hang_reset(enic);
@@ -2009,6 +1759,8 @@ static void enic_reset(struct work_struct *work)
 	enic_set_rss_nic_cfg(enic);
 	enic_dev_set_ig_vlan_rewrite_mode(enic);
 	enic_open(enic->netdev);
+	spin_unlock(&enic->enic_api_lock);
+	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
 
 	rtnl_unlock();
 }
@@ -2297,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * instance data is initialized to zero.
 	 */
 
-	netdev = alloc_etherdev(sizeof(struct enic));
+	netdev = alloc_etherdev_mqs(sizeof(struct enic),
+				    ENIC_RQ_MAX, ENIC_WQ_MAX);
 	if (!netdev)
 		return -ENOMEM;
 
@@ -2327,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device.  Try 40-bit first, and
+	 * limitation for the device.  Try 64-bit first, and
 	 * fail to 32-bit.
 	 */
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -2345,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 40);
+				"for consistent allocations, aborting\n", 64);
 			goto err_out_release_regions;
 		}
 		using_dac = 1;
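The probe path now negotiates the widest DMA mask first and falls back to 32-bit. A hedged sketch of that fallback shape, written against the 3.x-era PCI DMA calls this driver uses (demo_set_dma_masks is a hypothetical helper, not the driver's code; the real probe open-codes this):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int demo_set_dma_masks(struct pci_dev *pdev, int *using_dac)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		/* 64-bit not usable: drop the streaming mask to 32-bit
		 * and leave DAC off. */
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
		*using_dac = 0;
	} else {
		/* 64-bit streaming worked; ask for matching coherent
		 * allocations too. */
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err)
			return err;
		*using_dac = 1;
	}
	return 0;
}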
@@ -2421,6 +2174,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 
 	spin_lock_init(&enic->devcmd_lock);
+	spin_lock_init(&enic->enic_api_lock);
 
 	/*
 	 * Set ingress vlan rewrite mode before vnic initialization
@@ -2462,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_dev_close;
 	}
 
+	netif_set_real_num_tx_queues(netdev, enic->wq_count);
+	netif_set_real_num_rx_queues(netdev, enic->rq_count);
+
 	/* Setup notification timer, HW reset task, and wq locks
 	 */
 
@@ -2496,7 +2253,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netdev->netdev_ops = &enic_netdev_ops;
 
 	netdev->watchdog_timeo = 2 * HZ;
-	netdev->ethtool_ops = &enic_ethtool_ops;
+	enic_set_ethtool_ops(netdev);
 
 	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 	if (ENIC_SETTING(enic, LOOP)) {
@@ -2510,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ENIC_SETTING(enic, TSO))
 		netdev->hw_features |= NETIF_F_TSO |
 			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	if (ENIC_SETTING(enic, RSS))
+		netdev->hw_features |= NETIF_F_RXHASH;
 	if (ENIC_SETTING(enic, RXCSUM))
 		netdev->hw_features |= NETIF_F_RXCSUM;
 
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 25be2734c3fe..69f60afd6577 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -47,6 +47,9 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
 	int offload_mode, int cq_entry, int sop, int eop, int loopback)
 {
 	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+	u8 desc_skip_cnt = 1;
+	u8 compressed_send = 0;
+	u64 wrid = 0;
 
 	wq_enet_desc_enc(desc,
 		(u64)dma_addr | VNIC_PADDR_TARGET,
@@ -59,7 +62,8 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
 		(u16)vlan_tag,
 		(u8)loopback);
 
-	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+			(u8)cq_entry, compressed_send, wrid);
 }
 
 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
@@ -120,6 +124,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
 	dma_addr_t dma_addr, unsigned int len)
 {
 	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+	u64 wrid = 0;
 	u8 type = os_buf_index ?
 		RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
 
@@ -127,7 +132,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
 		(u64)dma_addr | VNIC_PADDR_TARGET,
 		type, (u16)len);
 
-	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
 }
 
 struct enic;
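Every post now carries a wr_id cookie that is stored with the buffer and can be echoed back at completion, which is the kind of request/completion matching the newly exported low-latency path relies on. A simplified userspace model of that bookkeeping (all names illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative ring slot mirroring the new wr_id field on vnic bufs. */
struct demo_buf {
	void *os_buf;
	uint64_t wr_id;	/* caller's cookie, echoed at completion */
};

static struct demo_buf demo_ring[8];
static unsigned int demo_to_use;

/* Post: stash the caller's cookie next to the buffer pointer. */
static void demo_post(void *buf, uint64_t wrid)
{
	demo_ring[demo_to_use].os_buf = buf;
	demo_ring[demo_to_use].wr_id = wrid;
	demo_to_use = (demo_to_use + 1) % 8;
}

/* Complete: the stored cookie identifies which request finished. */
static void demo_complete(unsigned int slot)
{
	printf("completed wr_id 0x%llx\n",
	       (unsigned long long)demo_ring[slot].wr_id);
}

int main(void)
{
	demo_post(NULL, 0xabcd);
	demo_complete(0);
	return 0;
}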
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 97455c573db5..69dd92598b7e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
 {
 	return vdev->res[type].count;
 }
+EXPORT_SYMBOL(vnic_dev_get_res_count);
 
 void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
 	unsigned int index)
@@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
 		return (char __iomem *)vdev->res[type].vaddr;
 	}
 }
+EXPORT_SYMBOL(vnic_dev_get_res);
 
 static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
 	unsigned int desc_count, unsigned int desc_size)
@@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 		kfree(vdev);
 	}
 }
+EXPORT_SYMBOL(vnic_dev_unregister);
 
 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
 	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
@@ -969,6 +972,13 @@ err_out:
 	vnic_dev_unregister(vdev);
 	return NULL;
 }
+EXPORT_SYMBOL(vnic_dev_register);
+
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
+{
+	return vdev->pdev;
+}
+EXPORT_SYMBOL(vnic_dev_get_pdev);
 
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index f3d9b79ba77e..e670029862a1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
 	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
 	unsigned int num_bars);
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev);
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
 int vnic_dev_enable2(struct vnic_dev *vdev, int active);
 int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 23d555255cf8..b9a0d78fd639 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -281,11 +281,25 @@ enum vnic_devcmd_cmd {
 	 * 0 if no VIF-CONFIG-INFO TLV was ever received. */
 	CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
 
+	/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+	 *	      (u32)a1=INT13_CMD_xxx
+	 */
+	CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+	/* Set default vlan:
+	 * in: (u16)a0=new default vlan
+	 *     (u16)a1=zero for overriding vlan with param a0,
+	 *	       non-zero for resetting vlan to the default
+	 * out: (u16)a0=old default vlan
+	 */
+	CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
 	/* init_prov_info2:
 	 * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
 	 * the vnic until CMD_ENABLE2 is issued.
 	 * (u64)a0=paddr of vnic_devcmd_provinfo
-	 * (u32)a1=sizeof provision info */
+	 * (u32)a1=sizeof provision info
+	 */
 	CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
 
 	/* enable2:
@@ -339,16 +353,57 @@ enum vnic_devcmd_cmd {
 	CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
 
 	/*
-	 * cmd_set_mac_addr
-	 *	set mac address
+	 * Set the predefined mac address as default
 	 * in:
 	 *   (u48)a0 = mac addr
-	 *
 	 */
 	CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+	/* Update the provisioning info of the given VIF
+	 * (u64)a0=paddr of vnic_devcmd_provinfo
+	 * (u32)a1=sizeof provision info
+	 */
+	CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+	/* Add a filter.
+	 * in: (u64) a0= filter address
+	 *     (u32) a1= size of filter
+	 * out: (u32) a0=filter identifier
+	 */
+	CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+	/* Delete a filter.
+	 * in: (u32) a0=filter identifier
+	 */
+	CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+	/* Enable a Queue Pair in User space NIC
+	 * in: (u32) a0=Queue Pair number
+	 *     (u32) a1= command
+	 */
+	CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+	/* Disable a Queue Pair in User space NIC
+	 * in: (u32) a0=Queue Pair number
+	 *     (u32) a1= command
+	 */
+	CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+	/* Stats dump Queue Pair in User space NIC
+	 * in: (u32) a0=Queue Pair number
+	 *     (u64) a1=host buffer addr for status dump
+	 *     (u32) a2=length of the buffer
+	 */
+	CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+	/* Clear stats for Queue Pair in User space NIC
+	 * in: (u32) a0=Queue Pair number
+	 */
+	CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
 };
 
 /* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
 #define CMD_ENABLE2_ACTIVE  0x1
 
 /* flags for CMD_OPEN */
@@ -364,6 +419,9 @@ enum vnic_devcmd_cmd {
 #define CMD_PFILTER_PROMISCUOUS	0x08
 #define CMD_PFILTER_ALL_MULTICAST	0x10
 
+/* Commands for CMD_QP_ENABLE/CMD_QP_DISABLE */
+#define CMD_QP_RQWQ	0x0
+
 /* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
 #define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK	0
 #define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN	1
@@ -390,6 +448,7 @@ enum vnic_devcmd_error {
 	ERR_EMAXRES = 10,
 	ERR_ENOTSUPPORTED = 11,
 	ERR_EINPROGRESS = 12,
+	ERR_MAX
 };
 
 /*
@@ -435,6 +494,115 @@ struct vnic_devcmd_provinfo {
 	u8 data[0];
 };
 
+/* These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELDS_USNIC ( \
+			FILTER_FIELD_VALID(1) | \
+			FILTER_FIELD_VALID(2) | \
+			FILTER_FIELD_VALID(3) | \
+			FILTER_FIELD_VALID(4))
+
+#define FILTER_FIELDS_IPV4_5TUPLE ( \
+			FILTER_FIELD_VALID(1) | \
+			FILTER_FIELD_VALID(2) | \
+			FILTER_FIELD_VALID(3) | \
+			FILTER_FIELD_VALID(4) | \
+			FILTER_FIELD_VALID(5))
+
+#define FILTER_FIELDS_MAC_VLAN ( \
+			FILTER_FIELD_VALID(1) | \
+			FILTER_FIELD_VALID(2))
+
+#define FILTER_FIELD_USNIC_VLAN    FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO   FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID      FILTER_FIELD_VALID(4)
+
+struct filter_usnic_id {
+	u32 flags;
+	u16 vlan;
+	u16 ethtype;
+	u8 proto_version;
+	u32 usnic_id;
+} __packed;
+
+#define FILTER_FIELD_5TUP_PROTO  FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+	PROTO_UDP = 0,
+	PROTO_TCP = 1,
+};
+
+struct filter_ipv4_5tuple {
+	u32 flags;
+	u32 protocol;
+	u32 src_addr;
+	u32 dst_addr;
+	u16 src_port;
+	u16 dst_port;
+} __packed;
+
+#define FILTER_FIELD_VMQ_VLAN   FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC    FILTER_FIELD_VALID(2)
+
+struct filter_mac_vlan {
+	u32 flags;
+	u16 vlan;
+	u8 mac_addr[6];
+} __packed;
+
+/* Specifies the filter_action type. */
+enum {
+	FILTER_ACTION_RQ_STEERING = 0,
+	FILTER_ACTION_MAX
+};
+
+struct filter_action {
+	u32 type;
+	union {
+		u32 rq_idx;
+	} u;
+} __packed;
+
+/* Specifies the filter type. */
+enum filter_type {
+	FILTER_USNIC_ID = 0,
+	FILTER_IPV4_5TUPLE = 1,
+	FILTER_MAC_VLAN = 2,
+	FILTER_MAX
+};
+
+struct filter {
+	u32 type;
+	union {
+		struct filter_usnic_id usnic;
+		struct filter_ipv4_5tuple ipv4;
+		struct filter_mac_vlan mac_vlan;
+	} u;
+} __packed;
+
+enum {
+	CLSF_TLV_FILTER = 0,
+	CLSF_TLV_ACTION = 1,
+};
+
+/* Maximum size of buffer to CMD_ADD_FILTER */
+#define FILTER_MAX_BUF_SIZE 100
+
+struct filter_tlv {
+	u_int32_t type;
+	u_int32_t length;
+	u_int32_t val[0];
+};
+
 /*
  * Writing cmd register causes STAT_BUSY to get set in status register.
  * When cmd completes, STAT_BUSY will be cleared.
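CMD_ADD_FILTER consumes a TLV-encoded buffer: a CLSF_TLV_FILTER entry wrapping one of the filter structs, optionally followed by a CLSF_TLV_ACTION entry, with the whole buffer capped at FILTER_MAX_BUF_SIZE. A hedged userspace sketch of laying out a MAC/VLAN filter TLV (the types are simplified copies for illustration, not the kernel's helpers, and the real value carries the full struct filter with its type discriminator):

#include <stdint.h>
#include <string.h>

/* Simplified userspace copy of the TLV layout above. */
struct demo_tlv {
	uint32_t type;
	uint32_t length;	/* bytes of value that follow */
	uint8_t val[];
};

enum { DEMO_TLV_FILTER = 0, DEMO_TLV_ACTION = 1 };

struct demo_mac_vlan {
	uint32_t flags;		/* FILTER_FIELD_VALID(1) | FILTER_FIELD_VALID(2) */
	uint16_t vlan;
	uint8_t mac_addr[6];
} __attribute__((packed));

/* Lay down one FILTER entry at buf; returns bytes consumed, so an
 * ACTION TLV could be appended right after it. */
static size_t demo_build_filter_tlv(uint8_t *buf, uint16_t vlan,
				    const uint8_t mac[6])
{
	struct demo_tlv *tlv = (struct demo_tlv *)buf;
	struct demo_mac_vlan f = { .flags = (1 << 0) | (1 << 1),
				   .vlan = vlan };

	memcpy(f.mac_addr, mac, sizeof(f.mac_addr));
	tlv->type = DEMO_TLV_FILTER;
	tlv->length = sizeof(f);
	memcpy(tlv->val, &f, sizeof(f));
	return sizeof(*tlv) + tlv->length;
}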
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 7e1488fc8ab2..36a2ed606c91 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -30,12 +30,9 @@
 static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 {
 	struct vnic_rq_buf *buf;
-	struct vnic_dev *vdev;
 	unsigned int i, j, count = rq->ring.desc_count;
 	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
 
-	vdev = rq->vdev;
-
 	for (i = 0; i < blks; i++) {
 		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
 		if (!rq->bufs[i])
@@ -141,7 +138,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
 	unsigned int error_interrupt_enable,
 	unsigned int error_interrupt_offset)
 {
-	u32 fetch_index;
+	u32 fetch_index = 0;
 
 	/* Use current fetch_index as the ring starting point */
 	fetch_index = ioread32(&rq->ctrl->fetch_index);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 2056586f4d4b..ee7bc95af278 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -72,6 +72,7 @@ struct vnic_rq_buf {
 	unsigned int len;
 	unsigned int index;
 	void *desc;
+	uint64_t wr_id;
 };
 
 struct vnic_rq {
@@ -110,7 +111,8 @@ static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
 
 static inline void vnic_rq_post(struct vnic_rq *rq,
 	void *os_buf, unsigned int os_buf_index,
-	dma_addr_t dma_addr, unsigned int len)
+	dma_addr_t dma_addr, unsigned int len,
+	uint64_t wrid)
 {
 	struct vnic_rq_buf *buf = rq->to_use;
 
@@ -118,6 +120,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
 	buf->os_buf_index = os_buf_index;
 	buf->dma_addr = dma_addr;
 	buf->len = len;
+	buf->wr_id = wrid;
 
 	buf = buf->next;
 	rq->to_use = buf;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 5e0d7a2be9bc..3e6b8d54dafc 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -30,12 +30,9 @@
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
 	struct vnic_wq_buf *buf;
-	struct vnic_dev *vdev;
 	unsigned int i, j, count = wq->ring.desc_count;
 	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
 
-	vdev = wq->vdev;
-
 	for (i = 0; i < blks; i++) {
 		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
 		if (!wq->bufs[i])
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 7dd937ac11c2..2c6c70804a39 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -58,6 +58,10 @@ struct vnic_wq_buf {
 	unsigned int index;
 	int sop;
 	void *desc;
+	uint64_t wr_id; /* Cookie */
+	uint8_t cq_entry; /* Gets completion event from hw */
+	uint8_t desc_skip_cnt; /* Num descs to occupy */
+	uint8_t compressed_send; /* Both hdr and payload in one desc */
 };
 
 /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
@@ -102,14 +106,20 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
 
 static inline void vnic_wq_post(struct vnic_wq *wq,
 	void *os_buf, dma_addr_t dma_addr,
-	unsigned int len, int sop, int eop)
+	unsigned int len, int sop, int eop,
+	uint8_t desc_skip_cnt, uint8_t cq_entry,
+	uint8_t compressed_send, uint64_t wrid)
 {
 	struct vnic_wq_buf *buf = wq->to_use;
 
 	buf->sop = sop;
+	buf->cq_entry = cq_entry;
+	buf->compressed_send = compressed_send;
+	buf->desc_skip_cnt = desc_skip_cnt;
 	buf->os_buf = eop ? os_buf : NULL;
 	buf->dma_addr = dma_addr;
 	buf->len = len;
+	buf->wr_id = wrid;
 
 	buf = buf->next;
 	if (eop) {
@@ -123,7 +133,7 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
 	}
 	wq->to_use = buf;
 
-	wq->ring.desc_avail--;
+	wq->ring.desc_avail -= desc_skip_cnt;
 }
 
 static inline void vnic_wq_service(struct vnic_wq *wq,
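Because a compressed send can occupy more than one descriptor slot, the ring accounting now subtracts desc_skip_cnt instead of decrementing by one. A toy model of the changed arithmetic:

#include <assert.h>

static unsigned int demo_desc_avail = 64;

/* Mirrors the changed accounting: one post may occupy several
 * descriptor slots (e.g. a compressed hdr+payload send). */
static void demo_post(unsigned int desc_skip_cnt)
{
	assert(demo_desc_avail >= desc_skip_cnt);
	demo_desc_avail -= desc_skip_cnt;	/* was: desc_avail-- */
}

int main(void)
{
	demo_post(1);	/* ordinary send: one slot */
	demo_post(2);	/* send occupying two slots */
	return demo_desc_avail == 61 ? 0 : 1;
}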
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a13b312b50f2..5f5896e522d2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1384,7 +1384,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
 static int
 dm9000_probe(struct platform_device *pdev)
 {
-	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
+	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct board_info *db;	/* Point a board information structure */
 	struct net_device *ndev;
 	const unsigned char *mac_src;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 4c830030fb06..2db6c573cec7 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2319,7 +2319,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
 	struct net_device *dev;
 	u_long iobase;
 
-	dev = dev_get_drvdata(&pdev->dev);
+	dev = pci_get_drvdata(pdev);
 	iobase = dev->base_addr;
 
 	unregister_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c94152f1c6be..4e8cfa2ac803 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct tulip_private *tp;
 	/* See note below on the multiport cards. */
-	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+	static unsigned char last_phys_addr[ETH_ALEN] = {
+		0x00, 'L', 'i', 'n', 'u', 'x'
+	};
 	static int last_irq;
 	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
 	int i, irq;
@@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			dev->dev_addr[i] = last_phys_addr[i] + 1;
 #if defined(CONFIG_SPARC)
 		addr = of_get_property(dp, "local-mac-address", &len);
-		if (addr && len == 6)
-			memcpy(dev->dev_addr, addr, 6);
+		if (addr && len == ETH_ALEN)
+			memcpy(dev->dev_addr, addr, ETH_ALEN);
 #endif
 #if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
 		if (last_irq)
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 50d9c6315930..bf3bf6f22c99 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -469,6 +469,17 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 	}
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+
+	disable_irq(np->pci_dev->irq);
+	intr_handler(np->pci_dev->irq, dev);
+	enable_irq(np->pci_dev->irq);
+}
+#endif
+
 static const struct net_device_ops netdev_ops = {
 	.ndo_open		= netdev_open,
 	.ndo_stop		= netdev_close,
@@ -480,6 +491,9 @@ static const struct net_device_ops netdev_ops = {
 	.ndo_change_mtu		= change_mtu,
 	.ndo_set_mac_address	= sundance_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= sundance_poll_controller,
+#endif
 };
 
 static int sundance_probe1(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c827b1b6b1ce..ace5050dba38 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER			"4.6.62.0u"
+#define DRV_VER			"4.9.134.0u"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"Emulex BladeEngine2"
 #define BE3_NAME		"Emulex BladeEngine3"
@@ -99,14 +99,18 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
 #define MCC_CQ_LEN		256
 
-#define BE3_MAX_RSS_QS		8
 #define BE2_MAX_RSS_QS		4
-#define MAX_RSS_QS		BE3_MAX_RSS_QS
-#define MAX_RX_QS		(MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define BE3_MAX_RSS_QS		16
+#define BE3_MAX_TX_QS		16
+#define BE3_MAX_EVT_QS		16
+
+#define MAX_RX_QS		32
+#define MAX_EVT_QS		32
+#define MAX_TX_QS		32
 
-#define MAX_TX_QS		8
 #define MAX_ROCE_EQS		5
-#define MAX_MSIX_VECTORS	(MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
+#define MAX_MSIX_VECTORS	32
+#define MIN_MSIX_VECTORS	1
 #define BE_TX_BUDGET		256
 #define BE_NAPI_WEIGHT		64
 #define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -189,6 +193,7 @@ struct be_eq_obj {
 	u32 cur_eqd;		/* in usecs */
 
 	u8 idx;			/* array index */
+	u8 msix_idx;
 	u16 tx_budget;
 	u16 spurious_intr;
 	struct napi_struct napi;
@@ -352,6 +357,18 @@ struct phy_info {
 	u32 supported;
 };
 
+struct be_resources {
+	u16 max_vfs;		/* Total VFs "really" supported by FW/HW */
+	u16 max_mcast_mac;
+	u16 max_tx_qs;
+	u16 max_rss_qs;
+	u16 max_rx_qs;
+	u16 max_uc_mac;		/* Max UC MACs programmable */
+	u16 max_vlans;		/* Number of vlans supported */
+	u16 max_evt_qs;
+	u32 if_cap_flags;
+};
+
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -369,18 +386,19 @@ struct be_adapter {
 	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
 	spinlock_t mcc_cq_lock;
 
-	u32 num_msix_vec;
-	u32 num_evt_qs;
-	struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
+	u16 cfg_num_qs;		/* configured via set-channels */
+	u16 num_evt_qs;
+	u16 num_msix_vec;
+	struct be_eq_obj eq_obj[MAX_EVT_QS];
 	struct msix_entry msix_entries[MAX_MSIX_VECTORS];
 	bool isr_registered;
 
 	/* TX Rings */
-	u32 num_tx_qs;
+	u16 num_tx_qs;
 	struct be_tx_obj tx_obj[MAX_TX_QS];
 
 	/* Rx rings */
-	u32 num_rx_qs;
+	u16 num_rx_qs;
 	struct be_rx_obj rx_obj[MAX_RX_QS];
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
@@ -430,8 +448,8 @@ struct be_adapter {
 	u32 flash_status;
 	struct completion flash_compl;
 
-	u32 num_vfs;		/* Number of VFs provisioned by PF driver */
-	u32 dev_num_vfs;	/* Number of VFs supported by HW */
+	struct be_resources res;	/* resources available for the func */
+	u16 num_vfs;		/* Number of VFs provisioned by PF */
 	u8 virtfn;
 	struct be_vf_cfg *vf_cfg;
 	bool be3_native;
@@ -446,21 +464,13 @@ struct be_adapter {
 	u16 qnq_vid;
 	u32 msg_enable;
 	int be_get_temp_freq;
-	u16 max_mcast_mac;
-	u16 max_tx_queues;
-	u16 max_rss_queues;
-	u16 max_rx_queues;
-	u16 max_pmac_cnt;
-	u16 max_vlans;
-	u16 max_event_queues;
-	u32 if_cap_flags;
 	u8 pf_number;
 	u64 rss_flags;
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
 #define sriov_enabled(adapter)		(adapter->num_vfs > 0)
-#define sriov_want(adapter)		(adapter->dev_num_vfs && num_vfs && \
+#define sriov_want(adapter)		(be_max_vfs(adapter) && num_vfs && \
 					 be_physfn(adapter))
 #define for_all_vfs(adapter, vf_cfg, i)					\
 	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
@@ -469,6 +479,26 @@ struct be_adapter {
 #define ON				1
 #define OFF				0
 
+#define be_max_vlans(adapter)		(adapter->res.max_vlans)
+#define be_max_uc(adapter)		(adapter->res.max_uc_mac)
+#define be_max_mc(adapter)		(adapter->res.max_mcast_mac)
+#define be_max_vfs(adapter)		(adapter->res.max_vfs)
+#define be_max_rss(adapter)		(adapter->res.max_rss_qs)
+#define be_max_txqs(adapter)		(adapter->res.max_tx_qs)
+#define be_max_prio_txqs(adapter)	(adapter->res.max_prio_tx_qs)
+#define be_max_rxqs(adapter)		(adapter->res.max_rx_qs)
+#define be_max_eqs(adapter)		(adapter->res.max_evt_qs)
+#define be_if_cap_flags(adapter)	(adapter->res.if_cap_flags)
+
+static inline u16 be_max_qs(struct be_adapter *adapter)
+{
+	/* If no RSS, need atleast the one def RXQ */
+	u16 num = max_t(u16, be_max_rss(adapter), 1);
+
+	num = min(num, be_max_eqs(adapter));
+	return min_t(u16, num, num_online_cpus());
+}
+
 #define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
 				 adapter->pdev->device == OC_DEVICE_ID4)
 
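be_max_qs() folds three independent ceilings into one usable queue count: the RSS capability (floored at one default RXQ), the event-queue budget, and the number of online CPUs. The same clamping restated as a runnable userspace function:

#include <stdio.h>

/* Userspace restatement of be_max_qs(): min(max(rss, 1), eqs, cpus). */
static unsigned short demo_max_qs(unsigned short max_rss,
				  unsigned short max_eqs,
				  unsigned short online_cpus)
{
	unsigned short num = max_rss ? max_rss : 1; /* one default RXQ floor */

	if (num > max_eqs)
		num = max_eqs;
	return num < online_cpus ? num : online_cpus;
}

int main(void)
{
	/* 16 RSS queues capped by 8 EQs and 4 online CPUs -> 4. */
	printf("%u\n", demo_max_qs(16, 8, 4));
	return 0;
}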
@@ -672,6 +702,8 @@ extern int be_load_fw(struct be_adapter *adapter, u8 *func);
672extern bool be_is_wol_supported(struct be_adapter *adapter); 702extern bool be_is_wol_supported(struct be_adapter *adapter);
673extern bool be_pause_supported(struct be_adapter *adapter); 703extern bool be_pause_supported(struct be_adapter *adapter);
674extern u32 be_get_fw_log_level(struct be_adapter *adapter); 704extern u32 be_get_fw_log_level(struct be_adapter *adapter);
705int be_update_queues(struct be_adapter *adapter);
706int be_poll(struct napi_struct *napi, int budget);
675 707
676/* 708/*
677 * internal function to initialize-cleanup roce device. 709 * internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8ec5d74ad44d..1ab5dab11eff 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -258,7 +258,8 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
 			(struct be_async_event_grp5_pvid_state *)evt);
 		break;
 	default:
-		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
+			 event_type);
 		break;
 	}
 }
@@ -279,7 +280,8 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
 		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
 		break;
 	default:
-		dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
+			 event_type);
 		break;
 	}
 }
@@ -631,6 +633,12 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
 	return &wrb->payload.sgl[0];
 }
 
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
+				 unsigned long addr)
+{
+	wrb->tag0 = addr & 0xFFFFFFFF;
+	wrb->tag1 = upper_32_bits(addr);
+}
 
 /* Don't touch the hdr after it's prepared */
 /* mem will be NULL for embedded commands */
@@ -639,17 +647,12 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 	struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
 {
 	struct be_sge *sge;
-	unsigned long addr = (unsigned long)req_hdr;
-	u64 req_addr = addr;
 
 	req_hdr->opcode = opcode;
 	req_hdr->subsystem = subsystem;
 	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
 	req_hdr->version = 0;
-
-	wrb->tag0 = req_addr & 0xFFFFFFFF;
-	wrb->tag1 = upper_32_bits(req_addr);
-
+	fill_wrb_tags(wrb, (ulong) req_hdr);
 	wrb->payload_length = cmd_len;
 	if (mem) {
 		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -676,31 +679,6 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
 	}
 }
 
-/* Converts interrupt delay in microseconds to multiplier value */
-static u32 eq_delay_to_mult(u32 usec_delay)
-{
-#define MAX_INTR_RATE			651042
-	const u32 round = 10;
-	u32 multiplier;
-
-	if (usec_delay == 0)
-		multiplier = 0;
-	else {
-		u32 interrupt_rate = 1000000 / usec_delay;
-		/* Max delay, corresponding to the lowest interrupt rate */
-		if (interrupt_rate == 0)
-			multiplier = 1023;
-		else {
-			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
-			multiplier /= interrupt_rate;
-			/* Round the multiplier to the closest value.*/
-			multiplier = (multiplier + round/2) / round;
-			multiplier = min(multiplier, (u32)1023);
-		}
-	}
-	return multiplier;
-}
-
 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
 {
 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
@@ -728,6 +706,78 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
728 return wrb; 706 return wrb;
729} 707}
730 708
709static bool use_mcc(struct be_adapter *adapter)
710{
711 return adapter->mcc_obj.q.created;
712}
713
714/* Must be used only in process context */
715static int be_cmd_lock(struct be_adapter *adapter)
716{
717 if (use_mcc(adapter)) {
718 spin_lock_bh(&adapter->mcc_lock);
719 return 0;
720 } else {
721 return mutex_lock_interruptible(&adapter->mbox_lock);
722 }
723}
724
725/* Must be used only in process context */
726static void be_cmd_unlock(struct be_adapter *adapter)
727{
728 if (use_mcc(adapter))
729 spin_unlock_bh(&adapter->mcc_lock);
730 else
731 return mutex_unlock(&adapter->mbox_lock);
732}
733
734static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
735 struct be_mcc_wrb *wrb)
736{
737 struct be_mcc_wrb *dest_wrb;
738
739 if (use_mcc(adapter)) {
740 dest_wrb = wrb_from_mccq(adapter);
741 if (!dest_wrb)
742 return NULL;
743 } else {
744 dest_wrb = wrb_from_mbox(adapter);
745 }
746
747 memcpy(dest_wrb, wrb, sizeof(*wrb));
748 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
749 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
750
751 return dest_wrb;
752}
753
754/* Must be used only in process context */
755static int be_cmd_notify_wait(struct be_adapter *adapter,
756 struct be_mcc_wrb *wrb)
757{
758 struct be_mcc_wrb *dest_wrb;
759 int status;
760
761 status = be_cmd_lock(adapter);
762 if (status)
763 return status;
764
765 dest_wrb = be_cmd_copy(adapter, wrb);
766 if (!dest_wrb) {
767 be_cmd_unlock(adapter);
768 return -EBUSY;
769 }
768
769 if (use_mcc(adapter))
770 status = be_mcc_notify_wait(adapter);
771 else
772 status = be_mbox_notify_wait(adapter);
773
774 if (!status)
775 memcpy(wrb, dest_wrb, sizeof(*wrb));
776
777 be_cmd_unlock(adapter);
778 return status;
779}
780
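Taken together, be_cmd_lock()/be_cmd_copy()/be_cmd_notify_wait() let a command path run both before and after the MCC queue exists: the caller builds the WRB on its own stack and the helpers pick MBOX or MCCQ underneath. A sketch of the resulting caller pattern (be_cmd_req_foo, be_cmd_resp_foo and OPCODE_COMMON_FOO are hypothetical placeholders; the real conversions in this patch, e.g. be_cmd_txq_create() below, follow the same shape):

/* Sketch only: the shape of a command converted to the stack-WRB scheme. */
int be_cmd_foo(struct be_adapter *adapter)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_foo *req = embedded_payload(&wrb);
	int status;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FOO, sizeof(*req), &wrb, NULL);
	/* fill request-specific fields in *req here */

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_foo *resp = embedded_payload(&wrb);
		/* consume response fields; on success the helper copied the
		 * completed WRB back into the stack copy */
	}
	return status;
}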
731/* Tell fw we're about to start firing cmds by writing a 781/* Tell fw we're about to start firing cmds by writing a
732 * special pattern across the wrb hdr; uses mbox 782 * special pattern across the wrb hdr; uses mbox
733 */ 783 */
@@ -788,13 +838,12 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
788 return status; 838 return status;
789} 839}
790 840
791int be_cmd_eq_create(struct be_adapter *adapter, 841int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
792 struct be_queue_info *eq, int eq_delay)
793{ 842{
794 struct be_mcc_wrb *wrb; 843 struct be_mcc_wrb *wrb;
795 struct be_cmd_req_eq_create *req; 844 struct be_cmd_req_eq_create *req;
796 struct be_dma_mem *q_mem = &eq->dma_mem; 845 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
797 int status; 846 int status, ver = 0;
798 847
799 if (mutex_lock_interruptible(&adapter->mbox_lock)) 848 if (mutex_lock_interruptible(&adapter->mbox_lock))
800 return -1; 849 return -1;
@@ -805,15 +854,18 @@ int be_cmd_eq_create(struct be_adapter *adapter,
805 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 854 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
806 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); 855 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
807 856
 857 /* Support for EQ_CREATEv2 is available only from SH-R onwards */
858 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
859 ver = 2;
860
861 req->hdr.version = ver;
808 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 862 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
809 863
810 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); 864 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
811 /* 4-byte eqe */ 865 /* 4-byte eqe */
812 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); 866 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
813 AMAP_SET_BITS(struct amap_eq_context, count, req->context, 867 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
814 __ilog2_u32(eq->len/256)); 868 __ilog2_u32(eqo->q.len / 256));
815 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
816 eq_delay_to_mult(eq_delay));
817 be_dws_cpu_to_le(req->context, sizeof(req->context)); 869 be_dws_cpu_to_le(req->context, sizeof(req->context));
818 870
819 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 871 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -821,8 +873,10 @@ int be_cmd_eq_create(struct be_adapter *adapter,
821 status = be_mbox_notify_wait(adapter); 873 status = be_mbox_notify_wait(adapter);
822 if (!status) { 874 if (!status) {
823 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); 875 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
824 eq->id = le16_to_cpu(resp->eq_id); 876 eqo->q.id = le16_to_cpu(resp->eq_id);
825 eq->created = true; 877 eqo->msix_idx =
878 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
879 eqo->q.created = true;
826 } 880 }
827 881
828 mutex_unlock(&adapter->mbox_lock); 882 mutex_unlock(&adapter->mbox_lock);
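The count field encodes the EQ size as log2 of the number of 256-entry units, so __ilog2_u32(eqo->q.len / 256) yields 2 for a 1024-entry EQ. A self-contained sketch of that encoding (assuming, as the driver does, that len is a power-of-two multiple of 256):

#include <stdint.h>

/* Mirror of the __ilog2_u32(len / 256) computation in be_cmd_eq_create(). */
static uint32_t eq_count_encode(uint32_t len)
{
	uint32_t units = len / 256;
	uint32_t enc = 0;

	while (units > 1) {	/* integer log2 */
		units >>= 1;
		enc++;
	}
	return enc;		/* 1024 -> 2, 2048 -> 3 */
}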
@@ -1010,9 +1064,9 @@ static u32 be_encoded_q_len(int q_len)
1010 return len_encoded; 1064 return len_encoded;
1011} 1065}
1012 1066
1013int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1067static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1014 struct be_queue_info *mccq, 1068 struct be_queue_info *mccq,
1015 struct be_queue_info *cq) 1069 struct be_queue_info *cq)
1016{ 1070{
1017 struct be_mcc_wrb *wrb; 1071 struct be_mcc_wrb *wrb;
1018 struct be_cmd_req_mcc_ext_create *req; 1072 struct be_cmd_req_mcc_ext_create *req;
@@ -1068,9 +1122,9 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1068 return status; 1122 return status;
1069} 1123}
1070 1124
1071int be_cmd_mccq_org_create(struct be_adapter *adapter, 1125static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1072 struct be_queue_info *mccq, 1126 struct be_queue_info *mccq,
1073 struct be_queue_info *cq) 1127 struct be_queue_info *cq)
1074{ 1128{
1075 struct be_mcc_wrb *wrb; 1129 struct be_mcc_wrb *wrb;
1076 struct be_cmd_req_mcc_create *req; 1130 struct be_cmd_req_mcc_create *req;
@@ -1128,25 +1182,16 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
1128 1182
1129int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) 1183int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1130{ 1184{
1131 struct be_mcc_wrb *wrb; 1185 struct be_mcc_wrb wrb = {0};
1132 struct be_cmd_req_eth_tx_create *req; 1186 struct be_cmd_req_eth_tx_create *req;
1133 struct be_queue_info *txq = &txo->q; 1187 struct be_queue_info *txq = &txo->q;
1134 struct be_queue_info *cq = &txo->cq; 1188 struct be_queue_info *cq = &txo->cq;
1135 struct be_dma_mem *q_mem = &txq->dma_mem; 1189 struct be_dma_mem *q_mem = &txq->dma_mem;
1136 int status, ver = 0; 1190 int status, ver = 0;
1137 1191
1138 spin_lock_bh(&adapter->mcc_lock); 1192 req = embedded_payload(&wrb);
1139
1140 wrb = wrb_from_mccq(adapter);
1141 if (!wrb) {
1142 status = -EBUSY;
1143 goto err;
1144 }
1145
1146 req = embedded_payload(wrb);
1147
1148 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1193 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1149 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); 1194 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1150 1195
1151 if (lancer_chip(adapter)) { 1196 if (lancer_chip(adapter)) {
1152 req->hdr.version = 1; 1197 req->hdr.version = 1;
@@ -1164,12 +1209,11 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1164 req->cq_id = cpu_to_le16(cq->id); 1209 req->cq_id = cpu_to_le16(cq->id);
1165 req->queue_size = be_encoded_q_len(txq->len); 1210 req->queue_size = be_encoded_q_len(txq->len);
1166 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1211 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1167
1168 ver = req->hdr.version; 1212 ver = req->hdr.version;
1169 1213
1170 status = be_mcc_notify_wait(adapter); 1214 status = be_cmd_notify_wait(adapter, &wrb);
1171 if (!status) { 1215 if (!status) {
1172 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); 1216 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1173 txq->id = le16_to_cpu(resp->cid); 1217 txq->id = le16_to_cpu(resp->cid);
1174 if (ver == 2) 1218 if (ver == 2)
1175 txo->db_offset = le32_to_cpu(resp->db_offset); 1219 txo->db_offset = le32_to_cpu(resp->db_offset);
@@ -1178,9 +1222,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1178 txq->created = true; 1222 txq->created = true;
1179 } 1223 }
1180 1224
1181err:
1182 spin_unlock_bh(&adapter->mcc_lock);
1183
1184 return status; 1225 return status;
1185} 1226}
1186 1227
@@ -1309,40 +1350,32 @@ err:
1309} 1350}
1310 1351
1311/* Create an rx filtering policy configuration on an i/f 1352/* Create an rx filtering policy configuration on an i/f
1312 * Uses MCCQ 1353 * Will use MBOX only if MCCQ has not been created.
1313 */ 1354 */
1314int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1355int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1315 u32 *if_handle, u32 domain) 1356 u32 *if_handle, u32 domain)
1316{ 1357{
1317 struct be_mcc_wrb *wrb; 1358 struct be_mcc_wrb wrb = {0};
1318 struct be_cmd_req_if_create *req; 1359 struct be_cmd_req_if_create *req;
1319 int status; 1360 int status;
1320 1361
1321 spin_lock_bh(&adapter->mcc_lock); 1362 req = embedded_payload(&wrb);
1322
1323 wrb = wrb_from_mccq(adapter);
1324 if (!wrb) {
1325 status = -EBUSY;
1326 goto err;
1327 }
1328 req = embedded_payload(wrb);
1329
1330 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1363 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1331 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL); 1364 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
1332 req->hdr.domain = domain; 1365 req->hdr.domain = domain;
1333 req->capability_flags = cpu_to_le32(cap_flags); 1366 req->capability_flags = cpu_to_le32(cap_flags);
1334 req->enable_flags = cpu_to_le32(en_flags); 1367 req->enable_flags = cpu_to_le32(en_flags);
1335
1336 req->pmac_invalid = true; 1368 req->pmac_invalid = true;
1337 1369
1338 status = be_mcc_notify_wait(adapter); 1370 status = be_cmd_notify_wait(adapter, &wrb);
1339 if (!status) { 1371 if (!status) {
1340 struct be_cmd_resp_if_create *resp = embedded_payload(wrb); 1372 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1341 *if_handle = le32_to_cpu(resp->interface_id); 1373 *if_handle = le32_to_cpu(resp->interface_id);
1342 }
1343 1374
1344err: 1375 /* Hack to retrieve VF's pmac-id on BE3 */
1345 spin_unlock_bh(&adapter->mcc_lock); 1376 if (BE3_chip(adapter) && !be_physfn(adapter))
1377 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1378 }
1346 return status; 1379 return status;
1347} 1380}
1348 1381
@@ -1460,6 +1493,12 @@ static int be_mac_to_link_speed(int mac_speed)
1460 return 1000; 1493 return 1000;
1461 case PHY_LINK_SPEED_10GBPS: 1494 case PHY_LINK_SPEED_10GBPS:
1462 return 10000; 1495 return 10000;
1496 case PHY_LINK_SPEED_20GBPS:
1497 return 20000;
1498 case PHY_LINK_SPEED_25GBPS:
1499 return 25000;
1500 case PHY_LINK_SPEED_40GBPS:
1501 return 40000;
1463 } 1502 }
1464 return 0; 1503 return 0;
1465} 1504}
@@ -1520,7 +1559,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1520{ 1559{
1521 struct be_mcc_wrb *wrb; 1560 struct be_mcc_wrb *wrb;
1522 struct be_cmd_req_get_cntl_addnl_attribs *req; 1561 struct be_cmd_req_get_cntl_addnl_attribs *req;
1523 int status; 1562 int status = 0;
1524 1563
1525 spin_lock_bh(&adapter->mcc_lock); 1564 spin_lock_bh(&adapter->mcc_lock);
1526 1565
@@ -1785,8 +1824,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1785 */ 1824 */
1786 req->if_flags_mask |= 1825 req->if_flags_mask |=
1787 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS & 1826 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1788 adapter->if_cap_flags); 1827 be_if_cap_flags(adapter));
1789
1790 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1828 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1791 netdev_for_each_mc_addr(ha, adapter->netdev) 1829 netdev_for_each_mc_addr(ha, adapter->netdev)
1792 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1830 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
@@ -2444,6 +2482,12 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2444 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2482 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2445 adapter->phy.misc_params = 2483 adapter->phy.misc_params =
2446 le32_to_cpu(resp_phy_info->misc_params); 2484 le32_to_cpu(resp_phy_info->misc_params);
2485
2486 if (BE2_chip(adapter)) {
2487 adapter->phy.fixed_speeds_supported =
2488 BE_SUPPORTED_SPEED_10GBPS |
2489 BE_SUPPORTED_SPEED_1GBPS;
2490 }
2447 } 2491 }
2448 pci_free_consistent(adapter->pdev, cmd.size, 2492 pci_free_consistent(adapter->pdev, cmd.size,
2449 cmd.va, cmd.dma); 2493 cmd.va, cmd.dma);
@@ -2606,9 +2650,44 @@ err:
2606 return status; 2650 return status;
2607} 2651}
2608 2652
2609/* Uses synchronous MCCQ */ 2653/* Set privilege(s) for a function */
2654int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2655 u32 domain)
2656{
2657 struct be_mcc_wrb *wrb;
2658 struct be_cmd_req_set_fn_privileges *req;
2659 int status;
2660
2661 spin_lock_bh(&adapter->mcc_lock);
2662
2663 wrb = wrb_from_mccq(adapter);
2664 if (!wrb) {
2665 status = -EBUSY;
2666 goto err;
2667 }
2668
2669 req = embedded_payload(wrb);
2670 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2671 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2672 wrb, NULL);
2673 req->hdr.domain = domain;
2674 if (lancer_chip(adapter))
2675 req->privileges_lancer = cpu_to_le32(privileges);
2676 else
2677 req->privileges = cpu_to_le32(privileges);
2678
2679 status = be_mcc_notify_wait(adapter);
2680err:
2681 spin_unlock_bh(&adapter->mcc_lock);
2682 return status;
2683}
2684
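A typical caller of the new command would be the PF adjusting a VF's privilege mask for its domain. A hypothetical sketch (BE_PRIV_FILTMGMT is assumed to be one of the driver's privilege bit definitions; it is not part of this hunk):

/* Sketch: grant a VF the filter-management privilege on top of what
 * it already holds.  The domain for VF 'vf' is vf + 1. */
u32 privs = 0;
int status = be_cmd_get_fn_privileges(adapter, &privs, vf + 1);

if (!status)
	status = be_cmd_set_fn_privileges(adapter,
					  privs | BE_PRIV_FILTMGMT, vf + 1);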
2685/* pmac_id_valid: true => pmac_id is supplied and its MAC address is requested.
2686 * pmac_id_valid: false => an active pmac_id or the first MAC address is requested.
2687 * If a pmac_id is returned, pmac_id_valid is set to true.
2688 */
2610int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2689int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2611 bool *pmac_id_active, u32 *pmac_id, u8 domain) 2690 bool *pmac_id_valid, u32 *pmac_id, u8 domain)
2612{ 2691{
2613 struct be_mcc_wrb *wrb; 2692 struct be_mcc_wrb *wrb;
2614 struct be_cmd_req_get_mac_list *req; 2693 struct be_cmd_req_get_mac_list *req;
@@ -2644,12 +2723,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2644 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 2723 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2645 req->hdr.domain = domain; 2724 req->hdr.domain = domain;
2646 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 2725 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2647 req->perm_override = 1; 2726 if (*pmac_id_valid) {
2727 req->mac_id = cpu_to_le32(*pmac_id);
2728 req->iface_id = cpu_to_le16(adapter->if_handle);
2729 req->perm_override = 0;
2730 } else {
2731 req->perm_override = 1;
2732 }
2648 2733
2649 status = be_mcc_notify_wait(adapter); 2734 status = be_mcc_notify_wait(adapter);
2650 if (!status) { 2735 if (!status) {
2651 struct be_cmd_resp_get_mac_list *resp = 2736 struct be_cmd_resp_get_mac_list *resp =
2652 get_mac_list_cmd.va; 2737 get_mac_list_cmd.va;
2738
2739 if (*pmac_id_valid) {
2740 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2741 ETH_ALEN);
2742 goto out;
2743 }
2744
2653 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 2745 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2654 /* Mac list returned could contain one or more active mac_ids 2746 /* Mac list returned could contain one or more active mac_ids
2655 * or one or more true or pseudo permanent mac addresses. 2747 * or one or more true or pseudo permanent mac addresses.
@@ -2667,14 +2759,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2667 * is 6 bytes 2759 * is 6 bytes
2668 */ 2760 */
2669 if (mac_addr_size == sizeof(u32)) { 2761 if (mac_addr_size == sizeof(u32)) {
2670 *pmac_id_active = true; 2762 *pmac_id_valid = true;
2671 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 2763 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2672 *pmac_id = le32_to_cpu(mac_id); 2764 *pmac_id = le32_to_cpu(mac_id);
2673 goto out; 2765 goto out;
2674 } 2766 }
2675 } 2767 }
2676 /* If no active mac_id found, return first mac addr */ 2768 /* If no active mac_id found, return first mac addr */
2677 *pmac_id_active = false; 2769 *pmac_id_valid = false;
2678 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2770 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2679 ETH_ALEN); 2771 ETH_ALEN);
2680 } 2772 }
@@ -2686,6 +2778,41 @@ out:
2686 return status; 2778 return status;
2687} 2779}
2688 2780
2781int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
2782{
2783 bool active = true;
2784
2785 if (BEx_chip(adapter))
2786 return be_cmd_mac_addr_query(adapter, mac, false,
2787 adapter->if_handle, curr_pmac_id);
2788 else
2789 /* Fetch the MAC address using pmac_id */
2790 return be_cmd_get_mac_from_list(adapter, mac, &active,
2791 &curr_pmac_id, 0);
2792}
2793
2794int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2795{
2796 int status;
2797 bool pmac_valid = false;
2798
2799 memset(mac, 0, ETH_ALEN);
2800
2801 if (BEx_chip(adapter)) {
2802 if (be_physfn(adapter))
2803 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2804 0);
2805 else
2806 status = be_cmd_mac_addr_query(adapter, mac, false,
2807 adapter->if_handle, 0);
2808 } else {
2809 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
2810 NULL, 0);
2811 }
2812
2813 return status;
2814}
2815
2689/* Uses synchronous MCCQ */ 2816/* Uses synchronous MCCQ */
2690int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 2817int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2691 u8 mac_count, u32 domain) 2818 u8 mac_count, u32 domain)
@@ -2729,8 +2856,27 @@ err:
2729 return status; 2856 return status;
2730} 2857}
2731 2858
2859/* Wrapper to delete any active MACs and provision the new MAC.
2860 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
2861 * current list are active.
2862 */
2863int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
2864{
2865 bool active_mac = false;
2866 u8 old_mac[ETH_ALEN];
2867 u32 pmac_id;
2868 int status;
2869
2870 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
2871 &pmac_id, dom);
2872 if (!status && active_mac)
2873 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
2874
2875 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
2876}
2877
2732int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 2878int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2733 u32 domain, u16 intf_id) 2879 u32 domain, u16 intf_id, u16 hsw_mode)
2734{ 2880{
2735 struct be_mcc_wrb *wrb; 2881 struct be_mcc_wrb *wrb;
2736 struct be_cmd_req_set_hsw_config *req; 2882 struct be_cmd_req_set_hsw_config *req;
@@ -2757,6 +2903,13 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2757 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 2903 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2758 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 2904 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2759 } 2905 }
2906 if (!BEx_chip(adapter) && hsw_mode) {
2907 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
2908 ctxt, adapter->hba_port_num);
2909 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
2910 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
2911 ctxt, hsw_mode);
2912 }
2760 2913
2761 be_dws_cpu_to_le(req->context, sizeof(req->context)); 2914 be_dws_cpu_to_le(req->context, sizeof(req->context));
2762 status = be_mcc_notify_wait(adapter); 2915 status = be_mcc_notify_wait(adapter);
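With the pport and port_fwd_type bits in place, flipping the physical port between bridge modes becomes a single call on Skyhawk-class chips. A hypothetical sketch of a caller (the likely call site would be an ndo_bridge_setlink handler, which is outside this hunk):

/* Sketch: put the physical port into VEPA forwarding mode.  pvid and
 * domain are 0 since only hsw_mode is being programmed here. */
status = be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
			       PORT_FWD_TYPE_VEPA);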
@@ -2768,7 +2921,7 @@ err:
2768 2921
2769/* Get Hyper switch config */ 2922/* Get Hyper switch config */
2770int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 2923int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2771 u32 domain, u16 intf_id) 2924 u32 domain, u16 intf_id, u8 *mode)
2772{ 2925{
2773 struct be_mcc_wrb *wrb; 2926 struct be_mcc_wrb *wrb;
2774 struct be_cmd_req_get_hsw_config *req; 2927 struct be_cmd_req_get_hsw_config *req;
@@ -2791,9 +2944,15 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2791 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); 2944 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2792 2945
2793 req->hdr.domain = domain; 2946 req->hdr.domain = domain;
2794 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, 2947 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2795 intf_id); 2948 ctxt, intf_id);
2796 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 2949 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2950
2951 if (!BEx_chip(adapter)) {
2952 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2953 ctxt, adapter->hba_port_num);
2954 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
2955 }
2797 be_dws_cpu_to_le(req->context, sizeof(req->context)); 2956 be_dws_cpu_to_le(req->context, sizeof(req->context));
2798 2957
2799 status = be_mcc_notify_wait(adapter); 2958 status = be_mcc_notify_wait(adapter);
@@ -2804,7 +2963,11 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2804 sizeof(resp->context)); 2963 sizeof(resp->context));
2805 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 2964 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2806 pvid, &resp->context); 2965 pvid, &resp->context);
2807 *pvid = le16_to_cpu(vid); 2966 if (pvid)
2967 *pvid = le16_to_cpu(vid);
2968 if (mode)
2969 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2970 port_fwd_type, &resp->context);
2808 } 2971 }
2809 2972
2810err: 2973err:
@@ -2967,30 +3130,63 @@ err:
2967 return status; 3130 return status;
2968} 3131}
2969 3132
2970static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count, 3133static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
2971 u32 max_buf_size)
2972{ 3134{
2973 struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf; 3135 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
2974 int i; 3136 int i;
2975 3137
2976 for (i = 0; i < desc_count; i++) { 3138 for (i = 0; i < desc_count; i++) {
2977 desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE; 3139 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
2978 if (((void *)desc + desc->desc_len) > 3140 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
2979 (void *)(buf + max_buf_size)) 3141 return (struct be_nic_res_desc *)hdr;
2980 return NULL;
2981 3142
2982 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 3143 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
2983 desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1) 3144 hdr = (void *)hdr + hdr->desc_len;
2984 return desc;
2985
2986 desc = (void *)desc + desc->desc_len;
2987 } 3145 }
3146 return NULL;
3147}
2988 3148
3149static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3150 u32 desc_count)
3151{
3152 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3153 struct be_pcie_res_desc *pcie;
3154 int i;
3155
3156 for (i = 0; i < desc_count; i++) {
3157 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3158 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3159 pcie = (struct be_pcie_res_desc *)hdr;
3160 if (pcie->pf_num == devfn)
3161 return pcie;
3162 }
3163
3164 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3165 hdr = (void *)hdr + hdr->desc_len;
3166 }
2989 return NULL; 3167 return NULL;
2990} 3168}
2991 3169
3170static void be_copy_nic_desc(struct be_resources *res,
3171 struct be_nic_res_desc *desc)
3172{
3173 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3174 res->max_vlans = le16_to_cpu(desc->vlan_count);
3175 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3176 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3177 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3178 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3179 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3180 /* Clear flags that driver is not interested in */
3181 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3182 BE_IF_CAP_FLAGS_WANT;
3183 /* Need 1 RXQ as the default RXQ */
3184 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3185 res->max_rss_qs -= 1;
3186}
3187
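Both lookup helpers now share one walk pattern over the firmware's variable-length descriptor list: inspect the two-byte header, match on type, and advance by desc_len, falling back to the v0 size when firmware leaves the length zero. A self-contained sketch of that pattern (GNU C, matching the driver's use of the ?: and void-pointer-arithmetic extensions):

#include <stdint.h>

struct desc_hdr {
	uint8_t desc_type;
	uint8_t desc_len;	/* 0 means "assume the v0 size" */
};

#define DESC_SIZE_V0 72

/* Return the first descriptor of the wanted type, or NULL. */
static struct desc_hdr *find_desc(uint8_t *buf, uint32_t count, uint8_t type)
{
	struct desc_hdr *hdr = (struct desc_hdr *)buf;
	uint32_t i;

	for (i = 0; i < count; i++) {
		if (hdr->desc_type == type)
			return hdr;
		hdr = (void *)hdr + (hdr->desc_len ? : DESC_SIZE_V0);
	}
	return NULL;
}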
2992/* Uses Mbox */ 3188/* Uses Mbox */
2993int be_cmd_get_func_config(struct be_adapter *adapter) 3189int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
2994{ 3190{
2995 struct be_mcc_wrb *wrb; 3191 struct be_mcc_wrb *wrb;
2996 struct be_cmd_req_get_func_config *req; 3192 struct be_cmd_req_get_func_config *req;
@@ -3029,28 +3225,16 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
3029 if (!status) { 3225 if (!status) {
3030 struct be_cmd_resp_get_func_config *resp = cmd.va; 3226 struct be_cmd_resp_get_func_config *resp = cmd.va;
3031 u32 desc_count = le32_to_cpu(resp->desc_count); 3227 u32 desc_count = le32_to_cpu(resp->desc_count);
3032 struct be_nic_resource_desc *desc; 3228 struct be_nic_res_desc *desc;
3033 3229
3034 desc = be_get_nic_desc(resp->func_param, desc_count, 3230 desc = be_get_nic_desc(resp->func_param, desc_count);
3035 sizeof(resp->func_param));
3036 if (!desc) { 3231 if (!desc) {
3037 status = -EINVAL; 3232 status = -EINVAL;
3038 goto err; 3233 goto err;
3039 } 3234 }
3040 3235
3041 adapter->pf_number = desc->pf_num; 3236 adapter->pf_number = desc->pf_num;
3042 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count); 3237 be_copy_nic_desc(res, desc);
3043 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3044 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3045 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3046 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3047 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3048
3049 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3050 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3051
3052 /* Clear flags that driver is not interested in */
3053 adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
3054 } 3238 }
3055err: 3239err:
3056 mutex_unlock(&adapter->mbox_lock); 3240 mutex_unlock(&adapter->mbox_lock);
@@ -3060,8 +3244,8 @@ err:
3060} 3244}
3061 3245
3062/* Uses mbox */ 3246/* Uses mbox */
3063int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, 3247static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3064 u8 domain, struct be_dma_mem *cmd) 3248 u8 domain, struct be_dma_mem *cmd)
3065{ 3249{
3066 struct be_mcc_wrb *wrb; 3250 struct be_mcc_wrb *wrb;
3067 struct be_cmd_req_get_profile_config *req; 3251 struct be_cmd_req_get_profile_config *req;
@@ -3088,8 +3272,8 @@ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3088} 3272}
3089 3273
3090/* Uses sync mcc */ 3274/* Uses sync mcc */
3091int be_cmd_get_profile_config_mccq(struct be_adapter *adapter, 3275static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3092 u8 domain, struct be_dma_mem *cmd) 3276 u8 domain, struct be_dma_mem *cmd)
3093{ 3277{
3094 struct be_mcc_wrb *wrb; 3278 struct be_mcc_wrb *wrb;
3095 struct be_cmd_req_get_profile_config *req; 3279 struct be_cmd_req_get_profile_config *req;
@@ -3121,54 +3305,51 @@ err:
3121} 3305}
3122 3306
3123/* Uses sync mcc, if MCCQ is already created otherwise mbox */ 3307/* Uses sync mcc, if MCCQ is already created otherwise mbox */
3124int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, 3308int be_cmd_get_profile_config(struct be_adapter *adapter,
3125 u16 *txq_count, u8 domain) 3309 struct be_resources *res, u8 domain)
3126{ 3310{
3311 struct be_cmd_resp_get_profile_config *resp;
3312 struct be_pcie_res_desc *pcie;
3313 struct be_nic_res_desc *nic;
3127 struct be_queue_info *mccq = &adapter->mcc_obj.q; 3314 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3128 struct be_dma_mem cmd; 3315 struct be_dma_mem cmd;
3316 u32 desc_count;
3129 int status; 3317 int status;
3130 3318
3131 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3319 memset(&cmd, 0, sizeof(struct be_dma_mem));
3132 if (!lancer_chip(adapter)) 3320 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3133 cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1); 3321 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3134 else 3322 if (!cmd.va)
3135 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3136 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3137 &cmd.dma);
3138 if (!cmd.va) {
3139 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3140 return -ENOMEM; 3323 return -ENOMEM;
3141 }
3142 3324
3143 if (!mccq->created) 3325 if (!mccq->created)
3144 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd); 3326 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3145 else 3327 else
3146 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd); 3328 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3147 if (!status) { 3329 if (status)
3148 struct be_cmd_resp_get_profile_config *resp = cmd.va; 3330 goto err;
3149 u32 desc_count = le32_to_cpu(resp->desc_count);
3150 struct be_nic_resource_desc *desc;
3151 3331
3152 desc = be_get_nic_desc(resp->func_param, desc_count, 3332 resp = cmd.va;
3153 sizeof(resp->func_param)); 3333 desc_count = le32_to_cpu(resp->desc_count);
3334
3335 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3336 desc_count);
3337 if (pcie)
3338 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3339
3340 nic = be_get_nic_desc(resp->func_param, desc_count);
3341 if (nic)
3342 be_copy_nic_desc(res, nic);
3154 3343
3155 if (!desc) {
3156 status = -EINVAL;
3157 goto err;
3158 }
3159 if (cap_flags)
3160 *cap_flags = le32_to_cpu(desc->cap_flags);
3161 if (txq_count)
3162 *txq_count = le32_to_cpu(desc->txq_count);
3163 }
3164err: 3344err:
3165 if (cmd.va) 3345 if (cmd.va)
3166 pci_free_consistent(adapter->pdev, cmd.size, 3346 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3167 cmd.va, cmd.dma);
3168 return status; 3347 return status;
3169} 3348}
3170 3349
3171/* Uses sync mcc */ 3350/* Currently only Lancer uses this command, and it supports version 0 only.
3351 * Uses sync mcc
3352 */
3172int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, 3353int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3173 u8 domain) 3354 u8 domain)
3174{ 3355{
@@ -3189,12 +3370,10 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3189 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3370 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3190 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req), 3371 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3191 wrb, NULL); 3372 wrb, NULL);
3192
3193 req->hdr.domain = domain; 3373 req->hdr.domain = domain;
3194 req->desc_count = cpu_to_le32(1); 3374 req->desc_count = cpu_to_le32(1);
3195 3375 req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3196 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3376 req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3197 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3198 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV); 3377 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3199 req->nic_desc.pf_num = adapter->pf_number; 3378 req->nic_desc.pf_num = adapter->pf_number;
3200 req->nic_desc.vf_num = domain; 3379 req->nic_desc.vf_num = domain;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1b3b9e886412..d026226db88c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -202,6 +202,7 @@ struct be_mcc_mailbox {
202#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 202#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
203#define OPCODE_COMMON_GET_PORT_NAME 77 203#define OPCODE_COMMON_GET_PORT_NAME 77
204#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 204#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
205#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
205#define OPCODE_COMMON_GET_PHY_DETAILS 102 206#define OPCODE_COMMON_GET_PHY_DETAILS 102
206#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 207#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
207#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 208#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -306,7 +307,7 @@ struct be_cmd_req_eq_create {
306struct be_cmd_resp_eq_create { 307struct be_cmd_resp_eq_create {
307 struct be_cmd_resp_hdr resp_hdr; 308 struct be_cmd_resp_hdr resp_hdr;
308 u16 eq_id; /* sword */ 309 u16 eq_id; /* sword */
309 u16 rsvd0; /* sword */ 310 u16 msix_idx; /* available only in v2 */
310} __packed; 311} __packed;
311 312
312/******************** Mac query ***************************/ 313/******************** Mac query ***************************/
@@ -965,7 +966,10 @@ enum {
965 PHY_LINK_SPEED_10MBPS = 0x1, 966 PHY_LINK_SPEED_10MBPS = 0x1,
966 PHY_LINK_SPEED_100MBPS = 0x2, 967 PHY_LINK_SPEED_100MBPS = 0x2,
967 PHY_LINK_SPEED_1GBPS = 0x3, 968 PHY_LINK_SPEED_1GBPS = 0x3,
968 PHY_LINK_SPEED_10GBPS = 0x4 969 PHY_LINK_SPEED_10GBPS = 0x4,
970 PHY_LINK_SPEED_20GBPS = 0x5,
971 PHY_LINK_SPEED_25GBPS = 0x6,
972 PHY_LINK_SPEED_40GBPS = 0x7
969}; 973};
970 974
971struct be_cmd_resp_link_status { 975struct be_cmd_resp_link_status {
@@ -1480,6 +1484,11 @@ struct be_cmd_resp_get_fn_privileges {
1480 u32 privilege_mask; 1484 u32 privilege_mask;
1481}; 1485};
1482 1486
1487struct be_cmd_req_set_fn_privileges {
1488 struct be_cmd_req_hdr hdr;
1489 u32 privileges; /* Used by BE3, SH-R */
1490 u32 privileges_lancer; /* Used by Lancer */
1491};
1483 1492
1484/******************** GET/SET_MACLIST **************************/ 1493/******************** GET/SET_MACLIST **************************/
1485#define BE_MAX_MAC 64 1494#define BE_MAX_MAC 64
@@ -1524,12 +1533,17 @@ struct be_cmd_req_set_mac_list {
1524} __packed; 1533} __packed;
1525 1534
1526/*********************** HSW Config ***********************/ 1535/*********************** HSW Config ***********************/
1536#define PORT_FWD_TYPE_VEPA 0x3
1537#define PORT_FWD_TYPE_VEB 0x2
1538
1527struct amap_set_hsw_context { 1539struct amap_set_hsw_context {
1528 u8 interface_id[16]; 1540 u8 interface_id[16];
1529 u8 rsvd0[14]; 1541 u8 rsvd0[14];
1530 u8 pvid_valid; 1542 u8 pvid_valid;
1531 u8 rsvd1; 1543 u8 pport;
1532 u8 rsvd2[16]; 1544 u8 rsvd1[6];
1545 u8 port_fwd_type[3];
1546 u8 rsvd2[7];
1533 u8 pvid[16]; 1547 u8 pvid[16];
1534 u8 rsvd3[32]; 1548 u8 rsvd3[32];
1535 u8 rsvd4[32]; 1549 u8 rsvd4[32];
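For readers new to the amap_* convention: each u8 member is a placeholder whose array length is the field's width in bits, so the updated layout reads as a 16-bit interface_id, 14 reserved bits, one pvid_valid bit, one pport bit, 6 reserved bits and a 3-bit port_fwd_type. AMAP_SET_BITS()/AMAP_GET_BITS() then access those bit ranges inside the little-endian context words. A rough self-contained model of the setter (the real macros in the driver derive offset and width from the struct; this sketch takes them as explicit arguments):

#include <stdint.h>

/* Write 'value' into bits [bit_off, bit_off + width) of a context
 * buffer treated as a flat little-endian bit array. */
static void amap_set(uint8_t *ctxt, unsigned int bit_off,
		     unsigned int width, uint32_t value)
{
	unsigned int i;

	for (i = 0; i < width; i++, bit_off++) {
		if (value & (1u << i))
			ctxt[bit_off / 8] |= 1u << (bit_off % 8);
		else
			ctxt[bit_off / 8] &= ~(1u << (bit_off % 8));
	}
}

/* e.g. port_fwd_type above sits at bit offset 16 + 14 + 1 + 1 + 6 = 38,
 * width 3 */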
@@ -1554,7 +1568,9 @@ struct amap_get_hsw_req_context {
1554} __packed; 1568} __packed;
1555 1569
1556struct amap_get_hsw_resp_context { 1570struct amap_get_hsw_resp_context {
1557 u8 rsvd1[16]; 1571 u8 rsvd0[6];
1572 u8 port_fwd_type[3];
1573 u8 rsvd1[7];
1558 u8 pvid[16]; 1574 u8 pvid[16];
1559 u8 rsvd2[32]; 1575 u8 rsvd2[32];
1560 u8 rsvd3[32]; 1576 u8 rsvd3[32];
@@ -1709,11 +1725,13 @@ struct be_cmd_req_set_ext_fat_caps {
1709 struct be_fat_conf_params set_params; 1725 struct be_fat_conf_params set_params;
1710}; 1726};
1711 1727
1712#define RESOURCE_DESC_SIZE 88 1728#define RESOURCE_DESC_SIZE_V0 72
1729#define RESOURCE_DESC_SIZE_V1 88
1730#define PCIE_RESOURCE_DESC_TYPE_V0 0x40
1713#define NIC_RESOURCE_DESC_TYPE_V0 0x41 1731#define NIC_RESOURCE_DESC_TYPE_V0 0x41
1732#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
1714#define NIC_RESOURCE_DESC_TYPE_V1 0x51 1733#define NIC_RESOURCE_DESC_TYPE_V1 0x51
1715#define MAX_RESOURCE_DESC 4 1734#define MAX_RESOURCE_DESC 264
1716#define MAX_RESOURCE_DESC_V1 32
1717 1735
1718/* QOS unit number */ 1736/* QOS unit number */
1719#define QUN 4 1737#define QUN 4
@@ -1722,9 +1740,30 @@ struct be_cmd_req_set_ext_fat_caps {
1722/* No save */ 1740/* No save */
1723#define NOSV 7 1741#define NOSV 7
1724 1742
1725struct be_nic_resource_desc { 1743struct be_res_desc_hdr {
1726 u8 desc_type; 1744 u8 desc_type;
1727 u8 desc_len; 1745 u8 desc_len;
1746} __packed;
1747
1748struct be_pcie_res_desc {
1749 struct be_res_desc_hdr hdr;
1750 u8 rsvd0;
1751 u8 flags;
1752 u16 rsvd1;
1753 u8 pf_num;
1754 u8 rsvd2;
1755 u32 rsvd3;
1756 u8 sriov_state;
1757 u8 pf_state;
1758 u8 pf_type;
1759 u8 rsvd4;
1760 u16 num_vfs;
1761 u16 rsvd5;
1762 u32 rsvd6[17];
1763} __packed;
1764
1765struct be_nic_res_desc {
1766 struct be_res_desc_hdr hdr;
1728 u8 rsvd1; 1767 u8 rsvd1;
1729 u8 flags; 1768 u8 flags;
1730 u8 vf_num; 1769 u8 vf_num;
@@ -1753,7 +1792,7 @@ struct be_nic_resource_desc {
1753 u8 wol_param; 1792 u8 wol_param;
1754 u16 rsvd7; 1793 u16 rsvd7;
1755 u32 rsvd8[3]; 1794 u32 rsvd8[3];
1756}; 1795} __packed;
1757 1796
1758struct be_cmd_req_get_func_config { 1797struct be_cmd_req_get_func_config {
1759 struct be_cmd_req_hdr hdr; 1798 struct be_cmd_req_hdr hdr;
@@ -1762,7 +1801,7 @@ struct be_cmd_req_get_func_config {
1762struct be_cmd_resp_get_func_config { 1801struct be_cmd_resp_get_func_config {
1763 struct be_cmd_resp_hdr hdr; 1802 struct be_cmd_resp_hdr hdr;
1764 u32 desc_count; 1803 u32 desc_count;
1765 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE]; 1804 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
1766}; 1805};
1767 1806
1768#define ACTIVE_PROFILE_TYPE 0x2 1807#define ACTIVE_PROFILE_TYPE 0x2
@@ -1774,26 +1813,20 @@ struct be_cmd_req_get_profile_config {
1774}; 1813};
1775 1814
1776struct be_cmd_resp_get_profile_config { 1815struct be_cmd_resp_get_profile_config {
1777 struct be_cmd_req_hdr hdr; 1816 struct be_cmd_resp_hdr hdr;
1778 u32 desc_count;
1779 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
1780};
1781
1782struct be_cmd_resp_get_profile_config_v1 {
1783 struct be_cmd_req_hdr hdr;
1784 u32 desc_count; 1817 u32 desc_count;
1785 u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE]; 1818 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
1786}; 1819};
1787 1820
1788struct be_cmd_req_set_profile_config { 1821struct be_cmd_req_set_profile_config {
1789 struct be_cmd_req_hdr hdr; 1822 struct be_cmd_req_hdr hdr;
1790 u32 rsvd; 1823 u32 rsvd;
1791 u32 desc_count; 1824 u32 desc_count;
1792 struct be_nic_resource_desc nic_desc; 1825 struct be_nic_res_desc nic_desc;
1793}; 1826};
1794 1827
1795struct be_cmd_resp_set_profile_config { 1828struct be_cmd_resp_set_profile_config {
1796 struct be_cmd_req_hdr hdr; 1829 struct be_cmd_resp_hdr hdr;
1797}; 1830};
1798 1831
1799struct be_cmd_enable_disable_vf { 1832struct be_cmd_enable_disable_vf {
@@ -1842,8 +1875,7 @@ extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1842 u32 en_flags, u32 *if_handle, u32 domain); 1875 u32 en_flags, u32 *if_handle, u32 domain);
1843extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, 1876extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
1844 u32 domain); 1877 u32 domain);
1845extern int be_cmd_eq_create(struct be_adapter *adapter, 1878extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
1846 struct be_queue_info *eq, int eq_delay);
1847extern int be_cmd_cq_create(struct be_adapter *adapter, 1879extern int be_cmd_cq_create(struct be_adapter *adapter,
1848 struct be_queue_info *cq, struct be_queue_info *eq, 1880 struct be_queue_info *cq, struct be_queue_info *eq,
1849 bool no_delay, int num_cqe_dma_coalesce); 1881 bool no_delay, int num_cqe_dma_coalesce);
@@ -1927,15 +1959,22 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1927extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 1959extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1928extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, 1960extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
1929 u32 *privilege, u32 domain); 1961 u32 *privilege, u32 domain);
1962extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
1963 u32 privileges, u32 vf_num);
1930extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 1964extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1931 bool *pmac_id_active, u32 *pmac_id, 1965 bool *pmac_id_active, u32 *pmac_id,
1932 u8 domain); 1966 u8 domain);
1967extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
1968 u8 *mac);
1969extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
1933extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 1970extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
1934 u8 mac_count, u32 domain); 1971 u8 mac_count, u32 domain);
1972extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
1973 u32 dom);
1935extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 1974extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
1936 u32 domain, u16 intf_id); 1975 u32 domain, u16 intf_id, u16 hsw_mode);
1937extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 1976extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
1938 u32 domain, u16 intf_id); 1977 u32 domain, u16 intf_id, u8 *mode);
1939extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); 1978extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
1940extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 1979extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
1941 struct be_dma_mem *cmd); 1980 struct be_dma_mem *cmd);
@@ -1948,10 +1987,10 @@ extern int lancer_initiate_dump(struct be_adapter *adapter);
1948extern bool dump_present(struct be_adapter *adapter); 1987extern bool dump_present(struct be_adapter *adapter);
1949extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); 1988extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1950extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); 1989extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1951extern int be_cmd_get_func_config(struct be_adapter *adapter); 1990int be_cmd_get_func_config(struct be_adapter *adapter,
1952extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, 1991 struct be_resources *res);
1953 u16 *txq_count, u8 domain); 1992int be_cmd_get_profile_config(struct be_adapter *adapter,
1954 1993 struct be_resources *res, u8 domain);
1955extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, 1994extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
1956 u8 domain); 1995 u8 domain);
1957extern int be_cmd_get_if_id(struct be_adapter *adapter, 1996extern int be_cmd_get_if_id(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 4f8c941217cc..b440a1fac77b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1119,6 +1119,29 @@ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1119 return status; 1119 return status;
1120} 1120}
1121 1121
1122static void be_get_channels(struct net_device *netdev,
1123 struct ethtool_channels *ch)
1124{
1125 struct be_adapter *adapter = netdev_priv(netdev);
1126
1127 ch->combined_count = adapter->num_evt_qs;
1128 ch->max_combined = be_max_qs(adapter);
1129}
1130
1131static int be_set_channels(struct net_device *netdev,
1132 struct ethtool_channels *ch)
1133{
1134 struct be_adapter *adapter = netdev_priv(netdev);
1135
1136 if (ch->rx_count || ch->tx_count || ch->other_count ||
1137 !ch->combined_count || ch->combined_count > be_max_qs(adapter))
1138 return -EINVAL;
1139
1140 adapter->cfg_num_qs = ch->combined_count;
1141
1142 return be_update_queues(adapter);
1143}
1144
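From user space the new hooks are exercised via ethtool -l and ethtool -L <iface> combined N. A minimal sketch of the equivalent SIOCETHTOOL sequence, assuming only the standard ethtool UAPI (not code from this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Request n combined channels on ifname via an AF_INET datagram socket;
 * returns 0 on success, -1 with errno set otherwise. */
static int set_combined_channels(int sock, const char *ifname, __u32 n)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ch;
	if (ioctl(sock, SIOCETHTOOL, &ifr))	/* read current limits */
		return -1;

	ch.cmd = ETHTOOL_SCHANNELS;
	ch.combined_count = n;			/* driver validates range */
	return ioctl(sock, SIOCETHTOOL, &ifr);
}

On the driver side be_set_channels() stores the request in cfg_num_qs and rebuilds the queues through be_update_queues().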
1122const struct ethtool_ops be_ethtool_ops = { 1145const struct ethtool_ops be_ethtool_ops = {
1123 .get_settings = be_get_settings, 1146 .get_settings = be_get_settings,
1124 .get_drvinfo = be_get_drvinfo, 1147 .get_drvinfo = be_get_drvinfo,
@@ -1145,4 +1168,6 @@ const struct ethtool_ops be_ethtool_ops = {
1145 .self_test = be_self_test, 1168 .self_test = be_self_test,
1146 .get_rxnfc = be_get_rxnfc, 1169 .get_rxnfc = be_get_rxnfc,
1147 .set_rxnfc = be_set_rxnfc, 1170 .set_rxnfc = be_set_rxnfc,
1171 .get_channels = be_get_channels,
1172 .set_channels = be_set_channels
1148}; 1173};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3d91a5ec61a4..3224d28cdad4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,6 +21,7 @@
21#include "be_cmds.h" 21#include "be_cmds.h"
22#include <asm/div64.h> 22#include <asm/div64.h>
23#include <linux/aer.h> 23#include <linux/aer.h>
24#include <linux/if_bridge.h>
24 25
25MODULE_VERSION(DRV_VER); 26MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids); 27MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -145,8 +146,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
145 q->len = len; 146 q->len = len;
146 q->entry_size = entry_size; 147 q->entry_size = entry_size;
147 mem->size = len * entry_size; 148 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, 149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL | __GFP_ZERO); 150 GFP_KERNEL);
150 if (!mem->va) 151 if (!mem->va)
151 return -ENOMEM; 152 return -ENOMEM;
152 return 0; 153 return 0;
@@ -247,54 +248,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
247static int be_mac_addr_set(struct net_device *netdev, void *p) 248static int be_mac_addr_set(struct net_device *netdev, void *p)
248{ 249{
249 struct be_adapter *adapter = netdev_priv(netdev); 250 struct be_adapter *adapter = netdev_priv(netdev);
251 struct device *dev = &adapter->pdev->dev;
250 struct sockaddr *addr = p; 252 struct sockaddr *addr = p;
251 int status = 0; 253 int status;
252 u8 current_mac[ETH_ALEN]; 254 u8 mac[ETH_ALEN];
253 u32 pmac_id = adapter->pmac_id[0]; 255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
254 bool active_mac = true;
255 256
256 if (!is_valid_ether_addr(addr->sa_data)) 257 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL; 258 return -EADDRNOTAVAIL;
258 259
259 /* For BE VF, MAC address is already activated by PF. 260 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
260 * Hence only operation left is updating netdev->devaddr. 261 * privilege or if PF did not provision the new MAC address.
261 * Update it if user is passing the same MAC which was used 262 * On BE3, this cmd will always fail if the VF doesn't have the
262 * during configuring VF MAC from PF(Hypervisor). 263 * FILTMGMT privilege. This failure is OK, only if the PF programmed
264 * the MAC for the VF.
263 */ 265 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) { 266 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
265 status = be_cmd_mac_addr_query(adapter, current_mac, 267 adapter->if_handle, &adapter->pmac_id[0], 0);
266 false, adapter->if_handle, 0); 268 if (!status) {
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN)) 269 curr_pmac_id = adapter->pmac_id[0];
268 goto done;
269 else
270 goto err;
271 }
272 270
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN)) 271 /* Delete the old programmed MAC. This call may fail if the
274 goto done; 272 * old MAC was already deleted by the PF driver.
273 */
274 if (adapter->pmac_id[0] != old_pmac_id)
275 be_cmd_pmac_del(adapter, adapter->if_handle,
276 old_pmac_id, 0);
277 }
275 278
 276 /* For Lancer check if any MAC is active. 279 /* Decide whether the new MAC was successfully activated only after
277 * If active, get its mac id. 280 * querying the FW
278 */ 281 */
279 if (lancer_chip(adapter) && !be_physfn(adapter)) 282 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
287 if (status) 283 if (status)
288 goto err; 284 goto err;
289 285
290 if (active_mac) 286 /* The MAC change did not happen, either due to lack of privilege
 291 be_cmd_pmac_del(adapter, adapter->if_handle, 287 * or because the PF did not pre-provision it.
292 pmac_id, 0); 288 */
293done: 289 if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
290 status = -EPERM;
291 goto err;
292 }
293
294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 dev_info(dev, "MAC address changed to %pM\n", mac);
295 return 0; 296 return 0;
296err: 297err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data); 298 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
298 return status; 299 return status;
299} 300}
300 301
@@ -472,7 +473,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
472 ACCESS_ONCE(*acc) = newacc; 473 ACCESS_ONCE(*acc) = newacc;
473} 474}
474 475
475void populate_erx_stats(struct be_adapter *adapter, 476static void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo, 477 struct be_rx_obj *rxo,
477 u32 erx_stat) 478 u32 erx_stat)
478{ 479{
@@ -1001,7 +1002,7 @@ static int be_vid_config(struct be_adapter *adapter)
1001 if (adapter->promiscuous) 1002 if (adapter->promiscuous)
1002 return 0; 1003 return 0;
1003 1004
1004 if (adapter->vlans_added > adapter->max_vlans) 1005 if (adapter->vlans_added > be_max_vlans(adapter))
1005 goto set_vlan_promisc; 1006 goto set_vlan_promisc;
1006 1007
1007 /* Construct VLAN Table to give to HW */ 1008 /* Construct VLAN Table to give to HW */
@@ -1042,7 +1043,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1042 goto ret; 1043 goto ret;
1043 1044
1044 adapter->vlan_tag[vid] = 1; 1045 adapter->vlan_tag[vid] = 1;
1045 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 1046 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
1046 status = be_vid_config(adapter); 1047 status = be_vid_config(adapter);
1047 1048
1048 if (!status) 1049 if (!status)
@@ -1068,7 +1069,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1068 goto ret; 1069 goto ret;
1069 1070
1070 adapter->vlan_tag[vid] = 0; 1071 adapter->vlan_tag[vid] = 0;
1071 if (adapter->vlans_added <= adapter->max_vlans) 1072 if (adapter->vlans_added <= be_max_vlans(adapter))
1072 status = be_vid_config(adapter); 1073 status = be_vid_config(adapter);
1073 1074
1074 if (!status) 1075 if (!status)
@@ -1101,7 +1102,7 @@ static void be_set_rx_mode(struct net_device *netdev)
1101 1102
1102 /* Enable multicast promisc if num configured exceeds what we support */ 1103 /* Enable multicast promisc if num configured exceeds what we support */
1103 if (netdev->flags & IFF_ALLMULTI || 1104 if (netdev->flags & IFF_ALLMULTI ||
1104 netdev_mc_count(netdev) > adapter->max_mcast_mac) { 1105 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1105 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 1106 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1106 goto done; 1107 goto done;
1107 } 1108 }
@@ -1115,7 +1116,7 @@ static void be_set_rx_mode(struct net_device *netdev)
1115 adapter->pmac_id[i], 0); 1116 adapter->pmac_id[i], 0);
1116 } 1117 }
1117 1118
1118 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) { 1119 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1119 be_cmd_rx_filter(adapter, IFF_PROMISC, ON); 1120 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1120 adapter->promiscuous = true; 1121 adapter->promiscuous = true;
1121 goto done; 1122 goto done;
@@ -1146,9 +1147,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1146 struct be_adapter *adapter = netdev_priv(netdev); 1147 struct be_adapter *adapter = netdev_priv(netdev);
1147 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1148 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1148 int status; 1149 int status;
1149 bool active_mac = false;
1150 u32 pmac_id;
1151 u8 old_mac[ETH_ALEN];
1152 1150
1153 if (!sriov_enabled(adapter)) 1151 if (!sriov_enabled(adapter))
1154 return -EPERM; 1152 return -EPERM;
@@ -1156,20 +1154,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1156 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) 1154 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1157 return -EINVAL; 1155 return -EINVAL;
1158 1156
1159 if (lancer_chip(adapter)) { 1157 if (BEx_chip(adapter)) {
1160 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, 1158 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1161 &pmac_id, vf + 1); 1159 vf + 1);
1162 if (!status && active_mac)
1163 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1164 pmac_id, vf + 1);
1165
1166 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1167 } else {
1168 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1169 vf_cfg->pmac_id, vf + 1);
1170 1160
1171 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, 1161 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1172 &vf_cfg->pmac_id, vf + 1); 1162 &vf_cfg->pmac_id, vf + 1);
1163 } else {
1164 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1165 vf + 1);
1173 } 1166 }
1174 1167
1175 if (status) 1168 if (status)
@@ -1220,14 +1213,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
1220 adapter->vf_cfg[vf].vlan_tag = vlan; 1213 adapter->vf_cfg[vf].vlan_tag = vlan;
1221 1214
1222 status = be_cmd_set_hsw_config(adapter, vlan, 1215 status = be_cmd_set_hsw_config(adapter, vlan,
1223 vf + 1, adapter->vf_cfg[vf].if_handle); 1216 vf + 1, adapter->vf_cfg[vf].if_handle, 0);
1224 } 1217 }
1225 } else { 1218 } else {
1226 /* Reset Transparent Vlan Tagging. */ 1219 /* Reset Transparent Vlan Tagging. */
1227 adapter->vf_cfg[vf].vlan_tag = 0; 1220 adapter->vf_cfg[vf].vlan_tag = 0;
1228 vlan = adapter->vf_cfg[vf].def_vid; 1221 vlan = adapter->vf_cfg[vf].def_vid;
1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1222 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1230 adapter->vf_cfg[vf].if_handle); 1223 adapter->vf_cfg[vf].if_handle, 0);
1231 } 1224 }
1232 1225
1233 1226
@@ -1490,8 +1483,9 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
1490} 1483}
1491 1484
1492/* Process the RX completion indicated by rxcp when GRO is enabled */ 1485/* Process the RX completion indicated by rxcp when GRO is enabled */
1493void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi, 1486static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1494 struct be_rx_compl_info *rxcp) 1487 struct napi_struct *napi,
1488 struct be_rx_compl_info *rxcp)
1495{ 1489{
1496 struct be_adapter *adapter = rxo->adapter; 1490 struct be_adapter *adapter = rxo->adapter;
1497 struct be_rx_page_info *page_info; 1491 struct be_rx_page_info *page_info;
@@ -1920,6 +1914,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
1920 if (eqo->q.created) { 1914 if (eqo->q.created) {
1921 be_eq_clean(eqo); 1915 be_eq_clean(eqo);
1922 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 1916 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1917 netif_napi_del(&eqo->napi);
1923 } 1918 }
1924 be_queue_free(adapter, &eqo->q); 1919 be_queue_free(adapter, &eqo->q);
1925 } 1920 }
@@ -1931,9 +1926,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
1931 struct be_eq_obj *eqo; 1926 struct be_eq_obj *eqo;
1932 int i, rc; 1927 int i, rc;
1933 1928
1934 adapter->num_evt_qs = num_irqs(adapter); 1929 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1930 adapter->cfg_num_qs);
1935 1931
1936 for_all_evt_queues(adapter, eqo, i) { 1932 for_all_evt_queues(adapter, eqo, i) {
1933 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1934 BE_NAPI_WEIGHT);
1937 eqo->adapter = adapter; 1935 eqo->adapter = adapter;
1938 eqo->tx_budget = BE_TX_BUDGET; 1936 eqo->tx_budget = BE_TX_BUDGET;
1939 eqo->idx = i; 1937 eqo->idx = i;
@@ -1946,7 +1944,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
1946 if (rc) 1944 if (rc)
1947 return rc; 1945 return rc;
1948 1946
1949 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd); 1947 rc = be_cmd_eq_create(adapter, eqo);
1950 if (rc) 1948 if (rc)
1951 return rc; 1949 return rc;
1952 } 1950 }
@@ -2020,31 +2018,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
2020 } 2018 }
2021} 2019}
2022 2020
2023static int be_num_txqs_want(struct be_adapter *adapter) 2021static int be_tx_qs_create(struct be_adapter *adapter)
2024{
2025 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2026 be_is_mc(adapter) ||
2027 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2028 BE2_chip(adapter))
2029 return 1;
2030 else
2031 return adapter->max_tx_queues;
2032}
2033
2034static int be_tx_cqs_create(struct be_adapter *adapter)
2035{ 2022{
2036 struct be_queue_info *cq, *eq; 2023 struct be_queue_info *cq, *eq;
2037 int status;
2038 struct be_tx_obj *txo; 2024 struct be_tx_obj *txo;
2039 u8 i; 2025 int status, i;
2040 2026
2041 adapter->num_tx_qs = be_num_txqs_want(adapter); 2027 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2042 if (adapter->num_tx_qs != MAX_TX_QS) {
2043 rtnl_lock();
2044 netif_set_real_num_tx_queues(adapter->netdev,
2045 adapter->num_tx_qs);
2046 rtnl_unlock();
2047 }
2048 2028
2049 for_all_tx_queues(adapter, txo, i) { 2029 for_all_tx_queues(adapter, txo, i) {
2050 cq = &txo->cq; 2030 cq = &txo->cq;
@@ -2060,16 +2040,7 @@ static int be_tx_cqs_create(struct be_adapter *adapter)
2060 status = be_cmd_cq_create(adapter, cq, eq, false, 3); 2040 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2061 if (status) 2041 if (status)
2062 return status; 2042 return status;
2063 }
2064 return 0;
2065}
2066 2043
2067static int be_tx_qs_create(struct be_adapter *adapter)
2068{
2069 struct be_tx_obj *txo;
2070 int i, status;
2071
2072 for_all_tx_queues(adapter, txo, i) {
2073 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN, 2044 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2074 sizeof(struct be_eth_wrb)); 2045 sizeof(struct be_eth_wrb));
2075 if (status) 2046 if (status)
@@ -2105,17 +2076,14 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
2105 struct be_rx_obj *rxo; 2076 struct be_rx_obj *rxo;
2106 int rc, i; 2077 int rc, i;
2107 2078
2108 /* We'll create as many RSS rings as there are irqs. 2079 /* We can create as many RSS rings as there are EQs. */
2109 * But when there's only one irq there's no use creating RSS rings 2080 adapter->num_rx_qs = adapter->num_evt_qs;
2081
2082 /* We'll use RSS only if at least 2 RSS rings are supported.
2083 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2110 */ 2084 */
2111 adapter->num_rx_qs = (num_irqs(adapter) > 1) ? 2085 if (adapter->num_rx_qs > 1)
2112 num_irqs(adapter) + 1 : 1; 2086 adapter->num_rx_qs++;
2113 if (adapter->num_rx_qs != MAX_RX_QS) {
2114 rtnl_lock();
2115 netif_set_real_num_rx_queues(adapter->netdev,
2116 adapter->num_rx_qs);
2117 rtnl_unlock();
2118 }
2119 2087
2120 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 2088 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2121 for_all_rx_queues(adapter, rxo, i) { 2089 for_all_rx_queues(adapter, rxo, i) {
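
The sizing rule in this hunk is: one RSS ring per event queue, plus one default (non-RSS) RXQ whenever RSS is actually in play. A worked model, assuming nothing beyond the arithmetic shown above:

#include <stdio.h>

/* One RSS ring per EQ; a default RXQ for non-IP traffic is added only
 * when at least two RSS rings exist. */
static int num_rx_qs(int num_evt_qs)
{
	int n = num_evt_qs;

	if (n > 1)
		n++;			/* default RXQ for non-IP traffic */
	return n;
}

int main(void)
{
	printf("1 EQ  -> %d RXQ(s)\n", num_rx_qs(1));	/* 1: no RSS */
	printf("4 EQs -> %d RXQs\n", num_rx_qs(4));	/* 5: 4 RSS + default */
	return 0;
}
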
@@ -2379,38 +2347,24 @@ static void be_msix_disable(struct be_adapter *adapter)
2379 if (msix_enabled(adapter)) { 2347 if (msix_enabled(adapter)) {
2380 pci_disable_msix(adapter->pdev); 2348 pci_disable_msix(adapter->pdev);
2381 adapter->num_msix_vec = 0; 2349 adapter->num_msix_vec = 0;
2350 adapter->num_msix_roce_vec = 0;
2382 } 2351 }
2383} 2352}
2384 2353
2385static uint be_num_rss_want(struct be_adapter *adapter)
2386{
2387 u32 num = 0;
2388
2389 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2390 (lancer_chip(adapter) ||
2391 (!sriov_want(adapter) && be_physfn(adapter)))) {
2392 num = adapter->max_rss_queues;
2393 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2394 }
2395 return num;
2396}
2397
2398static int be_msix_enable(struct be_adapter *adapter) 2354static int be_msix_enable(struct be_adapter *adapter)
2399{ 2355{
2400#define BE_MIN_MSIX_VECTORS 1 2356 int i, status, num_vec;
2401 int i, status, num_vec, num_roce_vec = 0;
2402 struct device *dev = &adapter->pdev->dev; 2357 struct device *dev = &adapter->pdev->dev;
2403 2358
2404 /* If RSS queues are not used, need a vec for default RX Q */ 2359 /* If RoCE is supported, program the max number of NIC vectors that
2405 num_vec = min(be_num_rss_want(adapter), num_online_cpus()); 2360 * may be configured via set-channels, along with vectors needed for
2406 if (be_roce_supported(adapter)) { 2361 * RoCE. Else, just program the number we'll use initially.
2407 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS, 2362 */
2408 (num_online_cpus() + 1)); 2363 if (be_roce_supported(adapter))
2409 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS); 2364 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2410 num_vec += num_roce_vec; 2365 2 * num_online_cpus());
2411 num_vec = min(num_vec, MAX_MSIX_VECTORS); 2366 else
2412 } 2367 num_vec = adapter->cfg_num_qs;
2413 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2414 2368
2415 for (i = 0; i < num_vec; i++) 2369 for (i = 0; i < num_vec; i++)
2416 adapter->msix_entries[i].entry = i; 2370 adapter->msix_entries[i].entry = i;
@@ -2418,7 +2372,7 @@ static int be_msix_enable(struct be_adapter *adapter)
2418 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); 2372 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2419 if (status == 0) { 2373 if (status == 0) {
2420 goto done; 2374 goto done;
2421 } else if (status >= BE_MIN_MSIX_VECTORS) { 2375 } else if (status >= MIN_MSIX_VECTORS) {
2422 num_vec = status; 2376 num_vec = status;
2423 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, 2377 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2424 num_vec); 2378 num_vec);
@@ -2427,30 +2381,29 @@ static int be_msix_enable(struct be_adapter *adapter)
2427 } 2381 }
2428 2382
2429 dev_warn(dev, "MSIx enable failed\n"); 2383 dev_warn(dev, "MSIx enable failed\n");
2384
2430 /* INTx is not supported in VFs, so fail probe if enable_msix fails */ 2385 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2431 if (!be_physfn(adapter)) 2386 if (!be_physfn(adapter))
2432 return status; 2387 return status;
2433 return 0; 2388 return 0;
2434done: 2389done:
2435 if (be_roce_supported(adapter)) { 2390 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2436 if (num_vec > num_roce_vec) { 2391 adapter->num_msix_roce_vec = num_vec / 2;
2437 adapter->num_msix_vec = num_vec - num_roce_vec; 2392 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2438 adapter->num_msix_roce_vec = 2393 adapter->num_msix_roce_vec);
2439 num_vec - adapter->num_msix_vec; 2394 }
2440 } else { 2395
2441 adapter->num_msix_vec = num_vec; 2396 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2442 adapter->num_msix_roce_vec = 0; 2397
2443 } 2398 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2444 } else 2399 adapter->num_msix_vec);
2445 adapter->num_msix_vec = num_vec;
2446 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2447 return 0; 2400 return 0;
2448} 2401}
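
Two things happen in the rewritten be_msix_enable(): a positive return from the old pci_enable_msix() API means "only this many vectors are available", which triggers one retry with the reduced count, and on success a RoCE-capable function hands half the vectors to RoCE. A sketch under those assumptions, with a stub standing in for pci_enable_msix():

#include <stdio.h>

#define MIN_VECS 1

/* Stub: returns 0 when the request fits, else the number available
 * (modeling the old pci_enable_msix() partial-grant convention). */
static int enable_msix(int requested, int available)
{
	return requested <= available ? 0 : available;
}

int main(void)
{
	int num_vec = 16, roce = 1, nic_vec, roce_vec = 0;
	int rc = enable_msix(num_vec, 8);

	if (rc >= MIN_VECS) {		/* partial grant: retry with fewer */
		num_vec = rc;
		rc = enable_msix(num_vec, 8);
	}
	if (rc)
		return 1;

	if (roce && num_vec > MIN_VECS)
		roce_vec = num_vec / 2;	/* split vectors with RoCE */
	nic_vec = num_vec - roce_vec;
	printf("NIC %d, RoCE %d\n", nic_vec, roce_vec);	/* NIC 4, RoCE 4 */
	return 0;
}
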
2449 2402
2450static inline int be_msix_vec_get(struct be_adapter *adapter, 2403static inline int be_msix_vec_get(struct be_adapter *adapter,
2451 struct be_eq_obj *eqo) 2404 struct be_eq_obj *eqo)
2452{ 2405{
2453 return adapter->msix_entries[eqo->idx].vector; 2406 return adapter->msix_entries[eqo->msix_idx].vector;
2454} 2407}
2455 2408
2456static int be_msix_register(struct be_adapter *adapter) 2409static int be_msix_register(struct be_adapter *adapter)
@@ -2690,8 +2643,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2690 memset(mac, 0, ETH_ALEN); 2643 memset(mac, 0, ETH_ALEN);
2691 2644
2692 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 2645 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2693 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2646 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2694 GFP_KERNEL | __GFP_ZERO); 2647 GFP_KERNEL);
2695 if (cmd.va == NULL) 2648 if (cmd.va == NULL)
2696 return -1; 2649 return -1;
2697 2650
@@ -2735,13 +2688,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
2735 be_vf_eth_addr_generate(adapter, mac); 2688 be_vf_eth_addr_generate(adapter, mac);
2736 2689
2737 for_all_vfs(adapter, vf_cfg, vf) { 2690 for_all_vfs(adapter, vf_cfg, vf) {
2738 if (lancer_chip(adapter)) { 2691 if (BEx_chip(adapter))
2739 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2740 } else {
2741 status = be_cmd_pmac_add(adapter, mac, 2692 status = be_cmd_pmac_add(adapter, mac,
2742 vf_cfg->if_handle, 2693 vf_cfg->if_handle,
2743 &vf_cfg->pmac_id, vf + 1); 2694 &vf_cfg->pmac_id, vf + 1);
2744 } 2695 else
2696 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2697 vf + 1);
2745 2698
2746 if (status) 2699 if (status)
2747 dev_err(&adapter->pdev->dev, 2700 dev_err(&adapter->pdev->dev,
@@ -2759,7 +2712,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
2759 int status, vf; 2712 int status, vf;
2760 u8 mac[ETH_ALEN]; 2713 u8 mac[ETH_ALEN];
2761 struct be_vf_cfg *vf_cfg; 2714 struct be_vf_cfg *vf_cfg;
2762 bool active; 2715 bool active = false;
2763 2716
2764 for_all_vfs(adapter, vf_cfg, vf) { 2717 for_all_vfs(adapter, vf_cfg, vf) {
2765 be_cmd_get_mac_from_list(adapter, mac, &active, 2718 be_cmd_get_mac_from_list(adapter, mac, &active,
@@ -2788,11 +2741,12 @@ static void be_vf_clear(struct be_adapter *adapter)
2788 pci_disable_sriov(adapter->pdev); 2741 pci_disable_sriov(adapter->pdev);
2789 2742
2790 for_all_vfs(adapter, vf_cfg, vf) { 2743 for_all_vfs(adapter, vf_cfg, vf) {
2791 if (lancer_chip(adapter)) 2744 if (BEx_chip(adapter))
2792 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2793 else
2794 be_cmd_pmac_del(adapter, vf_cfg->if_handle, 2745 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2795 vf_cfg->pmac_id, vf + 1); 2746 vf_cfg->pmac_id, vf + 1);
2747 else
2748 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2749 vf + 1);
2796 2750
2797 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); 2751 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2798 } 2752 }
@@ -2801,28 +2755,40 @@ done:
2801 adapter->num_vfs = 0; 2755 adapter->num_vfs = 0;
2802} 2756}
2803 2757
2804static int be_clear(struct be_adapter *adapter) 2758static void be_clear_queues(struct be_adapter *adapter)
2805{ 2759{
2806 int i = 1; 2760 be_mcc_queues_destroy(adapter);
2761 be_rx_cqs_destroy(adapter);
2762 be_tx_queues_destroy(adapter);
2763 be_evt_queues_destroy(adapter);
2764}
2807 2765
2766static void be_cancel_worker(struct be_adapter *adapter)
2767{
2808 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) { 2768 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2809 cancel_delayed_work_sync(&adapter->work); 2769 cancel_delayed_work_sync(&adapter->work);
2810 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED; 2770 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2811 } 2771 }
2772}
2773
2774static int be_clear(struct be_adapter *adapter)
2775{
2776 int i;
2777
2778 be_cancel_worker(adapter);
2812 2779
2813 if (sriov_enabled(adapter)) 2780 if (sriov_enabled(adapter))
2814 be_vf_clear(adapter); 2781 be_vf_clear(adapter);
2815 2782
2816 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) 2783 /* delete the primary mac along with the uc-mac list */
2784 for (i = 0; i < (adapter->uc_macs + 1); i++)
2817 be_cmd_pmac_del(adapter, adapter->if_handle, 2785 be_cmd_pmac_del(adapter, adapter->if_handle,
2818 adapter->pmac_id[i], 0); 2786 adapter->pmac_id[i], 0);
2787 adapter->uc_macs = 0;
2819 2788
2820 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 2789 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2821 2790
2822 be_mcc_queues_destroy(adapter); 2791 be_clear_queues(adapter);
2823 be_rx_cqs_destroy(adapter);
2824 be_tx_queues_destroy(adapter);
2825 be_evt_queues_destroy(adapter);
2826 2792
2827 kfree(adapter->pmac_id); 2793 kfree(adapter->pmac_id);
2828 adapter->pmac_id = NULL; 2794 adapter->pmac_id = NULL;
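
The rewritten cleanup loop above relies on the pmac table layout: slot 0 holds the primary MAC and slots 1..uc_macs hold the user-added unicast filters, so uc_macs + 1 deletions cover the whole table. A tiny illustration of that indexing (values are made up):

#include <stdio.h>

int main(void)
{
	/* pmac_id[0] = primary MAC, [1..uc_macs] = extra unicast filters */
	unsigned int pmac_id[4] = { 10, 11, 12, 13 };
	int uc_macs = 3, i;

	for (i = 0; i < uc_macs + 1; i++)
		printf("del pmac_id[%d] = %u\n", i, pmac_id[i]);
	return 0;
}
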
@@ -2833,6 +2799,7 @@ static int be_clear(struct be_adapter *adapter)
2833 2799
2834static int be_vfs_if_create(struct be_adapter *adapter) 2800static int be_vfs_if_create(struct be_adapter *adapter)
2835{ 2801{
2802 struct be_resources res = {0};
2836 struct be_vf_cfg *vf_cfg; 2803 struct be_vf_cfg *vf_cfg;
2837 u32 cap_flags, en_flags, vf; 2804 u32 cap_flags, en_flags, vf;
2838 int status; 2805 int status;
@@ -2841,9 +2808,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
2841 BE_IF_FLAGS_MULTICAST; 2808 BE_IF_FLAGS_MULTICAST;
2842 2809
2843 for_all_vfs(adapter, vf_cfg, vf) { 2810 for_all_vfs(adapter, vf_cfg, vf) {
2844 if (!BE3_chip(adapter)) 2811 if (!BE3_chip(adapter)) {
2845 be_cmd_get_profile_config(adapter, &cap_flags, 2812 status = be_cmd_get_profile_config(adapter, &res,
2846 NULL, vf + 1); 2813 vf + 1);
2814 if (!status)
2815 cap_flags = res.if_cap_flags;
2816 }
2847 2817
2848 /* If a FW profile exists, then cap_flags are updated */ 2818 /* If a FW profile exists, then cap_flags are updated */
2849 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 2819 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@ -2880,6 +2850,7 @@ static int be_vf_setup(struct be_adapter *adapter)
2880 u16 def_vlan, lnk_speed; 2850 u16 def_vlan, lnk_speed;
2881 int status, old_vfs, vf; 2851 int status, old_vfs, vf;
2882 struct device *dev = &adapter->pdev->dev; 2852 struct device *dev = &adapter->pdev->dev;
2853 u32 privileges;
2883 2854
2884 old_vfs = pci_num_vf(adapter->pdev); 2855 old_vfs = pci_num_vf(adapter->pdev);
2885 if (old_vfs) { 2856 if (old_vfs) {
@@ -2888,10 +2859,10 @@ static int be_vf_setup(struct be_adapter *adapter)
2888 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs); 2859 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2889 adapter->num_vfs = old_vfs; 2860 adapter->num_vfs = old_vfs;
2890 } else { 2861 } else {
2891 if (num_vfs > adapter->dev_num_vfs) 2862 if (num_vfs > be_max_vfs(adapter))
2892 dev_info(dev, "Device supports %d VFs and not %d\n", 2863 dev_info(dev, "Device supports %d VFs and not %d\n",
2893 adapter->dev_num_vfs, num_vfs); 2864 be_max_vfs(adapter), num_vfs);
2894 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs); 2865 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
2895 if (!adapter->num_vfs) 2866 if (!adapter->num_vfs)
2896 return 0; 2867 return 0;
2897 } 2868 }
@@ -2923,6 +2894,18 @@ static int be_vf_setup(struct be_adapter *adapter)
2923 } 2894 }
2924 2895
2925 for_all_vfs(adapter, vf_cfg, vf) { 2896 for_all_vfs(adapter, vf_cfg, vf) {
2897 /* Allow VFs to program MAC/VLAN filters */
2898 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2899 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2900 status = be_cmd_set_fn_privileges(adapter,
2901 privileges |
2902 BE_PRIV_FILTMGMT,
2903 vf + 1);
2904 if (!status)
2905 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2906 vf);
2907 }
2908
2926 /* BE3 FW, by default, caps VF TX-rate to 100mbps. 2909 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2927 * Allow full available bandwidth 2910 * Allow full available bandwidth
2928 */ 2911 */
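
The privilege grant added above is a read-modify-write: fetch the VF's current privilege mask, and only issue a set command when FILTMGMT is missing, OR-ing it into the existing bits so nothing already granted is lost. A minimal model (the bit value is illustrative, not the driver's actual define):

#include <stdio.h>

#define PRIV_FILTMGMT 0x4	/* assumed bit value, for illustration only */

int main(void)
{
	unsigned int privileges = 0x3;		/* pretend FW returned this */

	if (!(privileges & PRIV_FILTMGMT)) {
		privileges |= PRIV_FILTMGMT;	/* read-modify-write */
		printf("granted FILTMGMT, mask now 0x%x\n", privileges);
	}
	return 0;
}
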
@@ -2935,7 +2918,7 @@ static int be_vf_setup(struct be_adapter *adapter)
2935 vf_cfg->tx_rate = lnk_speed; 2918 vf_cfg->tx_rate = lnk_speed;
2936 2919
2937 status = be_cmd_get_hsw_config(adapter, &def_vlan, 2920 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2938 vf + 1, vf_cfg->if_handle); 2921 vf + 1, vf_cfg->if_handle, NULL);
2939 if (status) 2922 if (status)
2940 goto err; 2923 goto err;
2941 vf_cfg->def_vid = def_vlan; 2924 vf_cfg->def_vid = def_vlan;
@@ -2958,6 +2941,51 @@ err:
2958 return status; 2941 return status;
2959} 2942}
2960 2943
2944/* On BE2/BE3 FW does not suggest the supported limits */
2945static void BEx_get_resources(struct be_adapter *adapter,
2946 struct be_resources *res)
2947{
2948 struct pci_dev *pdev = adapter->pdev;
2949 bool use_sriov = false;
2950
2951 if (BE3_chip(adapter) && be_physfn(adapter)) {
2952 int max_vfs;
2953
2954 max_vfs = pci_sriov_get_totalvfs(pdev);
2955 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2956 use_sriov = res->max_vfs && num_vfs;
2957 }
2958
2959 if (be_physfn(adapter))
2960 res->max_uc_mac = BE_UC_PMAC_COUNT;
2961 else
2962 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2963
2964 if (adapter->function_mode & FLEX10_MODE)
2965 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2966 else
2967 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2968 res->max_mcast_mac = BE_MAX_MC;
2969
2970 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2971 !be_physfn(adapter))
2972 res->max_tx_qs = 1;
2973 else
2974 res->max_tx_qs = BE3_MAX_TX_QS;
2975
2976 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2977 !use_sriov && be_physfn(adapter))
2978 res->max_rss_qs = (adapter->be3_native) ?
2979 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2980 res->max_rx_qs = res->max_rss_qs + 1;
2981
2982 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
2983
2984 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
2985 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
2986 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
2987}
2988
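
BEx_get_resources() derives the limits BE2/BE3 firmware never reports. A worked model for one hypothetical case - a BE3 PF in native mode, RSS-capable, SR-IOV unused - with constants that mirror the macros above but whose values here are illustrative:

#include <stdio.h>

enum { BE3_MAX_RSS_QS = 16, BE3_MAX_TX_QS = 16, BE3_MAX_EVT_QS = 16 };

int main(void)
{
	int use_sriov = 0, physfn = 1, rss_capable = 1, be3_native = 1;
	int max_rss_qs = 0, max_rx_qs, max_tx_qs, max_evt_qs;

	max_tx_qs = (use_sriov || !physfn) ? 1 : BE3_MAX_TX_QS;
	if (rss_capable && !use_sriov && physfn)
		max_rss_qs = be3_native ? BE3_MAX_RSS_QS : 4 /* BE2 */;
	max_rx_qs = max_rss_qs + 1;		/* +1 default RXQ */
	max_evt_qs = physfn ? BE3_MAX_EVT_QS : 1;

	printf("tx %d, rss %d, rx %d, eq %d\n",
	       max_tx_qs, max_rss_qs, max_rx_qs, max_evt_qs);
	return 0;
}
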
2961static void be_setup_init(struct be_adapter *adapter) 2989static void be_setup_init(struct be_adapter *adapter)
2962{ 2990{
2963 adapter->vlan_prio_bmap = 0xff; 2991 adapter->vlan_prio_bmap = 0xff;
@@ -2971,118 +2999,56 @@ static void be_setup_init(struct be_adapter *adapter)
2971 adapter->cmd_privileges = MIN_PRIVILEGES; 2999 adapter->cmd_privileges = MIN_PRIVILEGES;
2972} 3000}
2973 3001
2974static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, 3002static int be_get_resources(struct be_adapter *adapter)
2975 bool *active_mac, u32 *pmac_id)
2976{ 3003{
2977 int status = 0; 3004 struct device *dev = &adapter->pdev->dev;
2978 3005 struct be_resources res = {0};
2979 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) { 3006 int status;
2980 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2981 if (!lancer_chip(adapter) && !be_physfn(adapter))
2982 *active_mac = true;
2983 else
2984 *active_mac = false;
2985 3007
2986 return status; 3008 if (BEx_chip(adapter)) {
3009 BEx_get_resources(adapter, &res);
3010 adapter->res = res;
2987 } 3011 }
2988 3012
2989 if (lancer_chip(adapter)) { 3013 /* For BE3 only check if FW suggests a different max-txqs value */
2990 status = be_cmd_get_mac_from_list(adapter, mac, 3014 if (BE3_chip(adapter)) {
2991 active_mac, pmac_id, 0); 3015 status = be_cmd_get_profile_config(adapter, &res, 0);
2992 if (*active_mac) { 3016 if (!status && res.max_tx_qs)
2993 status = be_cmd_mac_addr_query(adapter, mac, false, 3017 adapter->res.max_tx_qs =
2994 if_handle, *pmac_id); 3018 min(adapter->res.max_tx_qs, res.max_tx_qs);
2995 }
2996 } else if (be_physfn(adapter)) {
2997 /* For BE3, for PF get permanent MAC */
2998 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2999 *active_mac = false;
3000 } else {
3001 /* For BE3, for VF get soft MAC assigned by PF*/
3002 status = be_cmd_mac_addr_query(adapter, mac, false,
3003 if_handle, 0);
3004 *active_mac = true;
3005 } 3019 }
3006 return status;
3007}
3008
3009static void be_get_resources(struct be_adapter *adapter)
3010{
3011 u16 dev_num_vfs;
3012 int pos, status;
3013 bool profile_present = false;
3014 u16 txq_count = 0;
3015 3020
3021 /* For Lancer, SH etc. read per-function resource limits from FW.
3022 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3023 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3024 */
3016 if (!BEx_chip(adapter)) { 3025 if (!BEx_chip(adapter)) {
3017 status = be_cmd_get_func_config(adapter); 3026 status = be_cmd_get_func_config(adapter, &res);
3018 if (!status) 3027 if (status)
3019 profile_present = true; 3028 return status;
3020 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3021 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3022 }
3023
3024 if (profile_present) {
3025 /* Sanity fixes for Lancer */
3026 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3027 BE_UC_PMAC_COUNT);
3028 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3029 BE_NUM_VLANS_SUPPORTED);
3030 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3031 BE_MAX_MC);
3032 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3033 MAX_TX_QS);
3034 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3035 BE3_MAX_RSS_QS);
3036 adapter->max_event_queues = min_t(u16,
3037 adapter->max_event_queues,
3038 BE3_MAX_RSS_QS);
3039
3040 if (adapter->max_rss_queues &&
3041 adapter->max_rss_queues == adapter->max_rx_queues)
3042 adapter->max_rss_queues -= 1;
3043
3044 if (adapter->max_event_queues < adapter->max_rss_queues)
3045 adapter->max_rss_queues = adapter->max_event_queues;
3046
3047 } else {
3048 if (be_physfn(adapter))
3049 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3050 else
3051 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3052
3053 if (adapter->function_mode & FLEX10_MODE)
3054 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3055 else
3056 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3057 3029
3058 adapter->max_mcast_mac = BE_MAX_MC; 3030 /* If RoCE may be enabled stash away half the EQs for RoCE */
3059 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS; 3031 if (be_roce_supported(adapter))
3060 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues, 3032 res.max_evt_qs /= 2;
3061 MAX_TX_QS); 3033 adapter->res = res;
3062 adapter->max_rss_queues = (adapter->be3_native) ?
3063 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3064 adapter->max_event_queues = BE3_MAX_RSS_QS;
3065 3034
3066 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED | 3035 if (be_physfn(adapter)) {
3067 BE_IF_FLAGS_BROADCAST | 3036 status = be_cmd_get_profile_config(adapter, &res, 0);
3068 BE_IF_FLAGS_MULTICAST | 3037 if (status)
3069 BE_IF_FLAGS_PASS_L3L4_ERRORS | 3038 return status;
3070 BE_IF_FLAGS_MCAST_PROMISCUOUS | 3039 adapter->res.max_vfs = res.max_vfs;
3071 BE_IF_FLAGS_VLAN_PROMISCUOUS | 3040 }
3072 BE_IF_FLAGS_PROMISCUOUS;
3073 3041
3074 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) 3042 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3075 adapter->if_cap_flags |= BE_IF_FLAGS_RSS; 3043 be_max_txqs(adapter), be_max_rxqs(adapter),
3044 be_max_rss(adapter), be_max_eqs(adapter),
3045 be_max_vfs(adapter));
3046 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3047 be_max_uc(adapter), be_max_mc(adapter),
3048 be_max_vlans(adapter));
3076 } 3049 }
3077 3050
3078 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); 3051 return 0;
3079 if (pos) {
3080 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3081 &dev_num_vfs);
3082 if (BE3_chip(adapter))
3083 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3084 adapter->dev_num_vfs = dev_num_vfs;
3085 }
3086} 3052}
3087 3053
3088/* Routine to query per function resource limits */ 3054/* Routine to query per function resource limits */
@@ -3095,100 +3061,171 @@ static int be_get_config(struct be_adapter *adapter)
3095 &adapter->function_caps, 3061 &adapter->function_caps,
3096 &adapter->asic_rev); 3062 &adapter->asic_rev);
3097 if (status) 3063 if (status)
3098 goto err; 3064 return status;
3099 3065
3100 be_get_resources(adapter); 3066 status = be_get_resources(adapter);
3067 if (status)
3068 return status;
3101 3069
3102 /* primary mac needs 1 pmac entry */ 3070 /* primary mac needs 1 pmac entry */
3103 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, 3071 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3104 sizeof(u32), GFP_KERNEL); 3072 GFP_KERNEL);
3105 if (!adapter->pmac_id) { 3073 if (!adapter->pmac_id)
3106 status = -ENOMEM; 3074 return -ENOMEM;
3107 goto err;
3108 }
3109 3075
3110err: 3076 /* Sanitize cfg_num_qs based on HW and platform limits */
3111 return status; 3077 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3078
3079 return 0;
3112} 3080}
3113 3081
3114static int be_setup(struct be_adapter *adapter) 3082static int be_mac_setup(struct be_adapter *adapter)
3115{ 3083{
3116 struct device *dev = &adapter->pdev->dev;
3117 u32 en_flags;
3118 u32 tx_fc, rx_fc;
3119 int status;
3120 u8 mac[ETH_ALEN]; 3084 u8 mac[ETH_ALEN];
3121 bool active_mac; 3085 int status;
3122 3086
3123 be_setup_init(adapter); 3087 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3088 status = be_cmd_get_perm_mac(adapter, mac);
3089 if (status)
3090 return status;
3124 3091
3125 if (!lancer_chip(adapter)) 3092 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3126 be_cmd_req_native_mode(adapter); 3093 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3094 } else {
3095 /* Maybe the HW was reset; dev_addr must be re-programmed */
3096 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3097 }
3127 3098
3128 status = be_get_config(adapter); 3099 /* On BE3 VFs this cmd may fail due to lack of privilege.
3100 * Ignore the failure as in this case pmac_id is fetched
3101 * in the IFACE_CREATE cmd.
3102 */
3103 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3104 &adapter->pmac_id[0], 0);
3105 return 0;
3106}
3107
3108static void be_schedule_worker(struct be_adapter *adapter)
3109{
3110 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3111 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3112}
3113
3114static int be_setup_queues(struct be_adapter *adapter)
3115{
3116 struct net_device *netdev = adapter->netdev;
3117 int status;
3118
3119 status = be_evt_queues_create(adapter);
3129 if (status) 3120 if (status)
3130 goto err; 3121 goto err;
3131 3122
3132 status = be_msix_enable(adapter); 3123 status = be_tx_qs_create(adapter);
3133 if (status) 3124 if (status)
3134 goto err; 3125 goto err;
3135 3126
3136 status = be_evt_queues_create(adapter); 3127 status = be_rx_cqs_create(adapter);
3137 if (status) 3128 if (status)
3138 goto err; 3129 goto err;
3139 3130
3140 status = be_tx_cqs_create(adapter); 3131 status = be_mcc_queues_create(adapter);
3141 if (status) 3132 if (status)
3142 goto err; 3133 goto err;
3143 3134
3144 status = be_rx_cqs_create(adapter); 3135 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3145 if (status) 3136 if (status)
3146 goto err; 3137 goto err;
3147 3138
3148 status = be_mcc_queues_create(adapter); 3139 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3149 if (status) 3140 if (status)
3150 goto err; 3141 goto err;
3151 3142
3152 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0); 3143 return 0;
3153 /* In UMC mode FW does not return right privileges. 3144err:
3154 * Override with correct privilege equivalent to PF. 3145 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3146 return status;
3147}
3148
3149int be_update_queues(struct be_adapter *adapter)
3150{
3151 struct net_device *netdev = adapter->netdev;
3152 int status;
3153
3154 if (netif_running(netdev))
3155 be_close(netdev);
3156
3157 be_cancel_worker(adapter);
3158
3159 /* If any vectors have been shared with RoCE we cannot re-program
3160 * the MSIx table.
3155 */ 3161 */
3156 if (be_is_mc(adapter)) 3162 if (!adapter->num_msix_roce_vec)
3157 adapter->cmd_privileges = MAX_PRIVILEGES; 3163 be_msix_disable(adapter);
3158 3164
3159 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3165 be_clear_queues(adapter);
3160 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3161 3166
3162 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) 3167 if (!msix_enabled(adapter)) {
3163 en_flags |= BE_IF_FLAGS_RSS; 3168 status = be_msix_enable(adapter);
3169 if (status)
3170 return status;
3171 }
3164 3172
3165 en_flags = en_flags & adapter->if_cap_flags; 3173 status = be_setup_queues(adapter);
3174 if (status)
3175 return status;
3166 3176
3167 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags, 3177 be_schedule_worker(adapter);
3168 &adapter->if_handle, 0); 3178
3169 if (status != 0) 3179 if (netif_running(netdev))
3180 status = be_open(netdev);
3181
3182 return status;
3183}
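
be_update_queues() is the rebuild path, apparently in support of ethtool set-channels: quiesce, tear everything down, re-program MSI-X only when the vectors are not shared with RoCE, then rebuild and resume. A sketch of just the ordering, with every step stubbed out:

#include <stdio.h>

static void step(const char *s) { printf("%s\n", s); }

int main(void)
{
	int running = 1, roce_vecs_shared = 0;

	if (running)
		step("close netdev");
	step("cancel worker");
	if (!roce_vecs_shared)
		step("disable MSI-X");		/* can't touch shared vectors */
	step("destroy EQs/TXQs/RXQs/MCC");
	if (!roce_vecs_shared)
		step("re-enable MSI-X with new vector count");
	step("recreate queues");
	step("reschedule worker");
	if (running)
		step("reopen netdev");
	return 0;
}
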
3184
3185static int be_setup(struct be_adapter *adapter)
3186{
3187 struct device *dev = &adapter->pdev->dev;
3188 u32 tx_fc, rx_fc, en_flags;
3189 int status;
3190
3191 be_setup_init(adapter);
3192
3193 if (!lancer_chip(adapter))
3194 be_cmd_req_native_mode(adapter);
3195
3196 status = be_get_config(adapter);
3197 if (status)
3170 goto err; 3198 goto err;
3171 3199
3172 memset(mac, 0, ETH_ALEN); 3200 status = be_msix_enable(adapter);
3173 active_mac = false; 3201 if (status)
3174 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3175 &active_mac, &adapter->pmac_id[0]);
3176 if (status != 0)
3177 goto err; 3202 goto err;
3178 3203
3179 if (!active_mac) { 3204 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3180 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle, 3205 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3181 &adapter->pmac_id[0], 0); 3206 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3182 if (status != 0) 3207 en_flags |= BE_IF_FLAGS_RSS;
3183 goto err; 3208 en_flags = en_flags & be_if_cap_flags(adapter);
3184 } 3209 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3210 &adapter->if_handle, 0);
3211 if (status)
3212 goto err;
3185 3213
3186 if (is_zero_ether_addr(adapter->netdev->dev_addr)) { 3214 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3187 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 3215 rtnl_lock();
3188 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 3216 status = be_setup_queues(adapter);
3189 } 3217 rtnl_unlock();
3218 if (status)
3219 goto err;
3190 3220
3191 status = be_tx_qs_create(adapter); 3221 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3222 /* In UMC mode FW does not return right privileges.
3223 * Override with correct privilege equivalent to PF.
3224 */
3225 if (be_is_mc(adapter))
3226 adapter->cmd_privileges = MAX_PRIVILEGES;
3227
3228 status = be_mac_setup(adapter);
3192 if (status) 3229 if (status)
3193 goto err; 3230 goto err;
3194 3231
@@ -3205,8 +3242,8 @@ static int be_setup(struct be_adapter *adapter)
3205 be_cmd_set_flow_control(adapter, adapter->tx_fc, 3242 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3206 adapter->rx_fc); 3243 adapter->rx_fc);
3207 3244
3208 if (be_physfn(adapter)) { 3245 if (be_physfn(adapter) && num_vfs) {
3209 if (adapter->dev_num_vfs) 3246 if (be_max_vfs(adapter))
3210 be_vf_setup(adapter); 3247 be_vf_setup(adapter);
3211 else 3248 else
3212 dev_warn(dev, "device doesn't support SRIOV\n"); 3249 dev_warn(dev, "device doesn't support SRIOV\n");
@@ -3216,8 +3253,7 @@ static int be_setup(struct be_adapter *adapter)
3216 if (!status && be_pause_supported(adapter)) 3253 if (!status && be_pause_supported(adapter))
3217 adapter->phy.fc_autoneg = 1; 3254 adapter->phy.fc_autoneg = 1;
3218 3255
3219 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 3256 be_schedule_worker(adapter);
3220 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3221 return 0; 3257 return 0;
3222err: 3258err:
3223 be_clear(adapter); 3259 be_clear(adapter);
@@ -3241,7 +3277,7 @@ static void be_netpoll(struct net_device *netdev)
3241#endif 3277#endif
3242 3278
3243#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 3279#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3244char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; 3280static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3245 3281
3246static bool be_flash_redboot(struct be_adapter *adapter, 3282static bool be_flash_redboot(struct be_adapter *adapter,
3247 const u8 *p, u32 img_start, int image_size, 3283 const u8 *p, u32 img_start, int image_size,
@@ -3298,7 +3334,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
3298 3334
3299} 3335}
3300 3336
3301struct flash_section_info *get_fsec_info(struct be_adapter *adapter, 3337static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3302 int header_size, 3338 int header_size,
3303 const struct firmware *fw) 3339 const struct firmware *fw)
3304{ 3340{
@@ -3760,6 +3796,74 @@ fw_exit:
3760 return status; 3796 return status;
3761} 3797}
3762 3798
3799static int be_ndo_bridge_setlink(struct net_device *dev,
3800 struct nlmsghdr *nlh)
3801{
3802 struct be_adapter *adapter = netdev_priv(dev);
3803 struct nlattr *attr, *br_spec;
3804 int rem;
3805 int status = 0;
3806 u16 mode = 0;
3807
3808 if (!sriov_enabled(adapter))
3809 return -EOPNOTSUPP;
3810
3811 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3812
3813 nla_for_each_nested(attr, br_spec, rem) {
3814 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3815 continue;
3816
3817 mode = nla_get_u16(attr);
3818 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3819 return -EINVAL;
3820
3821 status = be_cmd_set_hsw_config(adapter, 0, 0,
3822 adapter->if_handle,
3823 mode == BRIDGE_MODE_VEPA ?
3824 PORT_FWD_TYPE_VEPA :
3825 PORT_FWD_TYPE_VEB);
3826 if (status)
3827 goto err;
3828
3829 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3830 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3831
3832 return status;
3833 }
3834err:
3835 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3836 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3837
3838 return status;
3839}
3840
3841static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3842 struct net_device *dev,
3843 u32 filter_mask)
3844{
3845 struct be_adapter *adapter = netdev_priv(dev);
3846 int status = 0;
3847 u8 hsw_mode;
3848
3849 if (!sriov_enabled(adapter))
3850 return 0;
3851
3852 /* BE and Lancer chips support VEB mode only */
3853 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3854 hsw_mode = PORT_FWD_TYPE_VEB;
3855 } else {
3856 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3857 adapter->if_handle, &hsw_mode);
3858 if (status)
3859 return 0;
3860 }
3861
3862 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3863 hsw_mode == PORT_FWD_TYPE_VEPA ?
3864 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3865}
3866
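
The setlink handler above walks the nested IFLA_AF_SPEC attributes, ignores everything but IFLA_BRIDGE_MODE, rejects any mode other than VEB/VEPA, and programs the first valid mode it finds. A userspace model of that parse loop - the attribute struct and enum values are stand-ins, not the netlink ABI:

#include <stdio.h>

enum { BRIDGE_MODE_VEB = 0, BRIDGE_MODE_VEPA = 1, ATTR_BRIDGE_MODE = 1 };

struct attr { int type; int value; };

int main(void)
{
	struct attr spec[] = { { 0, 7 }, { ATTR_BRIDGE_MODE, BRIDGE_MODE_VEPA } };
	int i, n = sizeof(spec) / sizeof(spec[0]);

	for (i = 0; i < n; i++) {
		if (spec[i].type != ATTR_BRIDGE_MODE)
			continue;		/* skip unrelated attributes */
		if (spec[i].value != BRIDGE_MODE_VEB &&
		    spec[i].value != BRIDGE_MODE_VEPA)
			return 1;		/* -EINVAL in the driver */
		printf("switch mode: %s\n",
		       spec[i].value == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
		return 0;
	}
	return 0;
}
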
3763static const struct net_device_ops be_netdev_ops = { 3867static const struct net_device_ops be_netdev_ops = {
3764 .ndo_open = be_open, 3868 .ndo_open = be_open,
3765 .ndo_stop = be_close, 3869 .ndo_stop = be_close,
@@ -3778,13 +3882,13 @@ static const struct net_device_ops be_netdev_ops = {
3778#ifdef CONFIG_NET_POLL_CONTROLLER 3882#ifdef CONFIG_NET_POLL_CONTROLLER
3779 .ndo_poll_controller = be_netpoll, 3883 .ndo_poll_controller = be_netpoll,
3780#endif 3884#endif
3885 .ndo_bridge_setlink = be_ndo_bridge_setlink,
3886 .ndo_bridge_getlink = be_ndo_bridge_getlink,
3781}; 3887};
3782 3888
3783static void be_netdev_init(struct net_device *netdev) 3889static void be_netdev_init(struct net_device *netdev)
3784{ 3890{
3785 struct be_adapter *adapter = netdev_priv(netdev); 3891 struct be_adapter *adapter = netdev_priv(netdev);
3786 struct be_eq_obj *eqo;
3787 int i;
3788 3892
3789 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 3893 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3790 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 3894 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
@@ -3807,9 +3911,6 @@ static void be_netdev_init(struct net_device *netdev)
3807 netdev->netdev_ops = &be_netdev_ops; 3911 netdev->netdev_ops = &be_netdev_ops;
3808 3912
3809 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); 3913 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3810
3811 for_all_evt_queues(adapter, eqo, i)
3812 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3813} 3914}
3814 3915
3815static void be_unmap_pci_bars(struct be_adapter *adapter) 3916static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -3916,9 +4017,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
3916 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 4017 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3917 4018
3918 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 4019 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3919 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size, 4020 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
3920 &rx_filter->dma, 4021 rx_filter->size, &rx_filter->dma,
3921 GFP_KERNEL | __GFP_ZERO); 4022 GFP_KERNEL);
3922 if (rx_filter->va == NULL) { 4023 if (rx_filter->va == NULL) {
3923 status = -ENOMEM; 4024 status = -ENOMEM;
3924 goto free_mbox; 4025 goto free_mbox;
@@ -3964,8 +4065,8 @@ static int be_stats_init(struct be_adapter *adapter)
3964 /* BE3 and Skyhawk */ 4065 /* BE3 and Skyhawk */
3965 cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 4066 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3966 4067
3967 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, 4068 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3968 GFP_KERNEL | __GFP_ZERO); 4069 GFP_KERNEL);
3969 if (cmd->va == NULL) 4070 if (cmd->va == NULL)
3970 return -1; 4071 return -1;
3971 return 0; 4072 return 0;
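
These dma_zalloc_coherent() conversions (here and in be_setup_wol() above) drop the explicit __GFP_ZERO because the z-variant guarantees zeroed memory itself; in kernels of this era dma_zalloc_coherent() was a thin wrapper that added __GFP_ZERO to dma_alloc_coherent(). A minimal userspace sketch of that wrapper pattern, with malloc/memset playing the roles:

#include <stdlib.h>
#include <string.h>

static void *alloc_coherent(size_t size) { return malloc(size); }

static void *zalloc_coherent(size_t size)
{
	void *va = alloc_coherent(size);

	if (va)
		memset(va, 0, size);	/* the 'z' in zalloc */
	return va;
}

int main(void)
{
	void *va = zalloc_coherent(64);

	free(va);
	return va ? 0 : 1;
}
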
@@ -4072,6 +4173,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
4072 level = be_get_fw_log_level(adapter); 4173 level = be_get_fw_log_level(adapter);
4073 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; 4174 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4074 4175
4176 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4075 return 0; 4177 return 0;
4076} 4178}
4077 4179
@@ -4164,7 +4266,8 @@ static void be_worker(struct work_struct *work)
4164 be_cmd_get_stats(adapter, &adapter->stats_cmd); 4266 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4165 } 4267 }
4166 4268
4167 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) 4269 if (be_physfn(adapter) &&
4270 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4168 be_cmd_get_die_temperature(adapter); 4271 be_cmd_get_die_temperature(adapter);
4169 4272
4170 for_all_rx_queues(adapter, rxo, i) { 4273 for_all_rx_queues(adapter, rxo, i) {
@@ -4253,7 +4356,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4253 4356
4254 status = pci_enable_pcie_error_reporting(pdev); 4357 status = pci_enable_pcie_error_reporting(pdev);
4255 if (status) 4358 if (status)
4256 dev_err(&pdev->dev, "Could not use PCIe error reporting\n"); 4359 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
4257 4360
4258 status = be_ctrl_init(adapter); 4361 status = be_ctrl_init(adapter);
4259 if (status) 4362 if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index f3d126dcc104..9cd5415fe017 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -60,7 +60,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
60 */ 60 */
61 num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec; 61 num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
62 dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX; 62 dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
63 dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS); 63 dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);
64 /* provide start index of the vector, 64 /* provide start index of the vector,
65 * so in case of linear usage, 65 * so in case of linear usage,
66 * it can use the base as starting point. 66 * it can use the base as starting point.
@@ -93,7 +93,7 @@ void be_roce_dev_add(struct be_adapter *adapter)
93 } 93 }
94} 94}
95 95
96void _be_roce_dev_remove(struct be_adapter *adapter) 96static void _be_roce_dev_remove(struct be_adapter *adapter)
97{ 97{
98 if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev) 98 if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
99 ocrdma_drv->remove(adapter->ocrdma_dev); 99 ocrdma_drv->remove(adapter->ocrdma_dev);
@@ -110,7 +110,7 @@ void be_roce_dev_remove(struct be_adapter *adapter)
110 } 110 }
111} 111}
112 112
113void _be_roce_dev_open(struct be_adapter *adapter) 113static void _be_roce_dev_open(struct be_adapter *adapter)
114{ 114{
115 if (ocrdma_drv && adapter->ocrdma_dev && 115 if (ocrdma_drv && adapter->ocrdma_dev &&
116 ocrdma_drv->state_change_handler) 116 ocrdma_drv->state_change_handler)
@@ -126,7 +126,7 @@ void be_roce_dev_open(struct be_adapter *adapter)
126 } 126 }
127} 127}
128 128
129void _be_roce_dev_close(struct be_adapter *adapter) 129static void _be_roce_dev_close(struct be_adapter *adapter)
130{ 130{
131 if (ocrdma_drv && adapter->ocrdma_dev && 131 if (ocrdma_drv && adapter->ocrdma_dev &&
132 ocrdma_drv->state_change_handler) 132 ocrdma_drv->state_change_handler)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 276572998463..2cd1129e19af 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -29,7 +29,7 @@ enum be_interrupt_mode {
29 BE_INTERRUPT_MODE_MSI = 2, 29 BE_INTERRUPT_MODE_MSI = 2,
30}; 30};
31 31
32#define MAX_ROCE_MSIX_VECTORS 16 32#define MAX_MSIX_VECTORS 32
33struct be_dev_info { 33struct be_dev_info {
34 u8 __iomem *db; 34 u8 __iomem *db;
35 u64 unmapped_db; 35 u64 unmapped_db;
@@ -45,7 +45,7 @@ struct be_dev_info {
45 struct { 45 struct {
46 int num_vectors; 46 int num_vectors;
47 int start_vector; 47 int start_vector;
48 u32 vector_list[MAX_ROCE_MSIX_VECTORS]; 48 u32 vector_list[MAX_MSIX_VECTORS];
49 } msix; 49 } msix;
50}; 50};
51 51
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index cf579fb39bc5..4de8cfd149cf 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1030,8 +1030,8 @@ static int ethoc_probe(struct platform_device *pdev)
1030 } 1030 }
1031 1031
1032 /* Allow the platform setup code to pass in a MAC address. */ 1032 /* Allow the platform setup code to pass in a MAC address. */
1033 if (pdev->dev.platform_data) { 1033 if (dev_get_platdata(&pdev->dev)) {
1034 struct ethoc_platform_data *pdata = pdev->dev.platform_data; 1034 struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
1035 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1035 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
1036 priv->phy_id = pdata->phy_id; 1036 priv->phy_id = pdata->phy_id;
1037 } else { 1037 } else {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 934e1ae279f0..212f44b3a773 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -778,10 +778,9 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
778{ 778{
779 int i; 779 int i;
780 780
781 priv->descs = dma_alloc_coherent(priv->dev, 781 priv->descs = dma_zalloc_coherent(priv->dev,
782 sizeof(struct ftgmac100_descs), 782 sizeof(struct ftgmac100_descs),
783 &priv->descs_dma_addr, 783 &priv->descs_dma_addr, GFP_KERNEL);
784 GFP_KERNEL | __GFP_ZERO);
785 if (!priv->descs) 784 if (!priv->descs)
786 return -ENOMEM; 785 return -ENOMEM;
787 786
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 4658f4cc1969..8be5b40c0a12 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,10 +732,10 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
732{ 732{
733 int i; 733 int i;
734 734
735 priv->descs = dma_alloc_coherent(priv->dev, 735 priv->descs = dma_zalloc_coherent(priv->dev,
736 sizeof(struct ftmac100_descs), 736 sizeof(struct ftmac100_descs),
737 &priv->descs_dma_addr, 737 &priv->descs_dma_addr,
738 GFP_KERNEL | __GFP_ZERO); 738 GFP_KERNEL);
739 if (!priv->descs) 739 if (!priv->descs)
740 return -ENOMEM; 740 return -ENOMEM;
741 741
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a8..0120217a16dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
296 /* The ring entries to be free()ed */ 296 /* The ring entries to be free()ed */
297 struct bufdesc *dirty_tx; 297 struct bufdesc *dirty_tx;
298 298
299 unsigned short tx_ring_size;
300 unsigned short rx_ring_size;
301
299 struct platform_device *pdev; 302 struct platform_device *pdev;
300 303
301 int opened; 304 int opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c610a2716be4..f9aacf5d8523 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -69,7 +69,6 @@ static void set_multicast_list(struct net_device *ndev);
69#endif 69#endif
70 70
71#define DRIVER_NAME "fec" 71#define DRIVER_NAME "fec"
72#define FEC_NAPI_WEIGHT 64
73 72
74/* Pause frame field and FIFO threshold */ 73/* Pause frame field and FIFO threshold */
75#define FEC_ENET_FCE (1 << 5) 74#define FEC_ENET_FCE (1 << 5)
@@ -239,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
239 238
240static int mii_cnt; 239static int mii_cnt;
241 240
242static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex) 241static inline
242struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
243{ 243{
244 struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; 244 struct bufdesc *new_bd = bdp + 1;
245 if (is_ex) 245 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
246 return (struct bufdesc *)(ex + 1); 246 struct bufdesc_ex *ex_base;
247 struct bufdesc *base;
248 int ring_size;
249
250 if (bdp >= fep->tx_bd_base) {
251 base = fep->tx_bd_base;
252 ring_size = fep->tx_ring_size;
253 ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
254 } else {
255 base = fep->rx_bd_base;
256 ring_size = fep->rx_ring_size;
257 ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
258 }
259
260 if (fep->bufdesc_ex)
261 return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
262 ex_base : ex_new_bd);
247 else 263 else
248 return bdp + 1; 264 return (new_bd >= (base + ring_size)) ?
265 base : new_bd;
249} 266}
250 267
251static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex) 268static inline
269struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
252{ 270{
253 struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; 271 struct bufdesc *new_bd = bdp - 1;
254 if (is_ex) 272 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
255 return (struct bufdesc *)(ex - 1); 273 struct bufdesc_ex *ex_base;
274 struct bufdesc *base;
275 int ring_size;
276
277 if (bdp >= fep->tx_bd_base) {
278 base = fep->tx_bd_base;
279 ring_size = fep->tx_ring_size;
280 ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
281 } else {
282 base = fep->rx_bd_base;
283 ring_size = fep->rx_ring_size;
284 ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
285 }
286
287 if (fep->bufdesc_ex)
288 return (struct bufdesc *)((ex_new_bd < ex_base) ?
289 (ex_new_bd + ring_size) : ex_new_bd);
256 else 290 else
257 return bdp - 1; 291 return (new_bd < base) ? (new_bd + ring_size) : new_bd;
258} 292}
259 293
260static void *swap_buffer(void *bufaddr, int len) 294static void *swap_buffer(void *bufaddr, int len)
@@ -380,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
380 } 414 }
381 } 415 }
382 416
383 bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 417 bdp_pre = fec_enet_get_prevdesc(bdp, fep);
384 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && 418 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
385 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { 419 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
386 fep->delay_work.trig_tx = true; 420 fep->delay_work.trig_tx = true;
@@ -389,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
389 } 423 }
390 424
391 /* If this was the last BD in the ring, start at the beginning again. */ 425 /* If this was the last BD in the ring, start at the beginning again. */
392 if (status & BD_ENET_TX_WRAP) 426 bdp = fec_enet_get_nextdesc(bdp, fep);
393 bdp = fep->tx_bd_base;
394 else
395 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
396 427
397 fep->cur_tx = bdp; 428 fep->cur_tx = bdp;
398 429
@@ -417,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
417 448
418 /* Initialize the receive buffer descriptors. */ 449 /* Initialize the receive buffer descriptors. */
419 bdp = fep->rx_bd_base; 450 bdp = fep->rx_bd_base;
420 for (i = 0; i < RX_RING_SIZE; i++) { 451 for (i = 0; i < fep->rx_ring_size; i++) {
421 452
422 /* Initialize the BD for every fragment in the page. */ 453 /* Initialize the BD for every fragment in the page. */
423 if (bdp->cbd_bufaddr) 454 if (bdp->cbd_bufaddr)
424 bdp->cbd_sc = BD_ENET_RX_EMPTY; 455 bdp->cbd_sc = BD_ENET_RX_EMPTY;
425 else 456 else
426 bdp->cbd_sc = 0; 457 bdp->cbd_sc = 0;
427 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 458 bdp = fec_enet_get_nextdesc(bdp, fep);
428 } 459 }
429 460
430 /* Set the last buffer to wrap */ 461 /* Set the last buffer to wrap */
431 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 462 bdp = fec_enet_get_prevdesc(bdp, fep);
432 bdp->cbd_sc |= BD_SC_WRAP; 463 bdp->cbd_sc |= BD_SC_WRAP;
433 464
434 fep->cur_rx = fep->rx_bd_base; 465 fep->cur_rx = fep->rx_bd_base;
@@ -436,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
436 /* ...and the same for transmit */ 467 /* ...and the same for transmit */
437 bdp = fep->tx_bd_base; 468 bdp = fep->tx_bd_base;
438 fep->cur_tx = bdp; 469 fep->cur_tx = bdp;
439 for (i = 0; i < TX_RING_SIZE; i++) { 470 for (i = 0; i < fep->tx_ring_size; i++) {
440 471
441 /* Initialize the BD for every fragment in the page. */ 472 /* Initialize the BD for every fragment in the page. */
442 bdp->cbd_sc = 0; 473 bdp->cbd_sc = 0;
@@ -445,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
445 fep->tx_skbuff[i] = NULL; 476 fep->tx_skbuff[i] = NULL;
446 } 477 }
447 bdp->cbd_bufaddr = 0; 478 bdp->cbd_bufaddr = 0;
448 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 479 bdp = fec_enet_get_nextdesc(bdp, fep);
449 } 480 }
450 481
451 /* Set the last buffer to wrap */ 482 /* Set the last buffer to wrap */
452 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 483 bdp = fec_enet_get_prevdesc(bdp, fep);
453 bdp->cbd_sc |= BD_SC_WRAP; 484 bdp->cbd_sc |= BD_SC_WRAP;
454 fep->dirty_tx = bdp; 485 fep->dirty_tx = bdp;
455} 486}
@@ -510,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
510 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); 541 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
511 if (fep->bufdesc_ex) 542 if (fep->bufdesc_ex)
512 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) 543 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
513 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); 544 * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
514 else 545 else
515 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) 546 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
516 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); 547 * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
517 548
518 549
519 for (i = 0; i <= TX_RING_MOD_MASK; i++) { 550 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -727,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
727 bdp = fep->dirty_tx; 758 bdp = fep->dirty_tx;
728 759
729 /* get next bdp of dirty_tx */ 760 /* get next bdp of dirty_tx */
730 if (bdp->cbd_sc & BD_ENET_TX_WRAP) 761 bdp = fec_enet_get_nextdesc(bdp, fep);
731 bdp = fep->tx_bd_base;
732 else
733 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
734 762
735 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { 763 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
736 764
@@ -800,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
800 fep->dirty_tx = bdp; 828 fep->dirty_tx = bdp;
801 829
802 /* Update pointer to next buffer descriptor to be transmitted */ 830 /* Update pointer to next buffer descriptor to be transmitted */
803 if (status & BD_ENET_TX_WRAP) 831 bdp = fec_enet_get_nextdesc(bdp, fep);
804 bdp = fep->tx_bd_base;
805 else
806 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
807 832
808 /* Since we have freed up a buffer, the ring is no longer full 833 /* Since we have freed up a buffer, the ring is no longer full
809 */ 834 */
@@ -993,10 +1018,8 @@ rx_processing_done:
993 } 1018 }
994 1019
995 /* Update BD pointer to next entry */ 1020 /* Update BD pointer to next entry */
996 if (status & BD_ENET_RX_WRAP) 1021 bdp = fec_enet_get_nextdesc(bdp, fep);
997 bdp = fep->rx_bd_base; 1022
998 else
999 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
1000 /* Doing this here will keep the FEC running while we process 1023 /* Doing this here will keep the FEC running while we process
1001 * incoming frames. On a heavily loaded network, we should be 1024 * incoming frames. On a heavily loaded network, we should be
1002 * able to keep up at the expense of system resources. 1025 * able to keep up at the expense of system resources.
@@ -1059,7 +1082,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1059static void fec_get_mac(struct net_device *ndev) 1082static void fec_get_mac(struct net_device *ndev)
1060{ 1083{
1061 struct fec_enet_private *fep = netdev_priv(ndev); 1084 struct fec_enet_private *fep = netdev_priv(ndev);
1062 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 1085 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1063 unsigned char *iap, tmpaddr[ETH_ALEN]; 1086 unsigned char *iap, tmpaddr[ETH_ALEN];
1064 1087
1065 /* 1088 /*
@@ -1099,10 +1122,10 @@ static void fec_get_mac(struct net_device *ndev)
1099 * 4) FEC mac registers set by bootloader 1122 * 4) FEC mac registers set by bootloader
1100 */ 1123 */
1101 if (!is_valid_ether_addr(iap)) { 1124 if (!is_valid_ether_addr(iap)) {
1102 *((unsigned long *) &tmpaddr[0]) = 1125 *((__be32 *) &tmpaddr[0]) =
1103 be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW)); 1126 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1104 *((unsigned short *) &tmpaddr[4]) = 1127 *((__be16 *) &tmpaddr[4]) =
1105 be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 1128 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1106 iap = &tmpaddr[0]; 1129 iap = &tmpaddr[0];
1107 } 1130 }
1108 1131
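
The cast change above (be32_to_cpu on a readl value becomes cpu_to_be32 stored through a __be32 pointer) makes the MAC bytes land in the same order on any host: the register words are written into the byte array in big-endian order. A sketch of that byte-order discipline with explicit shifts (the register values are examples, not real hardware contents):

#include <stdio.h>
#include <stdint.h>

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8; p[1] = v;
}

int main(void)
{
	uint8_t mac[6];

	put_be32(&mac[0], 0x00049F01);	/* models FEC_ADDR_LOW */
	put_be16(&mac[4], 0x3061);	/* models FEC_ADDR_HIGH >> 16 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
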
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct bufdesc *bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = fep->rx_skbuff[i];
 
 		if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 				 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++)
+	for (i = 0; i < fep->tx_ring_size; i++)
 		kfree(fep->tx_bounce[i]);
 }
 
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	struct bufdesc *bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_RX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 
 		bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Get the Ethernet address */
 	fec_get_mac(ndev);
 
+	/* init the tx & rx ring size */
+	fep->tx_ring_size = TX_RING_SIZE;
+	fep->rx_ring_size = RX_RING_SIZE;
+
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	if (fep->bufdesc_ex)
 		fep->tx_bd_base = (struct bufdesc *)
-			(((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
 	else
-		fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+		fep->tx_bd_base = cbd_base + fep->rx_ring_size;
 
 	/* The FEC Ethernet specific entries in the device structure */
 	ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1980,7 +2007,7 @@ static int fec_enet_init(struct net_device *ndev)
 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
 
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
-	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
 
 	if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
 		/* enable hw VLAN support */
@@ -2055,10 +2082,6 @@ fec_probe(struct platform_device *pdev)
2055 if (of_id) 2082 if (of_id)
2056 pdev->id_entry = of_id->data; 2083 pdev->id_entry = of_id->data;
2057 2084
2058 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2059 if (!r)
2060 return -ENXIO;
2061
2062 /* Init network device */ 2085 /* Init network device */
2063 ndev = alloc_etherdev(sizeof(struct fec_enet_private)); 2086 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
2064 if (!ndev) 2087 if (!ndev)
@@ -2076,6 +2099,7 @@ fec_probe(struct platform_device *pdev)
2076 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 2099 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
2077#endif 2100#endif
2078 2101
2102 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2079 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 2103 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2080 if (IS_ERR(fep->hwp)) { 2104 if (IS_ERR(fep->hwp)) {
2081 ret = PTR_ERR(fep->hwp); 2105 ret = PTR_ERR(fep->hwp);
@@ -2091,7 +2115,7 @@ fec_probe(struct platform_device *pdev)
 
 	ret = of_get_phy_mode(pdev->dev.of_node);
 	if (ret < 0) {
-		pdata = pdev->dev.platform_data;
+		pdata = dev_get_platdata(&pdev->dev);
 		if (pdata)
 			fep->phy_interface = pdata->phy;
 		else
@@ -2125,10 +2149,25 @@ fec_probe(struct platform_device *pdev)
 		fep->bufdesc_ex = 0;
 	}
 
-	clk_prepare_enable(fep->clk_ahb);
-	clk_prepare_enable(fep->clk_ipg);
-	clk_prepare_enable(fep->clk_enet_out);
-	clk_prepare_enable(fep->clk_ptp);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk;
+
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	if (fep->clk_enet_out) {
+		ret = clk_prepare_enable(fep->clk_enet_out);
+		if (ret)
+			goto failed_clk_enet_out;
+	}
+
+	if (fep->clk_ptp) {
+		ret = clk_prepare_enable(fep->clk_ptp);
+		if (ret)
+			goto failed_clk_ptp;
+	}
 
 	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
 	if (!IS_ERR(fep->reg_phy)) {
@@ -2159,14 +2198,10 @@ fec_probe(struct platform_device *pdev)
 			ret = irq;
 			goto failed_irq;
 		}
-		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
-		if (ret) {
-			while (--i >= 0) {
-				irq = platform_get_irq(pdev, i);
-				free_irq(irq, ndev);
-			}
+		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+				       IRQF_DISABLED, pdev->name, ndev);
+		if (ret)
 			goto failed_irq;
-		}
 	}
 
 	ret = fec_enet_mii_init(pdev);
@@ -2190,19 +2225,19 @@ failed_register:
 	fec_enet_mii_remove(fep);
 failed_mii_init:
 failed_irq:
-	for (i = 0; i < FEC_IRQ_NUM; i++) {
-		irq = platform_get_irq(pdev, i);
-		if (irq > 0)
-			free_irq(irq, ndev);
-	}
 failed_init:
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
 failed_regulator:
-	clk_disable_unprepare(fep->clk_ahb);
+	if (fep->clk_ptp)
+		clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ptp:
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
 	clk_disable_unprepare(fep->clk_ipg);
-	clk_disable_unprepare(fep->clk_enet_out);
-	clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
 failed_clk:
 failed_ioremap:
 	free_netdev(ndev);
@@ -2215,25 +2250,21 @@ fec_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
 
 	cancel_delayed_work_sync(&(fep->delay_work.delay_work));
 	unregister_netdev(ndev);
 	fec_enet_mii_remove(fep);
 	del_timer_sync(&fep->time_keep);
-	for (i = 0; i < FEC_IRQ_NUM; i++) {
-		int irq = platform_get_irq(pdev, i);
-		if (irq > 0)
-			free_irq(irq, ndev);
-	}
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
-	clk_disable_unprepare(fep->clk_ptp);
+	if (fep->clk_ptp)
+		clk_disable_unprepare(fep->clk_ptp);
 	if (fep->ptp_clock)
 		ptp_clock_unregister(fep->ptp_clock);
-	clk_disable_unprepare(fep->clk_enet_out);
-	clk_disable_unprepare(fep->clk_ahb);
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
 	clk_disable_unprepare(fep->clk_ipg);
+	clk_disable_unprepare(fep->clk_ahb);
 	free_netdev(ndev);
 
 	return 0;
@@ -2250,9 +2281,12 @@ fec_suspend(struct device *dev)
 		fec_stop(ndev);
 		netif_device_detach(ndev);
 	}
-	clk_disable_unprepare(fep->clk_enet_out);
-	clk_disable_unprepare(fep->clk_ahb);
+	if (fep->clk_ptp)
+		clk_disable_unprepare(fep->clk_ptp);
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
 	clk_disable_unprepare(fep->clk_ipg);
+	clk_disable_unprepare(fep->clk_ahb);
 
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
@@ -2273,15 +2307,44 @@ fec_resume(struct device *dev)
 		return ret;
 	}
 
-	clk_prepare_enable(fep->clk_enet_out);
-	clk_prepare_enable(fep->clk_ahb);
-	clk_prepare_enable(fep->clk_ipg);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk_ahb;
+
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	if (fep->clk_enet_out) {
+		ret = clk_prepare_enable(fep->clk_enet_out);
+		if (ret)
+			goto failed_clk_enet_out;
+	}
+
+	if (fep->clk_ptp) {
+		ret = clk_prepare_enable(fep->clk_ptp);
+		if (ret)
+			goto failed_clk_ptp;
+	}
+
 	if (netif_running(ndev)) {
 		fec_restart(ndev, fep->full_duplex);
 		netif_device_attach(ndev);
 	}
 
 	return 0;
+
+failed_clk_ptp:
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+	clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+	if (fep->reg_phy)
+		regulator_disable(fep->reg_phy);
+	return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
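
The probe, remove, suspend and resume hunks above all follow the same kernel convention for multiple clocks: every clk_prepare_enable() return value is checked, optional clocks are guarded by a NULL test, and the error labels unwind in exact reverse order of acquisition. A minimal sketch of the pattern, assuming two illustrative clocks (one mandatory, one optional; names are not from this driver):

#include <linux/clk.h>

/* Illustrative only: enable a mandatory and an optional clock. */
static int example_enable_clocks(struct clk *bus_clk, struct clk *ptp_clk)
{
	int ret;

	ret = clk_prepare_enable(bus_clk);
	if (ret)
		return ret;

	if (ptp_clk) {				/* optional, may be NULL */
		ret = clk_prepare_enable(ptp_clk);
		if (ret)
			goto err_bus;
	}
	return 0;

err_bus:
	clk_disable_unprepare(bus_clk);		/* reverse order on failure */
	return ret;
}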
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 360a578c2bb7..e0528900db02 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -123,12 +123,10 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
 
 static int mpc52xx_fec_mdio_remove(struct platform_device *of)
 {
-	struct device *dev = &of->dev;
-	struct mii_bus *bus = dev_get_drvdata(dev);
+	struct mii_bus *bus = platform_get_drvdata(of);
 	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
 
 	mdiobus_unregister(bus);
-	dev_set_drvdata(dev, NULL);
 	iounmap(priv->regs);
 	kfree(priv);
 	mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 8de53a14a6f4..6b60582ce8cf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -583,7 +583,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
583 struct sk_buff *skb) 583 struct sk_buff *skb)
584{ 584{
585 struct sk_buff *new_skb; 585 struct sk_buff *new_skb;
586 struct fs_enet_private *fep = netdev_priv(dev);
587 586
588 /* Alloc new skb */ 587 /* Alloc new skb */
589 new_skb = netdev_alloc_skb(dev, skb->len + 4); 588 new_skb = netdev_alloc_skb(dev, skb->len + 4);
@@ -1000,6 +999,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
1000 struct fs_enet_private *fep; 999 struct fs_enet_private *fep;
1001 struct fs_platform_info *fpi; 1000 struct fs_platform_info *fpi;
1002 const u32 *data; 1001 const u32 *data;
1002 struct clk *clk;
1003 int err;
1003 const u8 *mac_addr; 1004 const u8 *mac_addr;
1004 const char *phy_connection_type; 1005 const char *phy_connection_type;
1005 int privsize, len, ret = -ENODEV; 1006 int privsize, len, ret = -ENODEV;
@@ -1037,6 +1038,20 @@ static int fs_enet_probe(struct platform_device *ofdev)
1037 fpi->use_rmii = 1; 1038 fpi->use_rmii = 1;
1038 } 1039 }
1039 1040
1041 /* make clock lookup non-fatal (the driver is shared among platforms),
1042 * but require enable to succeed when a clock was specified/found,
1043 * keep a reference to the clock upon successful acquisition
1044 */
1045 clk = devm_clk_get(&ofdev->dev, "per");
1046 if (!IS_ERR(clk)) {
1047 err = clk_prepare_enable(clk);
1048 if (err) {
1049 ret = err;
1050 goto out_free_fpi;
1051 }
1052 fpi->clk_per = clk;
1053 }
1054
1040 privsize = sizeof(*fep) + 1055 privsize = sizeof(*fep) +
1041 sizeof(struct sk_buff **) * 1056 sizeof(struct sk_buff **) *
1042 (fpi->rx_ring + fpi->tx_ring); 1057 (fpi->rx_ring + fpi->tx_ring);
@@ -1108,6 +1123,8 @@ out_free_dev:
1108 free_netdev(ndev); 1123 free_netdev(ndev);
1109out_put: 1124out_put:
1110 of_node_put(fpi->phy_node); 1125 of_node_put(fpi->phy_node);
1126 if (fpi->clk_per)
1127 clk_disable_unprepare(fpi->clk_per);
1111out_free_fpi: 1128out_free_fpi:
1112 kfree(fpi); 1129 kfree(fpi);
1113 return ret; 1130 return ret;
@@ -1124,6 +1141,8 @@ static int fs_enet_remove(struct platform_device *ofdev)
1124 fep->ops->cleanup_data(ndev); 1141 fep->ops->cleanup_data(ndev);
1125 dev_set_drvdata(fep->dev, NULL); 1142 dev_set_drvdata(fep->dev, NULL);
1126 of_node_put(fep->fpi->phy_node); 1143 of_node_put(fep->fpi->phy_node);
1144 if (fep->fpi->clk_per)
1145 clk_disable_unprepare(fep->fpi->clk_per);
1127 free_netdev(ndev); 1146 free_netdev(ndev);
1128 return 0; 1147 return 0;
1129} 1148}
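
The fs_enet hunk treats the "per" clock as optional because the driver is shared across platforms: a failed lookup is ignored, but once a clock is found it must also enable successfully, and the reference is kept for the cleanup paths. A hedged sketch of that idiom (clock name taken from the hunk, function name illustrative):

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Sketch: optional clock; only report an error if it exists but won't run. */
static int example_get_optional_clk(struct platform_device *pdev,
				    struct clk **out)
{
	struct clk *clk = devm_clk_get(&pdev->dev, "per");

	if (IS_ERR(clk))
		return 0;	/* no clock specified: carry on without one */

	*out = clk;
	return clk_prepare_enable(clk);	/* must succeed once found */
}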
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c93a05654b46..c4f65067cf7c 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -409,7 +409,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 	priv->regs = priv->map + data->mii_offset;
 
 	new_bus->parent = &pdev->dev;
-	dev_set_drvdata(&pdev->dev, new_bus);
+	platform_set_drvdata(pdev, new_bus);
 
 	if (data->get_tbipa) {
 		for_each_child_of_node(np, tbi) {
@@ -468,8 +468,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
 
 	mdiobus_unregister(bus);
 
-	dev_set_drvdata(device, NULL);
-
 	iounmap(priv->map);
 	mdiobus_free(bus);
 
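
This and several later hunks swap dev_set_drvdata(&pdev->dev, ...) for the equivalent platform_set_drvdata(pdev, ...), paired with platform_get_drvdata() on the lookup side; the explicit dev_set_drvdata(dev, NULL) on remove is dropped because the driver core clears driver data after unbind. A small sketch of the pairing (illustrative names; the mii_bus payload mirrors the mdio drivers above):

#include <linux/phy.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus)
		return -ENOMEM;
	platform_set_drvdata(pdev, bus);	/* thin wrapper around dev_set_drvdata() */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct mii_bus *bus = platform_get_drvdata(pdev);

	mdiobus_free(bus);	/* no need to clear drvdata by hand */
	return 0;
}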
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 8d2db7b808b7..c4eaadeb572f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np,
593 return -EINVAL; 593 return -EINVAL;
594 } 594 }
595 595
596 grp->grp_id = priv->num_grps;
597 grp->priv = priv; 596 grp->priv = priv;
598 spin_lock_init(&grp->grplock); 597 spin_lock_init(&grp->grplock);
599 if (priv->mode == MQ_MG_MODE) { 598 if (priv->mode == MQ_MG_MODE) {
@@ -1017,7 +1016,14 @@ static int gfar_probe(struct platform_device *ofdev)
 	/* We need to delay at least 3 TX clocks */
 	udelay(2);
 
-	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+	tempval = 0;
+	if (!priv->pause_aneg_en && priv->tx_pause_en)
+		tempval |= MACCFG1_TX_FLOW;
+	if (!priv->pause_aneg_en && priv->rx_pause_en)
+		tempval |= MACCFG1_RX_FLOW;
+	/* the soft reset bit is not self-resetting, so we need to
+	 * clear it before resuming normal operation
+	 */
 	gfar_write(&regs->maccfg1, tempval);
 
 	/* Initialize MACCFG2. */
@@ -1461,7 +1467,7 @@ static int init_phy(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	uint gigabit_support =
 		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
-		SUPPORTED_1000baseT_Full : 0;
+		GFAR_SUPPORTED_GBIT : 0;
 	phy_interface_t interface;
 
 	priv->oldlink = 0;
@@ -2052,6 +2058,24 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2052 return skip_txbd(bdp, 1, base, ring_size); 2058 return skip_txbd(bdp, 1, base, ring_size);
2053} 2059}
2054 2060
2061/* eTSEC12: csum generation not supported for some fcb offsets */
2062static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2063 unsigned long fcb_addr)
2064{
2065 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2066 (fcb_addr % 0x20) > 0x18);
2067}
2068
2069/* eTSEC76: csum generation for frames larger than 2500 may
2070 * cause excess delays before start of transmission
2071 */
2072static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2073 unsigned int len)
2074{
2075 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2076 (len > 2500));
2077}
2078
2055/* This is called by the kernel when a frame is ready for transmission. 2079/* This is called by the kernel when a frame is ready for transmission.
2056 * It is pointed to by the dev->hard_start_xmit function pointer 2080 * It is pointed to by the dev->hard_start_xmit function pointer
2057 */ 2081 */
@@ -2064,23 +2088,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
 	u32 lstatus;
-	int i, rq = 0, do_tstamp = 0;
+	int i, rq = 0;
+	int do_tstamp, do_csum, do_vlan;
 	u32 bufaddr;
 	unsigned long flags;
-	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
-
-	/* TOE=1 frames larger than 2500 bytes may see excess delays
-	 * before start of transmission.
-	 */
-	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
-		     skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb->len > 2500)) {
-		int ret;
-
-		ret = skb_checksum_help(skb);
-		if (ret)
-			return ret;
-	}
+	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
 
 	rq = skb->queue_mapping;
 	tx_queue = priv->tx_queue[rq];
@@ -2088,21 +2100,23 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	base = tx_queue->tx_bd_base;
 	regs = tx_queue->grp->regs;
 
+	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+	do_vlan = vlan_tx_tag_present(skb);
+	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		    priv->hwts_tx_en;
+
+	if (do_csum || do_vlan)
+		fcb_len = GMAC_FCB_LEN;
+
 	/* check if time stamp should be generated */
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-		     priv->hwts_tx_en)) {
-		do_tstamp = 1;
-		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
-	}
+	if (unlikely(do_tstamp))
+		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
 
 	/* make space for additional header when fcb is needed */
-	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-	     vlan_tx_tag_present(skb) ||
-	     unlikely(do_tstamp)) &&
-	    (skb_headroom(skb) < fcb_length)) {
+	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
 		struct sk_buff *skb_new;
 
-		skb_new = skb_realloc_headroom(skb, fcb_length);
+		skb_new = skb_realloc_headroom(skb, fcb_len);
 		if (!skb_new) {
 			dev->stats.tx_errors++;
 			kfree_skb(skb);
@@ -2133,7 +2147,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Update transmit stats */
-	tx_queue->stats.tx_bytes += skb->len;
+	bytes_sent = skb->len;
+	tx_queue->stats.tx_bytes += bytes_sent;
+	/* keep Tx bytes on wire for BQL accounting */
+	GFAR_CB(skb)->bytes_sent = bytes_sent;
 	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
@@ -2153,12 +2170,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
+			unsigned int frag_len;
 			/* Point at the next BD, wrapping as needed */
 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
-			length = skb_shinfo(skb)->frags[i].size;
+			frag_len = skb_shinfo(skb)->frags[i].size;
 
-			lstatus = txbdp->lstatus | length |
+			lstatus = txbdp->lstatus | frag_len |
 				  BD_LFLAG(TXBD_READY);
 
 			/* Handle the last BD specially */
@@ -2168,7 +2186,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			bufaddr = skb_frag_dma_map(priv->dev,
 						   &skb_shinfo(skb)->frags[i],
 						   0,
-						   length,
+						   frag_len,
 						   DMA_TO_DEVICE);
 
 			/* set the TxBD length and buffer pointer */
@@ -2185,36 +2203,38 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		memset(skb->data, 0, GMAC_TXPAL_LEN);
 	}
 
-	/* Set up checksumming */
-	if (CHECKSUM_PARTIAL == skb->ip_summed) {
+	/* Add TxFCB if required */
+	if (fcb_len) {
 		fcb = gfar_add_fcb(skb);
-		/* as specified by errata */
-		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
-			     ((unsigned long)fcb % 0x20) > 0x18)) {
+		lstatus |= BD_LFLAG(TXBD_TOE);
+	}
+
+	/* Set up checksumming */
+	if (do_csum) {
+		gfar_tx_checksum(skb, fcb, fcb_len);
+
+		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
 			__skb_pull(skb, GMAC_FCB_LEN);
 			skb_checksum_help(skb);
-		} else {
-			lstatus |= BD_LFLAG(TXBD_TOE);
-			gfar_tx_checksum(skb, fcb, fcb_length);
+			if (do_vlan || do_tstamp) {
+				/* put back a new fcb for vlan/tstamp TOE */
+				fcb = gfar_add_fcb(skb);
+			} else {
+				/* Tx TOE not used */
+				lstatus &= ~(BD_LFLAG(TXBD_TOE));
+				fcb = NULL;
+			}
 		}
 	}
 
-	if (vlan_tx_tag_present(skb)) {
-		if (unlikely(NULL == fcb)) {
-			fcb = gfar_add_fcb(skb);
-			lstatus |= BD_LFLAG(TXBD_TOE);
-		}
-
+	if (do_vlan)
 		gfar_tx_vlan(skb, fcb);
-	}
 
 	/* Setup tx hardware time stamping if requested */
 	if (unlikely(do_tstamp)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		if (fcb == NULL)
-			fcb = gfar_add_fcb(skb);
 		fcb->ptp = 1;
-		lstatus |= BD_LFLAG(TXBD_TOE);
 	}
 
 	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
@@ -2226,15 +2246,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * the full frame length.
 	 */
 	if (unlikely(do_tstamp)) {
-		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
+		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-					 (skb_headlen(skb) - fcb_length);
+					 (skb_headlen(skb) - fcb_len);
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 	} else {
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 	}
 
-	netdev_tx_sent_queue(txq, skb->len);
+	netdev_tx_sent_queue(txq, bytes_sent);
 
 	/* We can work in parallel with gfar_clean_tx_ring(), except
 	 * when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2554,7 +2574,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			bdp = next_txbd(bdp, base, tx_ring_size);
 		}
 
-		bytes_sent += skb->len;
+		bytes_sent += GFAR_CB(skb)->bytes_sent;
 
 		dev_kfree_skb_any(skb);
 
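
The two hunks above make Byte Queue Limits accounting symmetric: the byte count handed to netdev_tx_sent_queue() is cached in the skb control block, so the completion path reports exactly the same value even though the skb length changes when the FCB is pulled or pushed. A schematic of the pairing, assuming a cb-backed field like gianfar's (names illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_skb_cb { unsigned int bytes_sent; };
#define EX_CB(skb) ((struct example_skb_cb *)((skb)->cb))

static void example_xmit_account(struct netdev_queue *txq, struct sk_buff *skb)
{
	EX_CB(skb)->bytes_sent = skb->len;	/* snapshot before headers move */
	netdev_tx_sent_queue(txq, EX_CB(skb)->bytes_sent);
}

static void example_tx_complete(struct netdev_queue *txq, struct sk_buff *skb)
{
	/* must mirror the value counted at xmit time, byte for byte */
	netdev_tx_completed_queue(txq, 1, EX_CB(skb)->bytes_sent);
}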
@@ -3014,6 +3034,41 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3014 return IRQ_HANDLED; 3034 return IRQ_HANDLED;
3015} 3035}
3016 3036
3037static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3038{
3039 struct phy_device *phydev = priv->phydev;
3040 u32 val = 0;
3041
3042 if (!phydev->duplex)
3043 return val;
3044
3045 if (!priv->pause_aneg_en) {
3046 if (priv->tx_pause_en)
3047 val |= MACCFG1_TX_FLOW;
3048 if (priv->rx_pause_en)
3049 val |= MACCFG1_RX_FLOW;
3050 } else {
3051 u16 lcl_adv, rmt_adv;
3052 u8 flowctrl;
3053 /* get link partner capabilities */
3054 rmt_adv = 0;
3055 if (phydev->pause)
3056 rmt_adv = LPA_PAUSE_CAP;
3057 if (phydev->asym_pause)
3058 rmt_adv |= LPA_PAUSE_ASYM;
3059
3060 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3061
3062 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3063 if (flowctrl & FLOW_CTRL_TX)
3064 val |= MACCFG1_TX_FLOW;
3065 if (flowctrl & FLOW_CTRL_RX)
3066 val |= MACCFG1_RX_FLOW;
3067 }
3068
3069 return val;
3070}
3071
3017/* Called every time the controller might need to be made 3072/* Called every time the controller might need to be made
3018 * aware of new link state. The PHY code conveys this 3073 * aware of new link state. The PHY code conveys this
3019 * information through variables in the phydev structure, and this 3074 * information through variables in the phydev structure, and this
@@ -3032,6 +3087,7 @@ static void adjust_link(struct net_device *dev)
3032 lock_tx_qs(priv); 3087 lock_tx_qs(priv);
3033 3088
3034 if (phydev->link) { 3089 if (phydev->link) {
3090 u32 tempval1 = gfar_read(&regs->maccfg1);
3035 u32 tempval = gfar_read(&regs->maccfg2); 3091 u32 tempval = gfar_read(&regs->maccfg2);
3036 u32 ecntrl = gfar_read(&regs->ecntrl); 3092 u32 ecntrl = gfar_read(&regs->ecntrl);
3037 3093
@@ -3080,6 +3136,10 @@ static void adjust_link(struct net_device *dev)
3080 priv->oldspeed = phydev->speed; 3136 priv->oldspeed = phydev->speed;
3081 } 3137 }
3082 3138
3139 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3140 tempval1 |= gfar_get_flowctrl_cfg(priv);
3141
3142 gfar_write(&regs->maccfg1, tempval1);
3083 gfar_write(&regs->maccfg2, tempval); 3143 gfar_write(&regs->maccfg2, tempval);
3084 gfar_write(&regs->ecntrl, ecntrl); 3144 gfar_write(&regs->ecntrl, ecntrl);
3085 3145
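
gfar_get_flowctrl_cfg() resolves the pause configuration from two sources: forced user settings when pause autonegotiation is off, or the local advertisement combined with the link partner's pause bits via the generic mii helpers when it is on. The autoneg half of that resolution, as a standalone hedged sketch (function name illustrative):

#include <linux/mii.h>
#include <linux/phy.h>

/* Sketch: local advertisement + partner pause bits -> TX/RX pause enables. */
static void example_resolve_pause(struct phy_device *phydev,
				  bool *tx_en, bool *rx_en)
{
	u16 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
	u16 rmt_adv = 0;
	u8 flowctrl;

	if (phydev->pause)
		rmt_adv = LPA_PAUSE_CAP;
	if (phydev->asym_pause)
		rmt_adv |= LPA_PAUSE_ASYM;

	flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
	*tx_en = !!(flowctrl & FLOW_CTRL_TX);
	*rx_en = !!(flowctrl & FLOW_CTRL_RX);
}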
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04b552cd419d..04112b98ff5d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -146,6 +146,10 @@ extern const char gfar_driver_version[];
146 | SUPPORTED_Autoneg \ 146 | SUPPORTED_Autoneg \
147 | SUPPORTED_MII) 147 | SUPPORTED_MII)
148 148
149#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
150 | SUPPORTED_Pause \
151 | SUPPORTED_Asym_Pause)
152
149/* TBI register addresses */ 153/* TBI register addresses */
150#define MII_TBICON 0x11 154#define MII_TBICON 0x11
151 155
@@ -571,7 +575,7 @@ struct rxfcb {
 };
 
 struct gianfar_skb_cb {
-	int alignamount;
+	unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */
 };
 
 #define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
@@ -1009,7 +1013,6 @@ struct gfar_irqinfo {
  * @napi: the napi poll function
 * @priv: back pointer to the priv structure
 * @regs: the ioremapped register space for this group
- * @grp_id: group id for this group
 * @irqinfo: TX/RX/ER irq data for this group
 */
 
@@ -1018,11 +1021,10 @@ struct gfar_priv_grp {
 	struct napi_struct napi;
 	struct gfar_private *priv;
 	struct gfar __iomem *regs;
-	unsigned int grp_id;
+	unsigned int rstat;
 	unsigned long num_rx_queues;
 	unsigned long rx_bit_map;
 	/* cacheline 3 */
-	unsigned int rstat;
 	unsigned int tstat;
 	unsigned long num_tx_queues;
 	unsigned long tx_bit_map;
@@ -1102,7 +1104,11 @@ struct gfar_private {
 		/* Wake-on-LAN enabled */
 		wol_en:1,
 		/* Enable priority based Tx scheduling in Hw */
-		prio_sched_en:1;
+		prio_sched_en:1,
+		/* Flow control flags */
+		pause_aneg_en:1,
+		tx_pause_en:1,
+		rx_pause_en:1;
 
 	/* The total tx and rx ring size for the enabled queues */
 	unsigned int total_tx_ring_size;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 21cd88124ca9..d3d7ede27ef1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -535,6 +535,78 @@ static int gfar_sringparam(struct net_device *dev,
535 return err; 535 return err;
536} 536}
537 537
538static void gfar_gpauseparam(struct net_device *dev,
539 struct ethtool_pauseparam *epause)
540{
541 struct gfar_private *priv = netdev_priv(dev);
542
543 epause->autoneg = !!priv->pause_aneg_en;
544 epause->rx_pause = !!priv->rx_pause_en;
545 epause->tx_pause = !!priv->tx_pause_en;
546}
547
548static int gfar_spauseparam(struct net_device *dev,
549 struct ethtool_pauseparam *epause)
550{
551 struct gfar_private *priv = netdev_priv(dev);
552 struct phy_device *phydev = priv->phydev;
553 struct gfar __iomem *regs = priv->gfargrp[0].regs;
554 u32 oldadv, newadv;
555
556 if (!(phydev->supported & SUPPORTED_Pause) ||
557 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
558 (epause->rx_pause != epause->tx_pause)))
559 return -EINVAL;
560
561 priv->rx_pause_en = priv->tx_pause_en = 0;
562 if (epause->rx_pause) {
563 priv->rx_pause_en = 1;
564
565 if (epause->tx_pause) {
566 priv->tx_pause_en = 1;
567 /* FLOW_CTRL_RX & TX */
568 newadv = ADVERTISED_Pause;
 569 } else /* FLOW_CTRL_RX */
570 newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
571 } else if (epause->tx_pause) {
572 priv->tx_pause_en = 1;
 573 /* FLOW_CTRL_TX */
574 newadv = ADVERTISED_Asym_Pause;
575 } else
576 newadv = 0;
577
578 if (epause->autoneg)
579 priv->pause_aneg_en = 1;
580 else
581 priv->pause_aneg_en = 0;
582
583 oldadv = phydev->advertising &
584 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
585 if (oldadv != newadv) {
586 phydev->advertising &=
587 ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
588 phydev->advertising |= newadv;
589 if (phydev->autoneg)
590 /* inform link partner of our
591 * new flow ctrl settings
592 */
593 return phy_start_aneg(phydev);
594
595 if (!epause->autoneg) {
596 u32 tempval;
597 tempval = gfar_read(&regs->maccfg1);
598 tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
599 if (priv->tx_pause_en)
600 tempval |= MACCFG1_TX_FLOW;
601 if (priv->rx_pause_en)
602 tempval |= MACCFG1_RX_FLOW;
603 gfar_write(&regs->maccfg1, tempval);
604 }
605 }
606
607 return 0;
608}
609
538int gfar_set_features(struct net_device *dev, netdev_features_t features) 610int gfar_set_features(struct net_device *dev, netdev_features_t features)
539{ 611{
540 struct gfar_private *priv = netdev_priv(dev); 612 struct gfar_private *priv = netdev_priv(dev);
@@ -1806,6 +1878,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
1806 .set_coalesce = gfar_scoalesce, 1878 .set_coalesce = gfar_scoalesce,
1807 .get_ringparam = gfar_gringparam, 1879 .get_ringparam = gfar_gringparam,
1808 .set_ringparam = gfar_sringparam, 1880 .set_ringparam = gfar_sringparam,
1881 .get_pauseparam = gfar_gpauseparam,
1882 .set_pauseparam = gfar_spauseparam,
1809 .get_strings = gfar_gstrings, 1883 .get_strings = gfar_gstrings,
1810 .get_sset_count = gfar_sset_count, 1884 .get_sset_count = gfar_sset_count,
1811 .get_ethtool_stats = gfar_fill_stats, 1885 .get_ethtool_stats = gfar_fill_stats,
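
The new pauseparam hooks follow the standard 802.3 pause-advertisement mapping: symmetric pause advertises Pause, rx-only advertises Pause|Asym_Pause, and tx-only advertises Asym_Pause. A compact restatement of that mapping (illustrative helper, not driver code):

#include <linux/ethtool.h>

static u32 example_pause_to_adv(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return ADVERTISED_Pause;
	if (rx_pause)
		return ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (tx_pause)
		return ADVERTISED_Asym_Pause;
	return 0;
}

From userspace these hooks are exercised with ethtool -a/-A (for example, ethtool -A eth0 rx on tx off).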
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3c43dac894ec..5930c39672db 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3911,14 +3911,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
 
 static int ucc_geth_remove(struct platform_device* ofdev)
 {
-	struct device *device = &ofdev->dev;
-	struct net_device *dev = dev_get_drvdata(device);
+	struct net_device *dev = platform_get_drvdata(ofdev);
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
 	unregister_netdev(dev);
 	free_netdev(dev);
 	ucc_geth_memclean(ugeth);
-	dev_set_drvdata(device, NULL);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 93346f00486b..79aef681ac85 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -133,8 +133,8 @@ struct rfd_struct
 	unsigned char	last;		/* Bit15,Last Frame on List / Bit14,suspend */
 	unsigned short	next;		/* link offset to next RFD */
 	unsigned short	rbd_offset;	/* pointer offset to RBD buffer */
-	unsigned char	dest[6];	/* ethernet-address, destination */
-	unsigned char	source[6];	/* ethernet-address, source */
+	unsigned char	dest[ETH_ALEN];		/* ethernet-address, destination */
+	unsigned char	source[ETH_ALEN];	/* ethernet-address, source */
 	unsigned short	length;		/* 802.3 frame-length */
 	unsigned short	zero_dummy;	/* dummy */
 };
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index d300a0c0eafc..6b5c7222342c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2312,7 +2312,7 @@ static int emac_check_deps(struct emac_instance *dev,
 		if (deps[i].ofdev == NULL)
 			continue;
 		if (deps[i].drvdata == NULL)
-			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
+			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
 		if (deps[i].drvdata != NULL)
 			there++;
 	}
@@ -2799,9 +2799,9 @@ static int emac_probe(struct platform_device *ofdev)
 		/* display more info about what's missing ? */
 		goto err_reg_unmap;
 	}
-	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
+	dev->mal = platform_get_drvdata(dev->mal_dev);
 	if (dev->mdio_dev != NULL)
-		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
+		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
 
 	/* Register with MAL */
 	dev->commac.ops = &emac_commac_ops;
@@ -2892,7 +2892,7 @@ static int emac_probe(struct platform_device *ofdev)
 	 * fully initialized
 	 */
 	wmb();
-	dev_set_drvdata(&ofdev->dev, dev);
+	platform_set_drvdata(ofdev, dev);
 
 	/* There's a new kid in town ! Let's tell everybody */
 	wake_up_all(&emac_probe_wait);
@@ -2951,12 +2951,10 @@ static int emac_probe(struct platform_device *ofdev)
 
 static int emac_remove(struct platform_device *ofdev)
 {
-	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
+	struct emac_instance *dev = platform_get_drvdata(ofdev);
 
 	DBG(dev, "remove" NL);
 
-	dev_set_drvdata(&ofdev->dev, NULL);
-
 	unregister_netdev(dev->ndev);
 
 	cancel_work_sync(&dev->reset_work);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 856ea66c9223..dac564c25440 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,8 +637,8 @@ static int mal_probe(struct platform_device *ofdev)
 	bd_size = sizeof(struct mal_descriptor) *
 		  (NUM_TX_BUFF * mal->num_tx_chans +
 		   NUM_RX_BUFF * mal->num_rx_chans);
-	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
-					  GFP_KERNEL | __GFP_ZERO);
+	mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+					   GFP_KERNEL);
 	if (mal->bd_virt == NULL) {
 		err = -ENOMEM;
 		goto fail_unmap;
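
dma_zalloc_coherent() used above is simply dma_alloc_coherent() plus zeroing, replacing the open-coded GFP_KERNEL | __GFP_ZERO combination. A minimal sketch of the call (function and parameter names illustrative):

#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	/* zeroed, DMA-coherent memory; pair with dma_free_coherent() */
	return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}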
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 70fd55968844..5d41aee69d16 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -106,7 +106,7 @@ struct ibmveth_stat ibmveth_stats[] = {
 /* simple methods of getting data from the current rxq entry */
 static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
 {
-	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
 }
 
 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
@@ -132,7 +132,7 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 
 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 {
-	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
 }
 
 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 43a794fab9ff..84066bafe057 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -164,14 +164,26 @@ struct ibmveth_adapter {
 	u64 tx_send_failed;
 };
 
+/*
+ * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
+ * so we don't need to byteswap the two elements. However since we use
+ * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
+ * do end up with endian specific ordering of the elements and that
+ * needs correcting.
+ */
 struct ibmveth_buf_desc_fields {
+#ifdef __BIG_ENDIAN
+	u32 flags_len;
+	u32 address;
+#else
+	u32 address;
 	u32 flags_len;
+#endif
 #define IBMVETH_BUF_VALID	0x80000000
 #define IBMVETH_BUF_TOGGLE	0x40000000
 #define IBMVETH_BUF_NO_CSUM	0x02000000
 #define IBMVETH_BUF_CSUM_GOOD	0x01000000
 #define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
-	u32 address;
 };
 
 union ibmveth_buf_desc {
@@ -180,7 +192,7 @@ union ibmveth_buf_desc {
 };
 
 struct ibmveth_rx_q_entry {
-	u32 flags_off;
+	__be32 flags_off;
 #define IBMVETH_RXQ_TOGGLE		0x80000000
 #define IBMVETH_RXQ_TOGGLE_SHIFT	31
 #define IBMVETH_RXQ_VALID		0x40000000
@@ -188,7 +200,8 @@ struct ibmveth_rx_q_entry {
 #define IBMVETH_RXQ_CSUM_GOOD		0x01000000
 #define IBMVETH_RXQ_OFF_MASK		0x0000FFFF
 
-	u32 length;
+	__be32 length;
+	/* correlator is only used by the OS, no need to byte swap */
 	u64 correlator;
 };
 
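
The __be32 annotations let sparse enforce what the ibmveth.c hunk does at runtime: the rx-queue entries live in memory shared with the hypervisor in big-endian layout, so a little-endian host must convert on every read, while the host-private correlator needs no swap. A minimal sketch (type and function names illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_rxq_entry {
	__be32 flags_off;	/* big-endian in shared memory */
	__be32 length;
	u64 correlator;		/* host-private, never swapped */
};

static u32 example_frame_length(const struct example_rxq_entry *e)
{
	return be32_to_cpu(e->length);	/* no-op on BE hosts, swab on LE */
}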
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 1fde90b96685..bdf5023724e7 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -1004,7 +1004,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
 	/* Check to see if the NIC has been initialized via nic_open,
 	 * before trying to read statistic registers.
 	 */
-	if (!test_bit(__LINK_STATE_START, &dev->state))
+	if (!netif_running(dev))
 		return &sp->stats;
 
 	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5115ae76a5d1..ada6e210279f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 		config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
 	}
 
-	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
-	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
-	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
+		     c + 0);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
+		     c + 8);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
+		     c + 16);
 	return 0;
 }
 
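
The e100 cleanup leans on the kernel's %ph printk extension, which hex-dumps a small buffer in one call; a width such as %8ph prints eight bytes, so each call replaces eight separate %02X arguments. A hedged sketch (function and buffer names illustrative):

#include <linux/printk.h>
#include <linux/types.h>

static void example_dump_config(const u8 *c)
{
	/* each line prints eight hex bytes from the buffer */
	pr_debug("[00-07]=%8ph\n", c);
	pr_debug("[08-15]=%8ph\n", c + 8);
	pr_debug("[16-23]=%8ph\n", c + 16);
}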
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 82a967c95598..73a8aeefb92a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1019,8 +1019,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
-	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-					GFP_KERNEL | __GFP_ZERO);
+	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					 GFP_KERNEL);
 	if (!txdr->desc) {
 		ret_val = 2;
 		goto err_nomem;
@@ -1077,8 +1077,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	}
 
 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-					GFP_KERNEL | __GFP_ZERO);
+	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					 GFP_KERNEL);
 	if (!rxdr->desc) {
 		ret_val = 6;
 		goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 4c303e2a7cb3..8fed74e3fa53 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1011,6 +1011,11 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1011 1011
1012 /* Must release MDIO ownership and mutex after MAC reset. */ 1012 /* Must release MDIO ownership and mutex after MAC reset. */
1013 switch (hw->mac.type) { 1013 switch (hw->mac.type) {
1014 case e1000_82573:
1015 /* Release mutex only if the hw semaphore is acquired */
1016 if (!ret_val)
1017 e1000_put_hw_semaphore_82573(hw);
1018 break;
1014 case e1000_82574: 1019 case e1000_82574:
1015 case e1000_82583: 1020 case e1000_82583:
1016 /* Release mutex only if the hw semaphore is acquired */ 1021 /* Release mutex only if the hw semaphore is acquired */
@@ -2057,6 +2062,7 @@ const struct e1000_info e1000_82583_info = {
2057 | FLAG_HAS_JUMBO_FRAMES 2062 | FLAG_HAS_JUMBO_FRAMES
2058 | FLAG_HAS_CTRLEXT_ON_LOAD, 2063 | FLAG_HAS_CTRLEXT_ON_LOAD,
2059 .flags2 = FLAG2_DISABLE_ASPM_L0S 2064 .flags2 = FLAG2_DISABLE_ASPM_L0S
2065 | FLAG2_DISABLE_ASPM_L1
2060 | FLAG2_NO_DISABLE_RX, 2066 | FLAG2_NO_DISABLE_RX,
2061 .pba = 32, 2067 .pba = 32,
2062 .max_hw_frame_size = DEFAULT_JUMBO, 2068 .max_hw_frame_size = DEFAULT_JUMBO,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ffbc08f56c40..ad0edd11015d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -90,9 +90,6 @@ struct e1000_info;
90 90
91#define E1000_MNG_VLAN_NONE (-1) 91#define E1000_MNG_VLAN_NONE (-1)
92 92
93/* Number of packet split data buffers (not including the header buffer) */
94#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
95
96#define DEFAULT_JUMBO 9234 93#define DEFAULT_JUMBO 9234
97 94
98/* Time to wait before putting the device into D3 if there's no link (in ms). */ 95/* Time to wait before putting the device into D3 if there's no link (in ms). */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 59c22bf18701..a8633b8f0ac5 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev,
 			speed = adapter->link_speed;
 			ecmd->duplex = adapter->link_duplex - 1;
 		}
-	} else {
+	} else if (!pm_runtime_suspended(netdev->dev.parent)) {
 		u32 status = er32(STATUS);
 		if (status & E1000_STATUS_LU) {
 			if (status & E1000_STATUS_SPEED_1000)
@@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev,
264{ 264{
265 struct e1000_adapter *adapter = netdev_priv(netdev); 265 struct e1000_adapter *adapter = netdev_priv(netdev);
266 struct e1000_hw *hw = &adapter->hw; 266 struct e1000_hw *hw = &adapter->hw;
267 int ret_val = 0;
268
269 pm_runtime_get_sync(netdev->dev.parent);
267 270
268 /* When SoL/IDER sessions are active, autoneg/speed/duplex 271 /* When SoL/IDER sessions are active, autoneg/speed/duplex
269 * cannot be changed 272 * cannot be changed
@@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev,
 	if (hw->phy.ops.check_reset_block &&
 	    hw->phy.ops.check_reset_block(hw)) {
 		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
-		return -EINVAL;
+		ret_val = -EINVAL;
+		goto out;
 	}
 
 	/* MDI setting is only allowed when autoneg enabled because
@@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev,
 	 * duplex is forced.
 	 */
 	if (ecmd->eth_tp_mdix_ctrl) {
-		if (hw->phy.media_type != e1000_media_type_copper)
-			return -EOPNOTSUPP;
+		if (hw->phy.media_type != e1000_media_type_copper) {
+			ret_val = -EOPNOTSUPP;
+			goto out;
+		}
 
 		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
 		    (ecmd->autoneg != AUTONEG_ENABLE)) {
 			e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
-			return -EINVAL;
+			ret_val = -EINVAL;
+			goto out;
 		}
 	}
 
@@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev,
 		u32 speed = ethtool_cmd_speed(ecmd);
 		/* calling this overrides forced MDI setting */
 		if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
-			clear_bit(__E1000_RESETTING, &adapter->state);
-			return -EINVAL;
+			ret_val = -EINVAL;
+			goto out;
 		}
 	}
 
@@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev,
 		e1000e_reset(adapter);
 	}
 
+out:
+	pm_runtime_put_sync(netdev->dev.parent);
 	clear_bit(__E1000_RESETTING, &adapter->state);
-	return 0;
+	return ret_val;
 }
337 346
338static void e1000_get_pauseparam(struct net_device *netdev, 347static void e1000_get_pauseparam(struct net_device *netdev,
@@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev,
366 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 375 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
367 usleep_range(1000, 2000); 376 usleep_range(1000, 2000);
368 377
378 pm_runtime_get_sync(netdev->dev.parent);
379
369 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 380 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
370 hw->fc.requested_mode = e1000_fc_default; 381 hw->fc.requested_mode = e1000_fc_default;
371 if (netif_running(adapter->netdev)) { 382 if (netif_running(adapter->netdev)) {
@@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
398 } 409 }
399 410
400out: 411out:
412 pm_runtime_put_sync(netdev->dev.parent);
401 clear_bit(__E1000_RESETTING, &adapter->state); 413 clear_bit(__E1000_RESETTING, &adapter->state);
402 return retval; 414 return retval;
403} 415}
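
The pm_runtime_get_sync()/pm_runtime_put_sync() pairs threaded through these ethtool handlers make sure the device is resumed, and its registers reachable, for the whole duration of each operation, then released so it may runtime-suspend again. The bracketing pattern in isolation (names illustrative):

#include <linux/pm_runtime.h>

static int example_ethtool_op(struct device *parent)
{
	int ret = 0;

	pm_runtime_get_sync(parent);	/* resume now; may sleep */
	/* ... safe to touch hardware registers here ... */
	pm_runtime_put_sync(parent);	/* drop reference, allow suspend */
	return ret;
}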
@@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev,
428 u32 *regs_buff = p; 440 u32 *regs_buff = p;
429 u16 phy_data; 441 u16 phy_data;
430 442
443 pm_runtime_get_sync(netdev->dev.parent);
444
431 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 445 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
432 446
433 regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 447 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
@@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev,
472 e1e_rphy(hw, MII_STAT1000, &phy_data); 486 e1e_rphy(hw, MII_STAT1000, &phy_data);
473 regs_buff[24] = (u32)phy_data; /* phy local receiver status */ 487 regs_buff[24] = (u32)phy_data; /* phy local receiver status */
474 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 488 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
489
490 pm_runtime_put_sync(netdev->dev.parent);
475} 491}
476 492
477static int e1000_get_eeprom_len(struct net_device *netdev) 493static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
504 if (!eeprom_buff) 520 if (!eeprom_buff)
505 return -ENOMEM; 521 return -ENOMEM;
506 522
523 pm_runtime_get_sync(netdev->dev.parent);
524
507 if (hw->nvm.type == e1000_nvm_eeprom_spi) { 525 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
508 ret_val = e1000_read_nvm(hw, first_word, 526 ret_val = e1000_read_nvm(hw, first_word,
509 last_word - first_word + 1, 527 last_word - first_word + 1,
@@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
517 } 535 }
518 } 536 }
519 537
538 pm_runtime_put_sync(netdev->dev.parent);
539
520 if (ret_val) { 540 if (ret_val) {
521 /* a read error occurred, throw away the result */ 541 /* a read error occurred, throw away the result */
522 memset(eeprom_buff, 0xff, sizeof(u16) * 542 memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
566 586
567 ptr = (void *)eeprom_buff; 587 ptr = (void *)eeprom_buff;
568 588
589 pm_runtime_get_sync(netdev->dev.parent);
590
569 if (eeprom->offset & 1) { 591 if (eeprom->offset & 1) {
570 /* need read/modify/write of first changed EEPROM word */ 592 /* need read/modify/write of first changed EEPROM word */
571 /* only the second byte of the word is being modified */ 593 /* only the second byte of the word is being modified */
@@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
606 ret_val = e1000e_update_nvm_checksum(hw); 628 ret_val = e1000e_update_nvm_checksum(hw);
607 629
608out: 630out:
631 pm_runtime_put_sync(netdev->dev.parent);
609 kfree(eeprom_buff); 632 kfree(eeprom_buff);
610 return ret_val; 633 return ret_val;
611} 634}
@@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
701 } 724 }
702 } 725 }
703 726
727 pm_runtime_get_sync(netdev->dev.parent);
728
704 e1000e_down(adapter); 729 e1000e_down(adapter);
705 730
706 /* We can't just free everything and then setup again, because the 731 /* We can't just free everything and then setup again, because the
@@ -739,6 +764,7 @@ err_setup_rx:
739 e1000e_free_tx_resources(temp_tx); 764 e1000e_free_tx_resources(temp_tx);
740err_setup: 765err_setup:
741 e1000e_up(adapter); 766 e1000e_up(adapter);
767 pm_runtime_put_sync(netdev->dev.parent);
742free_temp: 768free_temp:
743 vfree(temp_tx); 769 vfree(temp_tx);
744 vfree(temp_rx); 770 vfree(temp_rx);
@@ -1639,7 +1665,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			ret_val = 13; /* ret_val is the same as mis-compare */
 			break;
 		}
-		if (jiffies >= (time + 20)) {
+		if (time_after(jiffies, time + 20)) {
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
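
The loopback-test fix replaces a raw jiffies comparison with time_after(), which remains correct across jiffies wraparound because it compares via signed subtraction. Sketch:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Wrap-safe: true once 20 ticks have elapsed since 'start'. */
static bool example_timed_out(unsigned long start)
{
	return time_after(jiffies, start + 20);
}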
@@ -1732,6 +1758,8 @@ static void e1000_diag_test(struct net_device *netdev,
 	u8 autoneg;
 	bool if_running = netif_running(netdev);
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	set_bit(__E1000_TESTING, &adapter->state);
 
 	if (!if_running) {
@@ -1817,6 +1845,8 @@ static void e1000_diag_test(struct net_device *netdev,
 	}
 
 	msleep_interruptible(4 * 1000);
+
+	pm_runtime_put_sync(netdev->dev.parent);
 }
 
 static void e1000_get_wol(struct net_device *netdev,
@@ -1891,6 +1921,8 @@ static int e1000_set_phys_id(struct net_device *netdev,
 
 	switch (state) {
 	case ETHTOOL_ID_ACTIVE:
+		pm_runtime_get_sync(netdev->dev.parent);
+
 		if (!hw->mac.ops.blink_led)
 			return 2; /* cycle on/off twice per second */
 
@@ -1902,6 +1934,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
 			e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
 		hw->mac.ops.led_off(hw);
 		hw->mac.ops.cleanup_led(hw);
+		pm_runtime_put_sync(netdev->dev.parent);
 		break;
 
 	case ETHTOOL_ID_ON:
@@ -1912,6 +1945,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
 		hw->mac.ops.led_off(hw);
 		break;
 	}
+
 	return 0;
 }
 
@@ -1950,11 +1984,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
 		adapter->itr_setting = adapter->itr & ~3;
 	}
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	if (adapter->itr_setting != 0)
 		e1000e_write_itr(adapter, adapter->itr);
 	else
 		e1000e_write_itr(adapter, 0);
 
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	return 0;
 }
 
@@ -1968,7 +2006,9 @@ static int e1000_nway_reset(struct net_device *netdev)
 	if (!adapter->hw.mac.autoneg)
 		return -EINVAL;
 
+	pm_runtime_get_sync(netdev->dev.parent);
 	e1000e_reinit_locked(adapter);
+	pm_runtime_put_sync(netdev->dev.parent);
 
 	return 0;
 }
@@ -1982,7 +2022,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 	int i;
 	char *p = NULL;
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	e1000e_get_stats64(netdev, &net_stats);
+
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		switch (e1000_gstrings_stats[i].type) {
 		case NETDEV_STATS:
@@ -2033,7 +2078,11 @@ static int e1000_get_rxnfc(struct net_device *netdev,
 	case ETHTOOL_GRXFH: {
 		struct e1000_adapter *adapter = netdev_priv(netdev);
 		struct e1000_hw *hw = &adapter->hw;
-		u32 mrqc = er32(MRQC);
+		u32 mrqc;
+
+		pm_runtime_get_sync(netdev->dev.parent);
+		mrqc = er32(MRQC);
+		pm_runtime_put_sync(netdev->dev.parent);
 
 		if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
 			return 0;
@@ -2096,9 +2145,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 		return -EOPNOTSUPP;
 	}
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	ret_val = hw->phy.ops.acquire(hw);
-	if (ret_val)
+	if (ret_val) {
+		pm_runtime_put_sync(netdev->dev.parent);
 		return -EBUSY;
+	}
 
 	/* EEE Capability */
 	ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2117,14 +2170,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 
 	/* EEE PCS Status */
 	ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+	if (ret_val)
+		goto release;
 	if (hw->phy.type == e1000_phy_82579)
 		phy_data <<= 8;
 
-release:
-	hw->phy.ops.release(hw);
-	if (ret_val)
-		return -ENODATA;
-
 	/* Result of the EEE auto negotiation - there is no register that
 	 * has the status of the EEE negotiation so do a best-guess based
 	 * on whether Tx or Rx LPI indications have been received.
@@ -2136,7 +2186,14 @@ release:
 		edata->tx_lpi_enabled = true;
 	edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
 
-	return 0;
+release:
+	hw->phy.ops.release(hw);
+	if (ret_val)
+		ret_val = -ENODATA;
+
+	pm_runtime_put_sync(netdev->dev.parent);
+
+	return ret_val;
 }
 
 static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
@@ -2169,12 +2226,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 
 	hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	/* reset the link */
 	if (netif_running(netdev))
 		e1000e_reinit_locked(adapter);
 	else
 		e1000e_reset(adapter);
 
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	return 0;
 }
 
@@ -2212,19 +2273,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
 	return 0;
 }
 
-static int e1000e_ethtool_begin(struct net_device *netdev)
-{
-	return pm_runtime_get_sync(netdev->dev.parent);
-}
-
-static void e1000e_ethtool_complete(struct net_device *netdev)
-{
-	pm_runtime_put_sync(netdev->dev.parent);
-}
-
 static const struct ethtool_ops e1000_ethtool_ops = {
-	.begin			= e1000e_ethtool_begin,
-	.complete		= e1000e_ethtool_complete,
 	.get_settings		= e1000_get_settings,
 	.set_settings		= e1000_set_settings,
 	.get_drvinfo		= e1000_get_drvinfo,
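
With the ethtool .begin/.complete hooks removed, the driver now takes a runtime-PM reference around only the code in each operation that actually touches the hardware, instead of holding the device awake for the whole ethtool call. The recurring shape, as a sketch of the pattern rather than driver code (read_some_register() is a stand-in):

    static int some_ethtool_op(struct net_device *netdev)
    {
    	u32 val;

    	/* wake the device (via its parent) just for the register access */
    	pm_runtime_get_sync(netdev->dev.parent);
    	val = read_some_register();
    	pm_runtime_put_sync(netdev->dev.parent);

    	return val ? 0 : -EIO;
    }

Narrowing the window this way lets a runtime-suspended port stay asleep across ethtool operations that bail out before ever reaching the hardware.
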
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index a6f903a9b773..b7f38435d1fd 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -90,6 +90,10 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_LPT_I217_V		0x153B
 #define E1000_DEV_ID_PCH_LPTLP_I218_LM		0x155A
 #define E1000_DEV_ID_PCH_LPTLP_I218_V		0x1559
+#define E1000_DEV_ID_PCH_I218_LM2		0x15A0
+#define E1000_DEV_ID_PCH_I218_V2		0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3		0x15A2	/* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3		0x15A3	/* Wildcat Point PCH */
 
 #define E1000_REVISION_4	4
 
@@ -227,6 +231,10 @@ union e1000_rx_desc_extended {
 };
 
 #define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
 /* Receive Descriptor - Packet Split */
 union e1000_rx_desc_packet_split {
 	struct {
@@ -251,7 +259,8 @@ union e1000_rx_desc_packet_split {
 		} middle;
 		struct {
 			__le16 header_status;
-			__le16 length[3];	/* length of buffers 1-3 */
+			/* length of buffers 1-3 */
+			__le16 length[PS_PAGE_BUFFERS];
 		} upper;
 		__le64 reserved;
 	} wb; /* writeback */
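
Sizing the writeback array with PS_PAGE_BUFFERS instead of a literal 3 ties it to MAX_PS_BUFFERS, so the two can no longer drift apart. A hedged usage fragment, assuming a local rx_desc of type union e1000_rx_desc_packet_split *:

    u32 total = 0;
    int i;

    /* sum the data-buffer lengths reported in the writeback descriptor;
     * the header buffer (buffer 0) is not part of this array
     */
    for (i = 0; i < PS_PAGE_BUFFERS; i++)
    	total += le16_to_cpu(rx_desc->wb.upper.length[i]);
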
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9dde390f7e71..af08188d7e62 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 	u32 phy_id = 0;
 	s32 ret_val;
 	u16 retry_count;
+	u32 mac_reg = 0;
 
 	for (retry_count = 0; retry_count < 2; retry_count++) {
 		ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
@@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 
 	if (hw->phy.id) {
 		if (hw->phy.id == phy_id)
-			return true;
+			goto out;
 	} else if (phy_id) {
 		hw->phy.id = phy_id;
 		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
-		return true;
+		goto out;
 	}
 
 	/* In case the PHY needs to be in mdio slow mode,
@@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 	ret_val = e1000e_get_phy_id(hw);
 	hw->phy.ops.acquire(hw);
 
-	return !ret_val;
+	if (ret_val)
+		return false;
+out:
+	if (hw->mac.type == e1000_pch_lpt) {
+		/* Unforce SMBus mode in PHY */
+		e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+		e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+		/* Unforce SMBus mode in MAC */
+		mac_reg = er32(CTRL_EXT);
+		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+		ew32(CTRL_EXT, mac_reg);
+	}
+
+	return true;
 }
 
 /**
@@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 {
 	u32 mac_reg, fwsm = er32(FWSM);
 	s32 ret_val;
-	u16 phy_reg;
 
 	/* Gate automatic PHY configuration by hardware on managed and
 	 * non-managed 82579 and newer adapters.
@@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
 		ew32(CTRL_EXT, mac_reg);
 
+		/* Wait 50 milliseconds for MAC to finish any retries
+		 * that it might be trying to perform from previous
+		 * attempts to acknowledge any phy read requests.
+		 */
+		msleep(50);
+
 		/* fall-through */
 	case e1000_pch2lan:
-		if (e1000_phy_is_accessible_pchlan(hw)) {
-			if (hw->mac.type == e1000_pch_lpt) {
-				/* Unforce SMBus mode in PHY */
-				e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
-				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
-				e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
-
-				/* Unforce SMBus mode in MAC */
-				mac_reg = er32(CTRL_EXT);
-				mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
-				ew32(CTRL_EXT, mac_reg);
-			}
+		if (e1000_phy_is_accessible_pchlan(hw))
 			break;
-		}
 
 		/* fall-through */
 	case e1000_pchlan:
@@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 
 		if (hw->phy.ops.check_reset_block(hw)) {
 			e_dbg("Required LANPHYPC toggle blocked by ME\n");
+			ret_val = -E1000_ERR_PHY;
 			break;
 		}
 
@@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
 		ew32(FEXTNVM3, mac_reg);
 
-		if (hw->mac.type == e1000_pch_lpt) {
-			/* Toggling LANPHYPC brings the PHY out of SMBus mode
-			 * So ensure that the MAC is also out of SMBus mode
-			 */
-			mac_reg = er32(CTRL_EXT);
-			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
-			ew32(CTRL_EXT, mac_reg);
-		}
-
 		/* Toggle LANPHYPC Value bit */
 		mac_reg = er32(CTRL);
 		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 				usleep_range(5000, 10000);
 			} while (!(er32(CTRL_EXT) &
 				   E1000_CTRL_EXT_LPCD) && count--);
+			usleep_range(30000, 60000);
+			if (e1000_phy_is_accessible_pchlan(hw))
+				break;
+
+			/* Toggling LANPHYPC brings the PHY out of SMBus mode
+			 * so ensure that the MAC is also out of SMBus mode
+			 */
+			mac_reg = er32(CTRL_EXT);
+			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+			ew32(CTRL_EXT, mac_reg);
+
+			if (e1000_phy_is_accessible_pchlan(hw))
+				break;
+
+			ret_val = -E1000_ERR_PHY;
 		}
 		break;
 	default:
@@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 	}
 
 	hw->phy.ops.release(hw);
-
-	/* Reset the PHY before any access to it. Doing so, ensures
-	 * that the PHY is in a known good state before we read/write
-	 * PHY registers. The generic reset is sufficient here,
-	 * because we haven't determined the PHY type yet.
-	 */
-	ret_val = e1000e_phy_hw_reset_generic(hw);
+	if (!ret_val) {
+		/* Reset the PHY before any access to it. Doing so, ensures
+		 * that the PHY is in a known good state before we read/write
+		 * PHY registers. The generic reset is sufficient here,
+		 * because we haven't determined the PHY type yet.
+		 */
+		ret_val = e1000e_phy_hw_reset_generic(hw);
+	}
 
 out:
 	/* Ungate automatic PHY configuration on non-managed 82579 */
@@ -793,29 +810,31 @@ release:
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests. Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gpbs mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
 **/
 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 {
 	u32 fextnvm6 = er32(FEXTNVM6);
+	u32 status = er32(STATUS);
 	s32 ret_val = 0;
+	u16 reg;
 
-	if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
-		u16 kmrn_reg;
-
+	if (link && (status & E1000_STATUS_SPEED_1000)) {
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			return ret_val;
 
 		ret_val =
 		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
-						&kmrn_reg);
+						&reg);
 		if (ret_val)
 			goto release;
 
 		ret_val =
 		    e1000e_write_kmrn_reg_locked(hw,
 						 E1000_KMRNCTRLSTA_K1_CONFIG,
-						 kmrn_reg &
+						 reg &
 						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
 		if (ret_val)
 			goto release;
@@ -827,12 +846,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 		ret_val =
 		    e1000e_write_kmrn_reg_locked(hw,
 						 E1000_KMRNCTRLSTA_K1_CONFIG,
-						 kmrn_reg);
+						 reg);
 release:
 		hw->phy.ops.release(hw);
 	} else {
 		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
-		ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+		if (!link || ((status & E1000_STATUS_SPEED_100) &&
+			      (status & E1000_STATUS_FD)))
+			goto update_fextnvm6;
+
+		ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
+		if (ret_val)
+			return ret_val;
+
+		/* Clear link status transmit timeout */
+		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+		if (status & E1000_STATUS_SPEED_100) {
+			/* Set inband Tx timeout to 5x10us for 100Half */
+			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+			/* Do not extend the K1 entry latency for 100Half */
+			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+		} else {
+			/* Set inband Tx timeout to 50x10us for 10Full/Half */
+			reg |= 50 <<
+			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+			/* Extend the K1 entry latency for 10 Mbps */
+			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+		}
+
+		ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
+		if (ret_val)
+			return ret_val;
+
+update_fextnvm6:
+		ew32(FEXTNVM6, fextnvm6);
 	}
 
 	return ret_val;
@@ -993,7 +1045,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 
 	/* Work-around I218 hang issue */
 	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
+	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
 		if (ret_val)
 			return ret_val;
@@ -4168,7 +4222,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 	u16 phy_reg, device_id = hw->adapter->pdev->device;
 
 	if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-	    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+	    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+	    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+	    (device_id == E1000_DEV_ID_PCH_I218_V3)) {
 		u32 fextnvm6 = er32(FEXTNVM6);
 
 		ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 80034a2b297c..59865695b282 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -93,6 +93,7 @@
 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC	0x3
 
 #define E1000_FEXTNVM6_REQ_PLL_CLK	0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION	0x00000200
 
 #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
 
@@ -197,6 +198,11 @@
 
 #define SW_FLAG_TIMEOUT		1000 /* SW Semaphore flag timeout in ms */
 
+/* Inband Control */
+#define I217_INBAND_CTRL	PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK	0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT	8
+
 /* PHY Low Power Idle Control */
 #define I82579_LPI_CTRL			PHY_REG(772, 20)
 #define I82579_LPI_CTRL_100_ENABLE	0x2000
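
The new I217_INBAND_CTRL defines follow the driver's usual mask-and-shift idiom: clear the whole field with the mask, then OR in the new count shifted into position, exactly what e1000_k1_workaround_lpt_lp() does above. In isolation (a fragment; the starting value of reg is whatever e1e_rphy() returned):

    u16 reg;	/* current value of I217_INBAND_CTRL, read via e1e_rphy() */

    /* replace the 6-bit link-status Tx timeout field (bits 13:8) with 50 x 10us */
    reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
    reg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
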
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 77f81cbb601a..e87e9b01f404 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -64,8 +64,6 @@ static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-
 static const struct e1000_info *e1000_info_tbl[] = {
 	[board_82571]		= &e1000_82571_info,
 	[board_82572]		= &e1000_82572_info,
@@ -2979,17 +2977,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	u32 pages = 0;
 
 	/* Workaround Si errata on PCHx - configure jumbo frame flow */
-	if (hw->mac.type >= e1000_pch2lan) {
-		s32 ret_val;
-
-		if (adapter->netdev->mtu > ETH_DATA_LEN)
-			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
-		else
-			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-
-		if (ret_val)
-			e_dbg("failed to enable jumbo frame workaround mode\n");
-	}
+	if ((hw->mac.type >= e1000_pch2lan) &&
+	    (adapter->netdev->mtu > ETH_DATA_LEN) &&
+	    e1000_lv_jumbo_workaround_ich8lan(hw, true))
+		e_dbg("failed to enable jumbo frame workaround mode\n");
 
 	/* Program MC offset vector base */
 	rctl = er32(RCTL);
@@ -3826,6 +3817,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
 			break;
 		}
 
+		pba = 14;
+		ew32(PBA, pba);
 		fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
 		fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
 		break;
@@ -4034,6 +4027,12 @@ void e1000e_down(struct e1000_adapter *adapter)
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 
+	/* Disable Si errata workaround on PCHx for jumbo frame flow */
+	if ((hw->mac.type >= e1000_pch2lan) &&
+	    (adapter->netdev->mtu > ETH_DATA_LEN) &&
+	    e1000_lv_jumbo_workaround_ich8lan(hw, false))
+		e_dbg("failed to disable jumbo frame workaround mode\n");
+
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
 
@@ -4683,11 +4682,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_phy_regs *phy = &adapter->phy_regs;
 
-	if ((er32(STATUS) & E1000_STATUS_LU) &&
+	if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
+	    (er32(STATUS) & E1000_STATUS_LU) &&
 	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
 		int ret_val;
 
-		pm_runtime_get_sync(&adapter->pdev->dev);
 		ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
 		ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
 		ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4698,7 +4697,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 		ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
 		if (ret_val)
 			e_warn("Error reading PHY register\n");
-		pm_runtime_put_sync(&adapter->pdev->dev);
 	} else {
 		/* Do not read PHY registers if link is not up
 		 * Set values to typical power-on defaults
@@ -5995,15 +5993,24 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 	 */
 	e1000e_release_hw_control(adapter);
 
+	pci_clear_master(pdev);
+
 	/* The pci-e switch on some quad port adapters will report a
 	 * correctable error when the MAC transitions from D0 to D3. To
 	 * prevent this we need to mask off the correctable errors on the
 	 * downstream port of the pci-e switch.
+	 *
+	 * We don't have the associated upstream bridge while assigning
+	 * the PCI device into guest. For example, the KVM on power is
+	 * one of the cases.
 	 */
 	if (adapter->flags & FLAG_IS_QUAD_PORT) {
 		struct pci_dev *us_dev = pdev->bus->self;
 		u16 devctl;
 
+		if (!us_dev)
+			return 0;
+
 		pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
 		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
 					   (devctl & ~PCI_EXP_DEVCTL_CERE));
@@ -6017,38 +6024,73 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 	return 0;
 }
 
-#ifdef CONFIG_PCIEASPM
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+/**
+ * e1000e_disable_aspm - Disable ASPM states
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 {
+	struct pci_dev *parent = pdev->bus->self;
+	u16 aspm_dis_mask = 0;
+	u16 pdev_aspmc, parent_aspmc;
+
+	switch (state) {
+	case PCIE_LINK_STATE_L0S:
+	case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
+		aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
+		/* fall-through - can't have L1 without L0s */
+	case PCIE_LINK_STATE_L1:
+		aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
+		break;
+	default:
+		return;
+	}
+
+	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+	pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+	if (parent) {
+		pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+					  &parent_aspmc);
+		parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+	}
+
+	/* Nothing to do if the ASPM states to be disabled already are */
+	if (!(pdev_aspmc & aspm_dis_mask) &&
+	    (!parent || !(parent_aspmc & aspm_dis_mask)))
+		return;
+
+	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+		 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
+		 "L0s" : "",
+		 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
+		 "L1" : "");
+
+#ifdef CONFIG_PCIEASPM
 	pci_disable_link_state_locked(pdev, state);
-}
-#else
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
-	u16 aspm_ctl = 0;
 
-	if (state & PCIE_LINK_STATE_L0S)
-		aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
-	if (state & PCIE_LINK_STATE_L1)
-		aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+	/* Double-check ASPM control. If not disabled by the above, the
+	 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
+	 * not enabled); override by writing PCI config space directly.
+	 */
+	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+	pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+	if (!(aspm_dis_mask & pdev_aspmc))
+		return;
+#endif
 
 	/* Both device and parent should have the same ASPM setting.
 	 * Disable ASPM in downstream component first and then upstream.
 	 */
-	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
+	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
 
-	if (pdev->bus->self)
-		pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
-					   aspm_ctl);
-}
-#endif
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
-	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
-		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
-		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
-
-	__e1000e_disable_aspm(pdev, state);
+	if (parent)
+		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
					   aspm_dis_mask);
 }
 
 #ifdef CONFIG_PM
@@ -6723,10 +6765,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->hw.fc.current_mode = e1000_fc_default;
 	adapter->hw.phy.autoneg_advertised = 0x2f;
 
-	/* ring size defaults */
-	adapter->rx_ring->count = E1000_DEFAULT_RXD;
-	adapter->tx_ring->count = E1000_DEFAULT_TXD;
-
 	/* Initial Wake on LAN setting - If APM wake is enabled in
 	 * the EEPROM, enable the ACPI Magic Packet filter
 	 */
@@ -6976,6 +7014,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
 
 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
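
The consolidated e1000e_disable_aspm() now works in three steps: map the requested link states to LNKCTL ASPM-control bits, ask the ASPM core to disable them, then re-read LNKCTL and clear any surviving bits by direct config-space write (the BIOS may refuse the core's request, and with CONFIG_PCIEASPM unset the direct clear is the only path), downstream device before upstream parent. The verification step on its own looks like this, as a sketch under the same assumptions as the function above:

    u16 lnkctl;

    pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
    if (lnkctl & PCI_EXP_LNKCTL_ASPMC)
    	/* some ASPM state is still enabled; clear it in config space */
    	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
    				   lnkctl & PCI_EXP_LNKCTL_ASPMC);
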
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index f21a91a299a2..79b58353d849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -176,7 +176,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
 
 	/* Verify phy id and set remaining function pointers */
 	switch (phy->id) {
-	case M88E1545_E_PHY_ID:
+	case M88E1543_E_PHY_ID:
 	case I347AT4_E_PHY_ID:
 	case M88E1112_E_PHY_ID:
 	case M88E1111_I_PHY_ID:
@@ -238,6 +238,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
 
 	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
 		     E1000_EECD_SIZE_EX_SHIFT);
+
 	/* Added to a constant, "size" becomes the left-shift value
 	 * for setting word_size.
 	 */
@@ -250,86 +251,52 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
 		size = 15;
 
 	nvm->word_size = 1 << size;
-	if (hw->mac.type < e1000_i210) {
-		nvm->opcode_bits = 8;
-		nvm->delay_usec = 1;
-
-		switch (nvm->override) {
-		case e1000_nvm_override_spi_large:
-			nvm->page_size = 32;
-			nvm->address_bits = 16;
-			break;
-		case e1000_nvm_override_spi_small:
-			nvm->page_size = 8;
-			nvm->address_bits = 8;
-			break;
-		default:
-			nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
-			nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
-					    16 : 8;
-			break;
-		}
-		if (nvm->word_size == (1 << 15))
-			nvm->page_size = 128;
-
-		nvm->type = e1000_nvm_eeprom_spi;
-	} else {
-		nvm->type = e1000_nvm_flash_hw;
-	}
+	nvm->opcode_bits = 8;
+	nvm->delay_usec = 1;
+
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+				    16 : 8;
+		break;
+	}
+	if (nvm->word_size == (1 << 15))
+		nvm->page_size = 128;
+
+	nvm->type = e1000_nvm_eeprom_spi;
 
 	/* NVM Function Pointers */
+	nvm->ops.acquire = igb_acquire_nvm_82575;
+	nvm->ops.release = igb_release_nvm_82575;
+	nvm->ops.write = igb_write_nvm_spi;
+	nvm->ops.validate = igb_validate_nvm_checksum;
+	nvm->ops.update = igb_update_nvm_checksum;
+	if (nvm->word_size < (1 << 15))
+		nvm->ops.read = igb_read_nvm_eerd;
+	else
+		nvm->ops.read = igb_read_nvm_spi;
+
+	/* override generic family function pointers for specific descendants */
 	switch (hw->mac.type) {
 	case e1000_82580:
 		nvm->ops.validate = igb_validate_nvm_checksum_82580;
 		nvm->ops.update = igb_update_nvm_checksum_82580;
-		nvm->ops.acquire = igb_acquire_nvm_82575;
-		nvm->ops.release = igb_release_nvm_82575;
-		if (nvm->word_size < (1 << 15))
-			nvm->ops.read = igb_read_nvm_eerd;
-		else
-			nvm->ops.read = igb_read_nvm_spi;
-		nvm->ops.write = igb_write_nvm_spi;
 		break;
 	case e1000_i354:
 	case e1000_i350:
 		nvm->ops.validate = igb_validate_nvm_checksum_i350;
 		nvm->ops.update = igb_update_nvm_checksum_i350;
-		nvm->ops.acquire = igb_acquire_nvm_82575;
-		nvm->ops.release = igb_release_nvm_82575;
-		if (nvm->word_size < (1 << 15))
-			nvm->ops.read = igb_read_nvm_eerd;
-		else
-			nvm->ops.read = igb_read_nvm_spi;
-		nvm->ops.write = igb_write_nvm_spi;
-		break;
-	case e1000_i210:
-		nvm->ops.validate = igb_validate_nvm_checksum_i210;
-		nvm->ops.update = igb_update_nvm_checksum_i210;
-		nvm->ops.acquire = igb_acquire_nvm_i210;
-		nvm->ops.release = igb_release_nvm_i210;
-		nvm->ops.read = igb_read_nvm_srrd_i210;
-		nvm->ops.write = igb_write_nvm_srwr_i210;
-		nvm->ops.valid_led_default = igb_valid_led_default_i210;
-		break;
-	case e1000_i211:
-		nvm->ops.acquire = igb_acquire_nvm_i210;
-		nvm->ops.release = igb_release_nvm_i210;
-		nvm->ops.read = igb_read_nvm_i211;
-		nvm->ops.valid_led_default = igb_valid_led_default_i210;
-		nvm->ops.validate = NULL;
-		nvm->ops.update = NULL;
-		nvm->ops.write = NULL;
 		break;
 	default:
-		nvm->ops.validate = igb_validate_nvm_checksum;
-		nvm->ops.update = igb_update_nvm_checksum;
-		nvm->ops.acquire = igb_acquire_nvm_82575;
-		nvm->ops.release = igb_release_nvm_82575;
-		if (nvm->word_size < (1 << 15))
-			nvm->ops.read = igb_read_nvm_eerd;
-		else
-			nvm->ops.read = igb_read_nvm_spi;
-		nvm->ops.write = igb_write_nvm_spi;
 		break;
 	}
 
@@ -516,6 +483,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	case E1000_DEV_ID_I210_FIBER:
 	case E1000_DEV_ID_I210_SERDES:
 	case E1000_DEV_ID_I210_SGMII:
+	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
 		mac->type = e1000_i210;
 		break;
 	case E1000_DEV_ID_I211_COPPER:
@@ -601,6 +570,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 
 	/* NVM initialization */
 	ret_val = igb_init_nvm_params_82575(hw);
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		ret_val = igb_init_nvm_params_i210(hw);
+		break;
+	default:
+		break;
+	}
+
 	if (ret_val)
 		goto out;
 
@@ -1163,6 +1141,31 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
 }
 
 /**
+ * igb_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function, if using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+				      u16 *duplex)
+{
+	s32 ret_val;
+
+	if (hw->phy.media_type != e1000_media_type_copper)
+		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
+							     duplex);
+	else
+		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
+							  duplex);
+
+	return ret_val;
+}
+
+/**
 * igb_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
@@ -1239,7 +1242,7 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
 					      u16 *duplex)
 {
 	struct e1000_mac_info *mac = &hw->mac;
-	u32 pcs;
+	u32 pcs, status;
 
 	/* Set up defaults for the return values of this function */
 	mac->serdes_has_link = false;
@@ -1260,20 +1263,31 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
 		mac->serdes_has_link = true;
 
 		/* Detect and store PCS speed */
-		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+		if (pcs & E1000_PCS_LSTS_SPEED_1000)
 			*speed = SPEED_1000;
-		} else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+		else if (pcs & E1000_PCS_LSTS_SPEED_100)
 			*speed = SPEED_100;
-		} else {
+		else
 			*speed = SPEED_10;
-		}
 
 		/* Detect and store PCS duplex */
-		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
 			*duplex = FULL_DUPLEX;
-		} else {
+		else
 			*duplex = HALF_DUPLEX;
+
+		/* Check if it is an I354 2.5Gb backplane connection. */
+		if (mac->type == e1000_i354) {
+			status = rd32(E1000_STATUS);
+			if ((status & E1000_STATUS_2P5_SKU) &&
+			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
+				*speed = SPEED_2500;
+				*duplex = FULL_DUPLEX;
+				hw_dbg("2500 Mbs, ");
+				hw_dbg("Full Duplex\n");
+			}
 		}
+
 	}
 
 	return 0;
@@ -1320,7 +1334,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
 **/
 static s32 igb_reset_hw_82575(struct e1000_hw *hw)
 {
-	u32 ctrl, icr;
+	u32 ctrl;
 	s32 ret_val;
 
 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -1365,7 +1379,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
 
 	/* Clear any pending interrupt events. */
 	wr32(E1000_IMC, 0xffffffff);
-	icr = rd32(E1000_ICR);
+	rd32(E1000_ICR);
 
 	/* Install any alternate MAC address into RAR0 */
 	ret_val = igb_check_alt_mac_addr(hw);
@@ -1443,11 +1457,18 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	wr32(E1000_CTRL, ctrl);
 
-	/* Clear Go Link Disconnect bit */
-	if (hw->mac.type >= e1000_82580) {
+	/* Clear Go Link Disconnect bit on supported devices */
+	switch (hw->mac.type) {
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
 		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
 		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+		break;
+	default:
+		break;
 	}
 
 	ret_val = igb_setup_serdes_link_82575(hw);
@@ -1470,7 +1491,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	switch (hw->phy.id) {
 	case I347AT4_E_PHY_ID:
 	case M88E1112_E_PHY_ID:
-	case M88E1545_E_PHY_ID:
+	case M88E1543_E_PHY_ID:
 	case I210_I_PHY_ID:
 		ret_val = igb_copper_link_setup_m88_gen2(hw);
 		break;
@@ -2103,10 +2124,9 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
 	s32 ret_val = 0;
 	/* BH SW mailbox bit in SW_FW_SYNC */
 	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
-	u32 ctrl, icr;
+	u32 ctrl;
 	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
 
-
 	hw->dev_spec._82575.global_device_reset = false;
 
 	/* due to hw errata, global device reset doesn't always
@@ -2165,7 +2185,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
 
 	/* Clear any pending interrupt events. */
 	wr32(E1000_IMC, 0xffffffff);
-	icr = rd32(E1000_ICR);
+	rd32(E1000_ICR);
 
 	ret_val = igb_reset_mdicnfg_82580(hw);
 	if (ret_val)
@@ -2500,28 +2520,28 @@ s32 igb_set_eee_i354(struct e1000_hw *hw)
 	u16 phy_data;
 
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (phy->id != M88E1545_E_PHY_ID))
+	    (phy->id != M88E1543_E_PHY_ID))
 		goto out;
 
 	if (!hw->dev_spec._82575.eee_disable) {
 		/* Switch to PHY page 18. */
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
 		if (ret_val)
 			goto out;
 
-		ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
 					    &phy_data);
 		if (ret_val)
 			goto out;
 
-		phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
 					     phy_data);
 		if (ret_val)
 			goto out;
 
 		/* Return the PHY to page 0. */
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
 		if (ret_val)
 			goto out;
 
@@ -2572,7 +2592,7 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
 
 	/* Check if EEE is supported on this device. */
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (phy->id != M88E1545_E_PHY_ID))
+	    (phy->id != M88E1543_E_PHY_ID))
 		goto out;
 
 	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
@@ -2728,7 +2748,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
 	.check_for_link	= igb_check_for_link_82575,
 	.rar_set	= igb_rar_set,
 	.read_mac_addr	= igb_read_mac_addr_82575,
-	.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+	.get_speed_and_duplex = igb_get_link_up_info_82575,
 #ifdef CONFIG_IGB_HWMON
 	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
 	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
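
The consolidated NVM sizing in igb_init_nvm_params_82575() is a small arithmetic exercise: the EECD size field plus a base shift becomes the exponent of the word count. A worked fragment under the assumptions of the function above (the local eecd word has already been read; the base shift of 6 is NVM_WORD_SIZE_BASE_SHIFT in the igb headers and is treated as an assumption here):

    u16 size = (eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT;

    size += NVM_WORD_SIZE_BASE_SHIFT;	/* assumed base shift of 6 */
    if (size > 15)
    	size = 15;			/* clamp, as the function above does */
    nvm->word_size = 1 << size;		/* e.g. size 15 -> 32768 words */
    if (nvm->word_size == (1 << 15))
    	nvm->page_size = 128;		/* large SPI parts use 128-byte pages */
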
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index aa201abb8ad2..978eca31ceda 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -620,6 +620,7 @@
 #define E1000_EECD_SIZE_EX_SHIFT	11
 #define E1000_EECD_FLUPD_I210		0x00800000 /* Update FLASH */
 #define E1000_EECD_FLUDONE_I210		0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLASH_DETECTED_I210	0x00080000 /* FLASH detected */
 #define E1000_FLUDONE_ATTEMPTS		20000
 #define E1000_EERD_EEWR_MAX_COUNT	512 /* buffered EEPROM words rw */
 #define E1000_I210_FIFO_SEL_RX		0x00
@@ -627,6 +628,11 @@
 #define E1000_I210_FIFO_SEL_TX_LEGACY	E1000_I210_FIFO_SEL_TX_QAV(0)
 #define E1000_I210_FIFO_SEL_BMC2OS_TX	0x06
 #define E1000_I210_FIFO_SEL_BMC2OS_RX	0x01
+#define E1000_I210_FLASH_SECTOR_SIZE	0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK		0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET	328
 #define E1000_EECD_FLUPD_I210		0x00800000 /* Update FLASH */
 #define E1000_EECD_FLUDONE_I210		0x04000000 /* Update FLASH done*/
 #define E1000_FLUDONE_ATTEMPTS		20000
@@ -665,20 +671,26 @@
 #define NVM_INIT_CTRL_4		0x0013
 #define NVM_LED_1_CFG		0x001C
 #define NVM_LED_0_2_CFG		0x001F
-
-/* NVM version defines */
 #define NVM_ETRACK_WORD		0x0042
+#define NVM_ETRACK_HIWORD	0x0043
 #define NVM_COMB_VER_OFF	0x0083
 #define NVM_COMB_VER_PTR	0x003d
-#define NVM_MAJOR_MASK		0xF000
-#define NVM_MINOR_MASK		0x0FF0
-#define NVM_BUILD_MASK		0x000F
-#define NVM_COMB_VER_MASK	0x00FF
-#define NVM_MAJOR_SHIFT		12
-#define NVM_MINOR_SHIFT		4
-#define NVM_COMB_VER_SHFT	8
-#define NVM_VER_INVALID		0xFFFF
-#define NVM_ETRACK_SHIFT	16
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK		0xF000
+#define NVM_MINOR_MASK		0x0FF0
+#define NVM_IMAGE_ID_MASK	0x000F
+#define NVM_COMB_VER_MASK	0x00FF
+#define NVM_MAJOR_SHIFT		12
+#define NVM_MINOR_SHIFT		4
+#define NVM_COMB_VER_SHFT	8
+#define NVM_VER_INVALID		0xFFFF
+#define NVM_ETRACK_SHIFT	16
+#define NVM_ETRACK_VALID	0x8000
+#define NVM_NEW_DEC_MASK	0x0F00
+#define NVM_HEX_CONV		16
+#define NVM_HEX_TENS		10
+
 #define NVM_ETS_CFG			0x003E
 #define NVM_ETS_LTHRES_DELTA_MASK	0x07C0
 #define NVM_ETS_LTHRES_DELTA_SHIFT	6
@@ -775,7 +787,7 @@
 #define I350_I_PHY_ID		0x015403B0
 #define M88_VENDOR		0x0141
 #define I210_I_PHY_ID		0x01410C00
-#define M88E1545_E_PHY_ID	0x01410EA0
+#define M88E1543_E_PHY_ID	0x01410EA0
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL	0x10 /* PHY Specific Control Register */
@@ -897,9 +909,9 @@
 #define E1000_EEE_LP_ADV_DEV_I210	7	/* EEE LP Adv Device */
 #define E1000_EEE_LP_ADV_ADDR_I210	61	/* EEE LP Adv Register */
 #define E1000_MMDAC_FUNC_DATA		0x4000	/* Data, no post increment */
-#define E1000_M88E1545_PAGE_ADDR	0x16	/* Page Offset Register */
-#define E1000_M88E1545_EEE_CTRL_1	0x0
-#define E1000_M88E1545_EEE_CTRL_1_MS	0x0001	/* EEE Master/Slave */
+#define E1000_M88E1543_PAGE_ADDR	0x16	/* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1	0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS	0x0001	/* EEE Master/Slave */
 #define E1000_EEE_ADV_DEV_I354		7
 #define E1000_EEE_ADV_ADDR_I354		60
 #define E1000_EEE_ADV_100_SUPPORTED	(1 << 1) /* 100BaseTx EEE Supported */
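
The regrouped version defines are consumed with the same mask/shift idiom: a single NVM word packs the major, minor, and image-ID fields. A hedged decode example (fw_word is illustrative, not read from real hardware):

    u16 fw_word = 0x5231;	/* example combined version word */

    u8 major = (fw_word & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;	/* 0x5 */
    u8 minor = (fw_word & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;	/* 0x23 */
    u8 image = fw_word & NVM_IMAGE_ID_MASK;			/* 0x1 */
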
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 94d7866b9c20..37a9c06a6c68 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -67,6 +67,8 @@ struct e1000_hw;
 #define E1000_DEV_ID_I210_FIBER			0x1536
 #define E1000_DEV_ID_I210_SERDES		0x1537
 #define E1000_DEV_ID_I210_SGMII			0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS	0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS	0x157C
 #define E1000_DEV_ID_I211_COPPER		0x1539
 #define E1000_DEV_ID_I354_BACKPLANE_1GBPS	0x1F40
 #define E1000_DEV_ID_I354_SGMII			0x1F41
@@ -110,6 +112,7 @@ enum e1000_nvm_type {
 	e1000_nvm_none,
 	e1000_nvm_eeprom_spi,
 	e1000_nvm_flash_hw,
+	e1000_nvm_invm,
 	e1000_nvm_flash_sw
 };
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index ddb3cf51b9b9..0c0393316a3a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -335,57 +335,101 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
335} 335}
336 336
337/** 337/**
338 * igb_read_nvm_i211 - Read NVM wrapper function for I211 338 * igb_read_invm_word_i210 - Reads OTP
339 * @hw: pointer to the HW structure
340 * @address: the word address (aka eeprom offset) to read
341 * @data: pointer to the data read
342 *
343 * Reads 16-bit words from the OTP. Return error when the word is not
344 * stored in OTP.
345 **/
346static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
347{
348 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
349 u32 invm_dword;
350 u16 i;
351 u8 record_type, word_address;
352
353 for (i = 0; i < E1000_INVM_SIZE; i++) {
354 invm_dword = rd32(E1000_INVM_DATA_REG(i));
355 /* Get record type */
356 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
357 if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
358 break;
359 if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
360 i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
361 if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
362 i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
363 if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
364 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
365 if (word_address == address) {
366 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
367 hw_dbg("Read INVM Word 0x%02x = %x",
368 address, *data);
369 status = E1000_SUCCESS;
370 break;
371 }
372 }
373 }
374 if (status != E1000_SUCCESS)
375 hw_dbg("Requested word 0x%02x not found in OTP\n", address);
376 return status;
377}
378
379/**
380 * igb_read_invm_i210 - Read invm wrapper function for I210/I211
339 * @hw: pointer to the HW structure 381 * @hw: pointer to the HW structure
340 * @words: number of words to read 382 * @words: number of words to read
341 * @data: pointer to the data read 383 * @data: pointer to the data read
342 * 384 *
343 * Wrapper function to return data formerly found in the NVM. 385 * Wrapper function to return data formerly found in the NVM.
344 **/ 386 **/
345s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, 387static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
346 u16 *data) 388 u16 words __always_unused, u16 *data)
347{ 389{
348 s32 ret_val = E1000_SUCCESS; 390 s32 ret_val = E1000_SUCCESS;
349 391
350 /* Only the MAC addr is required to be present in the iNVM */ 392 /* Only the MAC addr is required to be present in the iNVM */
351 switch (offset) { 393 switch (offset) {
352 case NVM_MAC_ADDR: 394 case NVM_MAC_ADDR:
353 ret_val = igb_read_invm_i211(hw, offset, &data[0]); 395 ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
354 ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]); 396 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
355 ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]); 397 &data[1]);
398 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
399 &data[2]);
356 if (ret_val != E1000_SUCCESS) 400 if (ret_val != E1000_SUCCESS)
357 hw_dbg("MAC Addr not found in iNVM\n"); 401 hw_dbg("MAC Addr not found in iNVM\n");
358 break; 402 break;
359 case NVM_INIT_CTRL_2: 403 case NVM_INIT_CTRL_2:
360 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 404 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
361 if (ret_val != E1000_SUCCESS) { 405 if (ret_val != E1000_SUCCESS) {
362 *data = NVM_INIT_CTRL_2_DEFAULT_I211; 406 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
363 ret_val = E1000_SUCCESS; 407 ret_val = E1000_SUCCESS;
364 } 408 }
365 break; 409 break;
366 case NVM_INIT_CTRL_4: 410 case NVM_INIT_CTRL_4:
367 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 411 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
368 if (ret_val != E1000_SUCCESS) { 412 if (ret_val != E1000_SUCCESS) {
369 *data = NVM_INIT_CTRL_4_DEFAULT_I211; 413 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
370 ret_val = E1000_SUCCESS; 414 ret_val = E1000_SUCCESS;
371 } 415 }
372 break; 416 break;
373 case NVM_LED_1_CFG: 417 case NVM_LED_1_CFG:
374 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 418 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
375 if (ret_val != E1000_SUCCESS) { 419 if (ret_val != E1000_SUCCESS) {
376 *data = NVM_LED_1_CFG_DEFAULT_I211; 420 *data = NVM_LED_1_CFG_DEFAULT_I211;
377 ret_val = E1000_SUCCESS; 421 ret_val = E1000_SUCCESS;
378 } 422 }
379 break; 423 break;
380 case NVM_LED_0_2_CFG: 424 case NVM_LED_0_2_CFG:
381 igb_read_invm_i211(hw, offset, data); 425 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
382 if (ret_val != E1000_SUCCESS) { 426 if (ret_val != E1000_SUCCESS) {
383 *data = NVM_LED_0_2_CFG_DEFAULT_I211; 427 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
384 ret_val = E1000_SUCCESS; 428 ret_val = E1000_SUCCESS;
385 } 429 }
386 break; 430 break;
387 case NVM_ID_LED_SETTINGS: 431 case NVM_ID_LED_SETTINGS:
388 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 432 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
389 if (ret_val != E1000_SUCCESS) { 433 if (ret_val != E1000_SUCCESS) {
390 *data = ID_LED_RESERVED_FFFF; 434 *data = ID_LED_RESERVED_FFFF;
391 ret_val = E1000_SUCCESS; 435 ret_val = E1000_SUCCESS;
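igb_read_invm_word_i210() above walks the OTP one dword at a time, skipping CSR- and RSA-autoload records by their fixed sizes before matching word-autoload entries against the requested address. A sketch of the dword accessors it leans on, assuming the usual i210 iNVM layout (record type in bits 2:0, word address in bits 15:9, payload in bits 31:16); the authoritative masks are the INVM_DWORD_TO_* macros in e1000_i210.h:

/* Assumed iNVM dword layout; verify against e1000_i210.h. */
#define EXAMPLE_INVM_RECORD_TYPE(dw)  ((u8)((dw) & 0x7))
#define EXAMPLE_INVM_WORD_ADDR(dw)    ((u8)(((dw) & 0x0000FE00) >> 9))
#define EXAMPLE_INVM_WORD_DATA(dw)    ((u16)(((dw) & 0xFFFF0000) >> 16))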
@@ -411,48 +455,6 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
411} 455}
412 456
413/** 457/**
414 * igb_read_invm_i211 - Reads OTP
415 * @hw: pointer to the HW structure
416 * @address: the word address (aka eeprom offset) to read
417 * @data: pointer to the data read
418 *
419 * Reads 16-bit words from the OTP. Return error when the word is not
420 * stored in OTP.
421 **/
422s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
423{
424 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
425 u32 invm_dword;
426 u16 i;
427 u8 record_type, word_address;
428
429 for (i = 0; i < E1000_INVM_SIZE; i++) {
430 invm_dword = rd32(E1000_INVM_DATA_REG(i));
431 /* Get record type */
432 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
433 if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
434 break;
435 if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
436 i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
437 if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
438 i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
439 if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
440 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
441 if (word_address == (u8)address) {
442 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
443 hw_dbg("Read INVM Word 0x%02x = %x",
444 address, *data);
445 status = E1000_SUCCESS;
446 break;
447 }
448 }
449 }
450 if (status != E1000_SUCCESS)
451 hw_dbg("Requested word 0x%02x not found in OTP\n", address);
452 return status;
453}
454
455/**
456 * igb_read_invm_version - Reads iNVM version and image type 458 * igb_read_invm_version - Reads iNVM version and image type
457 * @hw: pointer to the HW structure 459 * @hw: pointer to the HW structure
458 * @invm_ver: version structure for the version read 460 * @invm_ver: version structure for the version read
@@ -661,6 +663,23 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
661} 663}
662 664
663/** 665/**
666 * igb_get_flash_presence_i210 - Check if flash device is detected.
667 * @hw: pointer to the HW structure
668 *
669 **/
670bool igb_get_flash_presence_i210(struct e1000_hw *hw)
671{
672 u32 eec = 0;
673 bool ret_val = false;
674
675 eec = rd32(E1000_EECD);
676 if (eec & E1000_EECD_FLASH_DETECTED_I210)
677 ret_val = true;
678
679 return ret_val;
680}
681
682/**
664 * igb_update_flash_i210 - Commit EEPROM to the flash 683 * igb_update_flash_i210 - Commit EEPROM to the flash
665 * @hw: pointer to the HW structure 684 * @hw: pointer to the HW structure
666 * 685 *
@@ -786,3 +805,33 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
786{ 805{
787 return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); 806 return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
788} 807}
808
809/**
810 * igb_init_nvm_params_i210 - Init NVM func ptrs.
811 * @hw: pointer to the HW structure
812 **/
813s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
814{
815 s32 ret_val = 0;
816 struct e1000_nvm_info *nvm = &hw->nvm;
817
818 nvm->ops.acquire = igb_acquire_nvm_i210;
819 nvm->ops.release = igb_release_nvm_i210;
820 nvm->ops.valid_led_default = igb_valid_led_default_i210;
821
822 /* NVM Function Pointers */
823 if (igb_get_flash_presence_i210(hw)) {
824 hw->nvm.type = e1000_nvm_flash_hw;
825 nvm->ops.read = igb_read_nvm_srrd_i210;
826 nvm->ops.write = igb_write_nvm_srwr_i210;
827 nvm->ops.validate = igb_validate_nvm_checksum_i210;
828 nvm->ops.update = igb_update_nvm_checksum_i210;
829 } else {
830 hw->nvm.type = e1000_nvm_invm;
831 nvm->ops.read = igb_read_invm_i210;
832 nvm->ops.write = NULL;
833 nvm->ops.validate = NULL;
834 nvm->ops.update = NULL;
835 }
836 return ret_val;
837}
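igb_init_nvm_params_i210(), added at the end of the file, is the single switch point: with flash present the ops point at the shadow-RAM read/write/checksum path, otherwise reads go through igb_read_invm_i210() and write/validate/update stay NULL. A hedged sketch of a caller honoring those NULL ops; the function name is hypothetical:

/* Hypothetical caller: flashless parts have no validate op. */
static s32 igb_example_nvm_check(struct e1000_hw *hw)
{
        s32 ret_val = igb_init_nvm_params_i210(hw);

        if (ret_val)
                return ret_val;

        if (!hw->nvm.ops.validate)      /* iNVM: nothing to checksum */
                return E1000_SUCCESS;

        return hw->nvm.ops.validate(hw);
}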
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5caa332e7556..dde3c4b7ea99 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -35,20 +35,19 @@ extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
35 u16 words, u16 *data); 35 u16 words, u16 *data);
36extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, 36extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
37 u16 words, u16 *data); 37 u16 words, u16 *data);
38extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
39extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 38extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
40extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 39extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
41extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw); 40extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
42extern void igb_release_nvm_i210(struct e1000_hw *hw); 41extern void igb_release_nvm_i210(struct e1000_hw *hw);
43extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); 42extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
44extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
45 u16 *data);
46extern s32 igb_read_invm_version(struct e1000_hw *hw, 43extern s32 igb_read_invm_version(struct e1000_hw *hw,
47 struct e1000_fw_version *invm_ver); 44 struct e1000_fw_version *invm_ver);
48extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, 45extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
49 u16 *data); 46 u16 *data);
50extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, 47extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
51 u16 data); 48 u16 data);
49extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
50extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
52 51
53#define E1000_STM_OPCODE 0xDB00 52#define E1000_STM_OPCODE 0xDB00
54#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 53#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index bab556a47fcc..f0dfd41dd4bd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1171,17 +1171,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1171 hw_dbg("Half Duplex\n"); 1171 hw_dbg("Half Duplex\n");
1172 } 1172 }
1173 1173
1174 /* Check if it is an I354 2.5Gb backplane connection. */
1175 if (hw->mac.type == e1000_i354) {
1176 if ((status & E1000_STATUS_2P5_SKU) &&
1177 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1178 *speed = SPEED_2500;
1179 *duplex = FULL_DUPLEX;
1180 hw_dbg("2500 Mbs, ");
1181 hw_dbg("Full Duplex\n");
1182 }
1183 }
1184
1185 return 0; 1174 return 0;
1186} 1175}
1187 1176
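The e1000_mac.c hunk above removes the I354 2.5Gb backplane special case from the generic copper speed/duplex helper; the SKU test survives only in the ethtool igb_get_settings() hunk further down, which now owns SPEED_2500 reporting. For reference, the status decode the deleted block performed, as a standalone sketch with a hypothetical helper name:

static bool igb_link_is_2p5g(struct e1000_hw *hw)
{
        u32 status = rd32(E1000_STATUS);

        /* 2.5Gb SKU bit set and not overridden */
        return (status & E1000_STATUS_2P5_SKU) &&
               !(status & E1000_STATUS_2P5_SKU_OVER);
}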
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 7f9cd7cbd353..a7db7f3db914 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -709,11 +709,16 @@ out:
709 **/ 709 **/
710void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) 710void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
711{ 711{
712 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; 712 u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
713 u16 fw_version; 713 u8 q, hval, rem, result;
714 u16 comb_verh, comb_verl, comb_offset;
714 715
715 memset(fw_vers, 0, sizeof(struct e1000_fw_version)); 716 memset(fw_vers, 0, sizeof(struct e1000_fw_version));
716 717
718 /* basic eeprom version numbers and bits used vary by part and by tool
719 * used to create the nvm images. Check which data format we have.
720 */
721 hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
717 switch (hw->mac.type) { 722 switch (hw->mac.type) {
718 case e1000_i211: 723 case e1000_i211:
719 igb_read_invm_version(hw, fw_vers); 724 igb_read_invm_version(hw, fw_vers);
@@ -721,30 +726,30 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
721 case e1000_82575: 726 case e1000_82575:
722 case e1000_82576: 727 case e1000_82576:
723 case e1000_82580: 728 case e1000_82580:
724 case e1000_i354: 729 /* Use this format, unless EETRACK ID exists,
725 case e1000_i350: 730 * then use alternate format
726 case e1000_i210: 731 */
732 if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
733 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
734 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
735 >> NVM_MAJOR_SHIFT;
736 fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
737 >> NVM_MINOR_SHIFT;
738 fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
739 goto etrack_id;
740 }
727 break; 741 break;
728 default:
729 return;
730 }
731 /* basic eeprom version numbers */
732 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
733 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
734 fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
735
736 /* etrack id */
737 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
738 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
739 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
740
741 switch (hw->mac.type) {
742 case e1000_i210: 742 case e1000_i210:
743 case e1000_i354: 743 if (!(igb_get_flash_presence_i210(hw))) {
744 igb_read_invm_version(hw, fw_vers);
745 return;
746 }
747 /* fall through */
744 case e1000_i350: 748 case e1000_i350:
745 /* find combo image version */ 749 /* find combo image version */
746 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); 750 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
747 if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) { 751 if ((comb_offset != 0x0) &&
752 (comb_offset != NVM_VER_INVALID)) {
748 753
749 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset 754 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
750 + 1), 1, &comb_verh); 755 + 1), 1, &comb_verh);
@@ -760,15 +765,42 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
760 fw_vers->or_major = 765 fw_vers->or_major =
761 comb_verl >> NVM_COMB_VER_SHFT; 766 comb_verl >> NVM_COMB_VER_SHFT;
762 fw_vers->or_build = 767 fw_vers->or_build =
763 ((comb_verl << NVM_COMB_VER_SHFT) 768 (comb_verl << NVM_COMB_VER_SHFT)
764 | (comb_verh >> NVM_COMB_VER_SHFT)); 769 | (comb_verh >> NVM_COMB_VER_SHFT);
765 fw_vers->or_patch = 770 fw_vers->or_patch =
766 comb_verh & NVM_COMB_VER_MASK; 771 comb_verh & NVM_COMB_VER_MASK;
767 } 772 }
768 } 773 }
769 break; 774 break;
770 default: 775 default:
771 break; 776 return;
777 }
778 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
779 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
780 >> NVM_MAJOR_SHIFT;
781
782 /* check for old style version format in newer images*/
783 if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
784 eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
785 } else {
786 eeprom_verl = (fw_version & NVM_MINOR_MASK)
787 >> NVM_MINOR_SHIFT;
788 }
789 /* Convert minor value to hex before assigning to output struct
790 * Val to be converted will not be higher than 99, per tool output
791 */
792 q = eeprom_verl / NVM_HEX_CONV;
793 hval = q * NVM_HEX_TENS;
794 rem = eeprom_verl % NVM_HEX_CONV;
795 result = hval + rem;
796 fw_vers->eep_minor = result;
797
798etrack_id:
799 if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
800 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
801 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
802 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
803 | eeprom_verl;
772 } 804 }
773 return; 805 return;
774} 806}
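The new tail of igb_get_fw_version() converts a hex-coded minor value to its decimal-looking equivalent before storing it. A worked sketch, assuming NVM_HEX_CONV is 16 and NVM_HEX_TENS is 10 (check e1000_nvm.h): a stored minor of 0x19 becomes 19, so printing eep_minor with %d shows the same digits the imaging tool wrote.

/* Worked example of the conversion above, under the assumed
 * NVM_HEX_CONV == 16 and NVM_HEX_TENS == 10.
 */
u16 eeprom_verl = 0x19;         /* 25 decimal, hex-coded "19" */
u8 q      = eeprom_verl / 16;   /* 1  */
u8 hval   = q * 10;             /* 10 */
u8 rem    = eeprom_verl % 16;   /* 9  */
u8 result = hval + rem;         /* 19 */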
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 6bfc0c43aace..433b7419cb98 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -44,6 +44,7 @@ struct e1000_fw_version {
44 u32 etrack_id; 44 u32 etrack_id;
45 u16 eep_major; 45 u16 eep_major;
46 u16 eep_minor; 46 u16 eep_minor;
47 u16 eep_build;
47 48
48 u8 invm_major; 49 u8 invm_major;
49 u8 invm_minor; 50 u8 invm_minor;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 60461946f98c..e7266759a10b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -731,15 +731,13 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
731 s32 ret_val; 731 s32 ret_val;
732 u16 phy_data; 732 u16 phy_data;
733 733
734 if (phy->reset_disable) { 734 if (phy->reset_disable)
735 ret_val = 0; 735 return 0;
736 goto out;
737 }
738 736
739 /* Enable CRS on Tx. This must be set for half-duplex operation. */ 737 /* Enable CRS on Tx. This must be set for half-duplex operation. */
740 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 738 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
741 if (ret_val) 739 if (ret_val)
742 goto out; 740 return ret_val;
743 741
744 /* Options: 742 /* Options:
745 * MDI/MDI-X = 0 (default) 743 * MDI/MDI-X = 0 (default)
@@ -780,23 +778,36 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
780 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 778 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
781 779
782 /* Enable downshift and setting it to X6 */ 780 /* Enable downshift and setting it to X6 */
781 if (phy->id == M88E1543_E_PHY_ID) {
782 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
783 ret_val =
784 phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
785 if (ret_val)
786 return ret_val;
787
788 ret_val = igb_phy_sw_reset(hw);
789 if (ret_val) {
790 hw_dbg("Error committing the PHY changes\n");
791 return ret_val;
792 }
793 }
794
783 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; 795 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
784 phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; 796 phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
785 phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; 797 phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
786 798
787 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 799 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
788 if (ret_val) 800 if (ret_val)
789 goto out; 801 return ret_val;
790 802
791 /* Commit the changes. */ 803 /* Commit the changes. */
792 ret_val = igb_phy_sw_reset(hw); 804 ret_val = igb_phy_sw_reset(hw);
793 if (ret_val) { 805 if (ret_val) {
794 hw_dbg("Error committing the PHY changes\n"); 806 hw_dbg("Error committing the PHY changes\n");
795 goto out; 807 return ret_val;
796 } 808 }
797 809
798out: 810 return 0;
799 return ret_val;
800} 811}
801 812
802/** 813/**
@@ -1806,7 +1817,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1806 phy->max_cable_length = phy_data / (is_cm ? 100 : 1); 1817 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1807 phy->cable_length = phy_data / (is_cm ? 100 : 1); 1818 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1808 break; 1819 break;
1809 case M88E1545_E_PHY_ID: 1820 case M88E1543_E_PHY_ID:
1810 case I347AT4_E_PHY_ID: 1821 case I347AT4_E_PHY_ID:
1811 /* Remember the original page select and set it to 7 */ 1822 /* Remember the original page select and set it to 7 */
1812 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, 1823 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
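For the M88E1543 PHY, the hunk above first clears the downshift-enable bit, writes the register, and soft-resets the PHY before the common path re-enables downshift at X6; the extra reset is what commits the intermediate state. The pattern, condensed into one hedged helper (name hypothetical, register constants from the hunk):

static s32 igb_m88_update_pscr(struct e1000_hw *hw, u16 clear, u16 set)
{
        u16 phy_data;
        s32 ret_val;

        ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
        if (ret_val)
                return ret_val;

        phy_data &= ~clear;
        phy_data |= set;

        ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
        if (ret_val)
                return ret_val;

        /* changes only take effect after a software reset */
        return igb_phy_sw_reset(hw);
}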
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 15ea8dc9dad3..6807b098edae 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -343,6 +343,8 @@ struct hwmon_buff {
343 }; 343 };
344#endif 344#endif
345 345
346#define IGB_RETA_SIZE 128
347
346/* board specific private data structure */ 348/* board specific private data structure */
347struct igb_adapter { 349struct igb_adapter {
348 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 350 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -444,6 +446,10 @@ struct igb_adapter {
444 struct i2c_algo_bit_data i2c_algo; 446 struct i2c_algo_bit_data i2c_algo;
445 struct i2c_adapter i2c_adap; 447 struct i2c_adapter i2c_adap;
446 struct i2c_client *i2c_client; 448 struct i2c_client *i2c_client;
449 u32 rss_indir_tbl_init;
450 u8 rss_indir_tbl[IGB_RETA_SIZE];
451
452 unsigned long link_check_timeout;
447}; 453};
448 454
449#define IGB_FLAG_HAS_MSI (1 << 0) 455#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -455,6 +461,7 @@ struct igb_adapter {
455#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) 461#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
456#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) 462#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
457#define IGB_FLAG_WOL_SUPPORTED (1 << 8) 463#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
464#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
458 465
459/* DMA Coalescing defines */ 466/* DMA Coalescing defines */
460#define IGB_MIN_TXPBSIZE 20408 467#define IGB_MIN_TXPBSIZE 20408
@@ -480,6 +487,7 @@ extern int igb_up(struct igb_adapter *);
480extern void igb_down(struct igb_adapter *); 487extern void igb_down(struct igb_adapter *);
481extern void igb_reinit_locked(struct igb_adapter *); 488extern void igb_reinit_locked(struct igb_adapter *);
482extern void igb_reset(struct igb_adapter *); 489extern void igb_reset(struct igb_adapter *);
490extern void igb_write_rss_indir_tbl(struct igb_adapter *);
483extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8); 491extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
484extern int igb_setup_tx_resources(struct igb_ring *); 492extern int igb_setup_tx_resources(struct igb_ring *);
485extern int igb_setup_rx_resources(struct igb_ring *); 493extern int igb_setup_rx_resources(struct igb_ring *);
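IGB_RETA_SIZE and rss_indir_tbl back the new ethtool indirection-table support; igb_setup_mrqc() (below) fills the table so queues are spread evenly, entry j mapping to (j * num_queues) / 128. A standalone illustration: with 4 queues the 128 entries come out as 32 zeros, then 32 ones, twos, and threes.

/* Standalone check of the default RETA fill used by the patch. */
#include <stdio.h>

int main(void)
{
        unsigned int reta[128], num_queues = 4, j;

        for (j = 0; j < 128; j++)
                reta[j] = (j * num_queues) / 128;

        printf("entry 0 -> q%u, entry 31 -> q%u, entry 32 -> q%u\n",
               reta[0], reta[31], reta[32]);    /* q0, q0, q1 */
        return 0;
}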
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 85fe7b52f435..48cbc833b051 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -172,10 +172,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
172 SUPPORTED_Autoneg | 172 SUPPORTED_Autoneg |
173 SUPPORTED_Pause); 173 SUPPORTED_Pause);
174 ecmd->advertising = ADVERTISED_FIBRE; 174 ecmd->advertising = ADVERTISED_FIBRE;
175 if (hw->mac.type == e1000_i354) { 175
176 ecmd->supported |= SUPPORTED_2500baseX_Full;
177 ecmd->advertising |= ADVERTISED_2500baseX_Full;
178 }
179 if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) { 176 if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
180 ecmd->supported |= SUPPORTED_1000baseT_Full; 177 ecmd->supported |= SUPPORTED_1000baseT_Full;
181 ecmd->advertising |= ADVERTISED_1000baseT_Full; 178 ecmd->advertising |= ADVERTISED_1000baseT_Full;
@@ -209,16 +206,23 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
209 status = rd32(E1000_STATUS); 206 status = rd32(E1000_STATUS);
210 207
211 if (status & E1000_STATUS_LU) { 208 if (status & E1000_STATUS_LU) {
212 if ((hw->mac.type == e1000_i354) && 209 if (hw->mac.type == e1000_i354) {
213 (status & E1000_STATUS_2P5_SKU) && 210 if ((status & E1000_STATUS_2P5_SKU) &&
214 !(status & E1000_STATUS_2P5_SKU_OVER)) 211 !(status & E1000_STATUS_2P5_SKU_OVER)) {
215 ecmd->speed = SPEED_2500; 212 ecmd->supported = SUPPORTED_2500baseX_Full;
216 else if (status & E1000_STATUS_SPEED_1000) 213 ecmd->advertising = ADVERTISED_2500baseX_Full;
214 ecmd->speed = SPEED_2500;
215 } else {
216 ecmd->supported = SUPPORTED_1000baseT_Full;
217 ecmd->advertising = ADVERTISED_1000baseT_Full;
218 }
219 } else if (status & E1000_STATUS_SPEED_1000) {
217 ecmd->speed = SPEED_1000; 220 ecmd->speed = SPEED_1000;
218 else if (status & E1000_STATUS_SPEED_100) 221 } else if (status & E1000_STATUS_SPEED_100) {
219 ecmd->speed = SPEED_100; 222 ecmd->speed = SPEED_100;
220 else 223 } else {
221 ecmd->speed = SPEED_10; 224 ecmd->speed = SPEED_10;
225 }
222 if ((status & E1000_STATUS_FD) || 226 if ((status & E1000_STATUS_FD) ||
223 hw->phy.media_type != e1000_media_type_copper) 227 hw->phy.media_type != e1000_media_type_copper)
224 ecmd->duplex = DUPLEX_FULL; 228 ecmd->duplex = DUPLEX_FULL;
@@ -1335,12 +1339,23 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1335 1339
1336static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) 1340static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1337{ 1341{
1342 struct e1000_hw *hw = &adapter->hw;
1343
1338 *data = 0; 1344 *data = 0;
1339 1345
1340 /* Validate eeprom on all parts but i211 */ 1346 /* Validate eeprom on all parts but flashless */
1341 if (adapter->hw.mac.type != e1000_i211) { 1347 switch (hw->mac.type) {
1348 case e1000_i210:
1349 case e1000_i211:
1350 if (igb_get_flash_presence_i210(hw)) {
1351 if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
1352 *data = 2;
1353 }
1354 break;
1355 default:
1342 if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) 1356 if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
1343 *data = 2; 1357 *data = 2;
1358 break;
1344 } 1359 }
1345 1360
1346 return *data; 1361 return *data;
@@ -2672,7 +2687,9 @@ static int igb_set_eee(struct net_device *netdev,
2672 igb_set_eee_i350(hw); 2687 igb_set_eee_i350(hw);
2673 2688
2674 /* reset link */ 2689 /* reset link */
2675 if (!netif_running(netdev)) 2690 if (netif_running(netdev))
2691 igb_reinit_locked(adapter);
2692 else
2676 igb_reset(adapter); 2693 igb_reset(adapter);
2677 } 2694 }
2678 2695
@@ -2771,6 +2788,90 @@ static void igb_ethtool_complete(struct net_device *netdev)
2771 pm_runtime_put(&adapter->pdev->dev); 2788 pm_runtime_put(&adapter->pdev->dev);
2772} 2789}
2773 2790
2791static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
2792{
2793 return IGB_RETA_SIZE;
2794}
2795
2796static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
2797{
2798 struct igb_adapter *adapter = netdev_priv(netdev);
2799 int i;
2800
2801 for (i = 0; i < IGB_RETA_SIZE; i++)
2802 indir[i] = adapter->rss_indir_tbl[i];
2803
2804 return 0;
2805}
2806
2807void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
2808{
2809 struct e1000_hw *hw = &adapter->hw;
2810 u32 reg = E1000_RETA(0);
2811 u32 shift = 0;
2812 int i = 0;
2813
2814 switch (hw->mac.type) {
2815 case e1000_82575:
2816 shift = 6;
2817 break;
2818 case e1000_82576:
2819 /* 82576 supports 2 RSS queues for SR-IOV */
2820 if (adapter->vfs_allocated_count)
2821 shift = 3;
2822 break;
2823 default:
2824 break;
2825 }
2826
2827 while (i < IGB_RETA_SIZE) {
2828 u32 val = 0;
2829 int j;
2830
2831 for (j = 3; j >= 0; j--) {
2832 val <<= 8;
2833 val |= adapter->rss_indir_tbl[i + j];
2834 }
2835
2836 wr32(reg, val << shift);
2837 reg += 4;
2838 i += 4;
2839 }
2840}
2841
2842static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
2843{
2844 struct igb_adapter *adapter = netdev_priv(netdev);
2845 struct e1000_hw *hw = &adapter->hw;
2846 int i;
2847 u32 num_queues;
2848
2849 num_queues = adapter->rss_queues;
2850
2851 switch (hw->mac.type) {
2852 case e1000_82576:
2853 /* 82576 supports 2 RSS queues for SR-IOV */
2854 if (adapter->vfs_allocated_count)
2855 num_queues = 2;
2856 break;
2857 default:
2858 break;
2859 }
2860
2861 /* Verify user input. */
2862 for (i = 0; i < IGB_RETA_SIZE; i++)
2863 if (indir[i] >= num_queues)
2864 return -EINVAL;
2865
2866
2867 for (i = 0; i < IGB_RETA_SIZE; i++)
2868 adapter->rss_indir_tbl[i] = indir[i];
2869
2870 igb_write_rss_indir_tbl(adapter);
2871
2872 return 0;
2873}
2874
2774static const struct ethtool_ops igb_ethtool_ops = { 2875static const struct ethtool_ops igb_ethtool_ops = {
2775 .get_settings = igb_get_settings, 2876 .get_settings = igb_get_settings,
2776 .set_settings = igb_set_settings, 2877 .set_settings = igb_set_settings,
@@ -2804,6 +2905,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
2804 .set_eee = igb_set_eee, 2905 .set_eee = igb_set_eee,
2805 .get_module_info = igb_get_module_info, 2906 .get_module_info = igb_get_module_info,
2806 .get_module_eeprom = igb_get_module_eeprom, 2907 .get_module_eeprom = igb_get_module_eeprom,
2908 .get_rxfh_indir_size = igb_get_rxfh_indir_size,
2909 .get_rxfh_indir = igb_get_rxfh_indir,
2910 .set_rxfh_indir = igb_set_rxfh_indir,
2807 .begin = igb_ethtool_begin, 2911 .begin = igb_ethtool_begin,
2808 .complete = igb_ethtool_complete, 2912 .complete = igb_ethtool_complete,
2809}; 2913};
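igb_write_rss_indir_tbl() above packs four 8-bit table entries into each 32-bit RETA register, low byte first, then applies the per-MAC shift (6 on 82575, 3 on 82576 with VFs allocated). How one register is assembled, unrolled:

/* One RETA register as built by the inner loop (j counts 3..0),
 * before the per-MAC shift; entry i+0 ends up in bits 7:0.
 */
u8 e0 = 1, e1 = 2, e2 = 3, e3 = 0;
u32 val = 0;

val = (val << 8) | e3;  /* j == 3 */
val = (val << 8) | e2;  /* j == 2 */
val = (val << 8) | e1;  /* j == 1 */
val = (val << 8) | e0;  /* j == 0: val == 0x00030201 */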
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1d72c03cb59..8cf44f2a8ccd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -62,7 +62,7 @@
62 62
63#define MAJ 5 63#define MAJ 5
64#define MIN 0 64#define MIN 0
65#define BUILD 3 65#define BUILD 5
66#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 66#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
67__stringify(BUILD) "-k" 67__stringify(BUILD) "-k"
68char igb_driver_name[] = "igb"; 68char igb_driver_name[] = "igb";
@@ -85,6 +85,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, 85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, 86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, 87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, 90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, 91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, 92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@ -1013,7 +1015,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1013 adapter->q_vector[v_idx] = NULL; 1015 adapter->q_vector[v_idx] = NULL;
1014 netif_napi_del(&q_vector->napi); 1016 netif_napi_del(&q_vector->napi);
1015 1017
1016 /* ixgbe_get_stats64() might access the rings on this vector, 1018 /* igb_get_stats64() might access the rings on this vector,
1017 * we must wait a grace period before freeing it. 1019 * we must wait a grace period before freeing it.
1018 */ 1020 */
1019 kfree_rcu(q_vector, rcu); 1021 kfree_rcu(q_vector, rcu);
@@ -1669,6 +1671,8 @@ void igb_down(struct igb_adapter *adapter)
1669 1671
1670 igb_irq_disable(adapter); 1672 igb_irq_disable(adapter);
1671 1673
1674 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
1675
1672 for (i = 0; i < adapter->num_q_vectors; i++) { 1676 for (i = 0; i < adapter->num_q_vectors; i++) {
1673 napi_synchronize(&(adapter->q_vector[i]->napi)); 1677 napi_synchronize(&(adapter->q_vector[i]->napi));
1674 napi_disable(&(adapter->q_vector[i]->napi)); 1678 napi_disable(&(adapter->q_vector[i]->napi));
@@ -1929,12 +1933,17 @@ void igb_set_fw_version(struct igb_adapter *adapter)
1929 igb_get_fw_version(hw, &fw); 1933 igb_get_fw_version(hw, &fw);
1930 1934
1931 switch (hw->mac.type) { 1935 switch (hw->mac.type) {
1936 case e1000_i210:
1932 case e1000_i211: 1937 case e1000_i211:
1933 snprintf(adapter->fw_version, sizeof(adapter->fw_version), 1938 if (!(igb_get_flash_presence_i210(hw))) {
1934 "%2d.%2d-%d", 1939 snprintf(adapter->fw_version,
1935 fw.invm_major, fw.invm_minor, fw.invm_img_type); 1940 sizeof(adapter->fw_version),
1936 break; 1941 "%2d.%2d-%d",
1937 1942 fw.invm_major, fw.invm_minor,
1943 fw.invm_img_type);
1944 break;
1945 }
1946 /* fall through */
1938 default: 1947 default:
1939 /* if option is rom valid, display its version too */ 1948 /* if option is rom valid, display its version too */
1940 if (fw.or_valid) { 1949 if (fw.or_valid) {
@@ -1944,11 +1953,16 @@ void igb_set_fw_version(struct igb_adapter *adapter)
1944 fw.eep_major, fw.eep_minor, fw.etrack_id, 1953 fw.eep_major, fw.eep_minor, fw.etrack_id,
1945 fw.or_major, fw.or_build, fw.or_patch); 1954 fw.or_major, fw.or_build, fw.or_patch);
1946 /* no option rom */ 1955 /* no option rom */
1947 } else { 1956 } else if (fw.etrack_id != 0X0000) {
1948 snprintf(adapter->fw_version, 1957 snprintf(adapter->fw_version,
1949 sizeof(adapter->fw_version), 1958 sizeof(adapter->fw_version),
1950 "%d.%d, 0x%08x", 1959 "%d.%d, 0x%08x",
1951 fw.eep_major, fw.eep_minor, fw.etrack_id); 1960 fw.eep_major, fw.eep_minor, fw.etrack_id);
1961 } else {
1962 snprintf(adapter->fw_version,
1963 sizeof(adapter->fw_version),
1964 "%d.%d.%d",
1965 fw.eep_major, fw.eep_minor, fw.eep_build);
1952 } 1966 }
1953 break; 1967 break;
1954 } 1968 }
@@ -2166,15 +2180,28 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2166 */ 2180 */
2167 hw->mac.ops.reset_hw(hw); 2181 hw->mac.ops.reset_hw(hw);
2168 2182
2169 /* make sure the NVM is good , i211 parts have special NVM that 2183 /* make sure the NVM is good , i211/i210 parts can have special NVM
2170 * doesn't contain a checksum 2184 * that doesn't contain a checksum
2171 */ 2185 */
2172 if (hw->mac.type != e1000_i211) { 2186 switch (hw->mac.type) {
2187 case e1000_i210:
2188 case e1000_i211:
2189 if (igb_get_flash_presence_i210(hw)) {
2190 if (hw->nvm.ops.validate(hw) < 0) {
2191 dev_err(&pdev->dev,
2192 "The NVM Checksum Is Not Valid\n");
2193 err = -EIO;
2194 goto err_eeprom;
2195 }
2196 }
2197 break;
2198 default:
2173 if (hw->nvm.ops.validate(hw) < 0) { 2199 if (hw->nvm.ops.validate(hw) < 0) {
2174 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 2200 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2175 err = -EIO; 2201 err = -EIO;
2176 goto err_eeprom; 2202 goto err_eeprom;
2177 } 2203 }
2204 break;
2178 } 2205 }
2179 2206
2180 /* copy the MAC address out of the NVM */ 2207 /* copy the MAC address out of the NVM */
@@ -2342,7 +2369,14 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2342 "Width x1" : "unknown"), netdev->dev_addr); 2369 "Width x1" : "unknown"), netdev->dev_addr);
2343 } 2370 }
2344 2371
2345 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH); 2372 if ((hw->mac.type >= e1000_i210 ||
2373 igb_get_flash_presence_i210(hw))) {
2374 ret_val = igb_read_part_string(hw, part_str,
2375 E1000_PBANUM_LENGTH);
2376 } else {
2377 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
2378 }
2379
2346 if (ret_val) 2380 if (ret_val)
2347 strcpy(part_str, "Unknown"); 2381 strcpy(part_str, "Unknown");
2348 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); 2382 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
@@ -2436,6 +2470,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
2436 int err = 0; 2470 int err = 0;
2437 int i; 2471 int i;
2438 2472
2473 if (!adapter->msix_entries) {
2474 err = -EPERM;
2475 goto out;
2476 }
2477
2439 if (!num_vfs) 2478 if (!num_vfs)
2440 goto out; 2479 goto out;
2441 else if (old_vfs && old_vfs == num_vfs) 2480 else if (old_vfs && old_vfs == num_vfs)
@@ -3096,7 +3135,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3096{ 3135{
3097 struct e1000_hw *hw = &adapter->hw; 3136 struct e1000_hw *hw = &adapter->hw;
3098 u32 mrqc, rxcsum; 3137 u32 mrqc, rxcsum;
3099 u32 j, num_rx_queues, shift = 0; 3138 u32 j, num_rx_queues;
3100 static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, 3139 static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
3101 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, 3140 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
3102 0xA32DCB77, 0x0CF23080, 0x3BB7426A, 3141 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
@@ -3109,35 +3148,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3109 num_rx_queues = adapter->rss_queues; 3148 num_rx_queues = adapter->rss_queues;
3110 3149
3111 switch (hw->mac.type) { 3150 switch (hw->mac.type) {
3112 case e1000_82575:
3113 shift = 6;
3114 break;
3115 case e1000_82576: 3151 case e1000_82576:
3116 /* 82576 supports 2 RSS queues for SR-IOV */ 3152 /* 82576 supports 2 RSS queues for SR-IOV */
3117 if (adapter->vfs_allocated_count) { 3153 if (adapter->vfs_allocated_count)
3118 shift = 3;
3119 num_rx_queues = 2; 3154 num_rx_queues = 2;
3120 }
3121 break; 3155 break;
3122 default: 3156 default:
3123 break; 3157 break;
3124 } 3158 }
3125 3159
3126 /* Populate the indirection table 4 entries at a time. To do this 3160 if (adapter->rss_indir_tbl_init != num_rx_queues) {
3127 * we are generating the results for n and n+2 and then interleaving 3161 for (j = 0; j < IGB_RETA_SIZE; j++)
3128 * those with the results with n+1 and n+3. 3162 adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
3129 */ 3163 adapter->rss_indir_tbl_init = num_rx_queues;
3130 for (j = 0; j < 32; j++) {
3131 /* first pass generates n and n+2 */
3132 u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
3133 u32 reta = (base & 0x07800780) >> (7 - shift);
3134
3135 /* second pass generates n+1 and n+3 */
3136 base += 0x00010001 * num_rx_queues;
3137 reta |= (base & 0x07800780) << (1 + shift);
3138
3139 wr32(E1000_RETA(j), reta);
3140 } 3164 }
3165 igb_write_rss_indir_tbl(adapter);
3141 3166
3142 /* Disable raw packet checksumming so that RSS hash is placed in 3167 /* Disable raw packet checksumming so that RSS hash is placed in
3143 * descriptor on writeback. No need to enable TCP/UDP/IP checksum 3168 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
@@ -3844,7 +3869,6 @@ bool igb_has_link(struct igb_adapter *adapter)
3844{ 3869{
3845 struct e1000_hw *hw = &adapter->hw; 3870 struct e1000_hw *hw = &adapter->hw;
3846 bool link_active = false; 3871 bool link_active = false;
3847 s32 ret_val = 0;
3848 3872
3849 /* get_link_status is set on LSC (link status) interrupt or 3873 /* get_link_status is set on LSC (link status) interrupt or
3850 * rx sequence error interrupt. get_link_status will stay 3874 * rx sequence error interrupt. get_link_status will stay
@@ -3853,22 +3877,28 @@ bool igb_has_link(struct igb_adapter *adapter)
3853 */ 3877 */
3854 switch (hw->phy.media_type) { 3878 switch (hw->phy.media_type) {
3855 case e1000_media_type_copper: 3879 case e1000_media_type_copper:
3856 if (hw->mac.get_link_status) { 3880 if (!hw->mac.get_link_status)
3857 ret_val = hw->mac.ops.check_for_link(hw); 3881 return true;
3858 link_active = !hw->mac.get_link_status;
3859 } else {
3860 link_active = true;
3861 }
3862 break;
3863 case e1000_media_type_internal_serdes: 3882 case e1000_media_type_internal_serdes:
3864 ret_val = hw->mac.ops.check_for_link(hw); 3883 hw->mac.ops.check_for_link(hw);
3865 link_active = hw->mac.serdes_has_link; 3884 link_active = !hw->mac.get_link_status;
3866 break; 3885 break;
3867 default: 3886 default:
3868 case e1000_media_type_unknown: 3887 case e1000_media_type_unknown:
3869 break; 3888 break;
3870 } 3889 }
3871 3890
3891 if (((hw->mac.type == e1000_i210) ||
3892 (hw->mac.type == e1000_i211)) &&
3893 (hw->phy.id == I210_I_PHY_ID)) {
3894 if (!netif_carrier_ok(adapter->netdev)) {
3895 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
3896 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
3897 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
3898 adapter->link_check_timeout = jiffies;
3899 }
3900 }
3901
3872 return link_active; 3902 return link_active;
3873} 3903}
3874 3904
@@ -3913,6 +3943,14 @@ static void igb_watchdog_task(struct work_struct *work)
3913 int i; 3943 int i;
3914 3944
3915 link = igb_has_link(adapter); 3945 link = igb_has_link(adapter);
3946
3947 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
3948 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
3949 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
3950 else
3951 link = false;
3952 }
3953
3916 if (link) { 3954 if (link) {
3917 /* Cancel scheduled suspend requests. */ 3955 /* Cancel scheduled suspend requests. */
3918 pm_runtime_resume(netdev->dev.parent); 3956 pm_runtime_resume(netdev->dev.parent);
@@ -4037,9 +4075,14 @@ static void igb_watchdog_task(struct work_struct *work)
4037 igb_ptp_rx_hang(adapter); 4075 igb_ptp_rx_hang(adapter);
4038 4076
4039 /* Reset the timer */ 4077 /* Reset the timer */
4040 if (!test_bit(__IGB_DOWN, &adapter->state)) 4078 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4041 mod_timer(&adapter->watchdog_timer, 4079 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
4042 round_jiffies(jiffies + 2 * HZ)); 4080 mod_timer(&adapter->watchdog_timer,
4081 round_jiffies(jiffies + HZ));
4082 else
4083 mod_timer(&adapter->watchdog_timer,
4084 round_jiffies(jiffies + 2 * HZ));
4085 }
4043} 4086}
4044 4087
4045enum latency_range { 4088enum latency_range {
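The igb_has_link() and watchdog hunks above cooperate to debounce flapping i210/i211 internal PHYs: once carrier is up, IGB_FLAG_NEED_LINK_UPDATE arms a one-second window, the watchdog reschedules at HZ instead of 2 * HZ, and reported link stays false until link_check_timeout ages out. The same logic as one hedged helper (name hypothetical, fields from the patch):

static bool igb_link_debounced(struct igb_adapter *adapter, bool raw_link)
{
        if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE))
                return raw_link;

        if (time_after(jiffies, adapter->link_check_timeout + HZ)) {
                adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
                return raw_link;        /* stable for a full second */
        }

        return false;                   /* still settling */
}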
@@ -4814,6 +4857,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4814 return -EINVAL; 4857 return -EINVAL;
4815 } 4858 }
4816 4859
4860 /* adjust max frame to be at least the size of a standard frame */
4861 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4862 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
4863
4817 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 4864 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4818 msleep(1); 4865 msleep(1);
4819 4866
@@ -4865,6 +4912,8 @@ void igb_update_stats(struct igb_adapter *adapter,
4865 4912
4866 bytes = 0; 4913 bytes = 0;
4867 packets = 0; 4914 packets = 0;
4915
4916 rcu_read_lock();
4868 for (i = 0; i < adapter->num_rx_queues; i++) { 4917 for (i = 0; i < adapter->num_rx_queues; i++) {
4869 u32 rqdpc = rd32(E1000_RQDPC(i)); 4918 u32 rqdpc = rd32(E1000_RQDPC(i));
4870 struct igb_ring *ring = adapter->rx_ring[i]; 4919 struct igb_ring *ring = adapter->rx_ring[i];
@@ -4900,6 +4949,7 @@ void igb_update_stats(struct igb_adapter *adapter,
4900 } 4949 }
4901 net_stats->tx_bytes = bytes; 4950 net_stats->tx_bytes = bytes;
4902 net_stats->tx_packets = packets; 4951 net_stats->tx_packets = packets;
4952 rcu_read_unlock();
4903 4953
4904 /* read stats registers */ 4954 /* read stats registers */
4905 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 4955 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7e8c477b0ab9..5a54e3dc535d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -97,14 +97,14 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
97{ 97{
98 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 98 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
99 struct e1000_hw *hw = &igb->hw; 99 struct e1000_hw *hw = &igb->hw;
100 u32 lo, hi;
100 u64 val; 101 u64 val;
101 u32 lo, hi, jk;
102 102
103 /* The timestamp latches on lowest register read. For the 82580 103 /* The timestamp latches on lowest register read. For the 82580
104 * the lowest register is SYSTIMR instead of SYSTIML. However we only 104 * the lowest register is SYSTIMR instead of SYSTIML. However we only
105 * need to provide nanosecond resolution, so we just ignore it. 105 * need to provide nanosecond resolution, so we just ignore it.
106 */ 106 */
107 jk = rd32(E1000_SYSTIMR); 107 rd32(E1000_SYSTIMR);
108 lo = rd32(E1000_SYSTIML); 108 lo = rd32(E1000_SYSTIML);
109 hi = rd32(E1000_SYSTIMH); 109 hi = rd32(E1000_SYSTIMH);
110 110
@@ -118,13 +118,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
118static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) 118static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
119{ 119{
120 struct e1000_hw *hw = &adapter->hw; 120 struct e1000_hw *hw = &adapter->hw;
121 u32 sec, nsec, jk; 121 u32 sec, nsec;
122 122
123 /* The timestamp latches on lowest register read. For I210/I211, the 123 /* The timestamp latches on lowest register read. For I210/I211, the
124 * lowest register is SYSTIMR. Since we only need to provide nanosecond 124 * lowest register is SYSTIMR. Since we only need to provide nanosecond
125 * resolution, we can ignore it. 125 * resolution, we can ignore it.
126 */ 126 */
127 jk = rd32(E1000_SYSTIMR); 127 rd32(E1000_SYSTIMR);
128 nsec = rd32(E1000_SYSTIML); 128 nsec = rd32(E1000_SYSTIML);
129 sec = rd32(E1000_SYSTIMH); 129 sec = rd32(E1000_SYSTIMH);
130 130
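Both igb_ptp.c hunks drop the write-only jk variable but keep the read itself: on 82580/i210 the timestamp latches on the lowest-register access, so SYSTIMR must be read, and may be discarded, before SYSTIML/SYSTIMH are sampled. Composing the 82580-style counter value, sketched:

/* Latch-then-read pattern from igb_ptp_read_82580(); assumes an
 * e1000_hw *hw in scope for the rd32() macro.
 */
u32 lo, hi;
u64 val;

rd32(E1000_SYSTIMR);            /* latch; sub-ns residue ignored */
lo = rd32(E1000_SYSTIML);
hi = rd32(E1000_SYSTIMH);
val = ((u64)hi << 32) | lo;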
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fce3e92f9d11..9f6b236828e6 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -718,8 +718,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
718 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); 718 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
719 txdr->size = ALIGN(txdr->size, 4096); 719 txdr->size = ALIGN(txdr->size, 4096);
720 720
721 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 721 txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
722 GFP_KERNEL | __GFP_ZERO); 722 GFP_KERNEL);
723 if (!txdr->desc) { 723 if (!txdr->desc) {
724 vfree(txdr->buffer_info); 724 vfree(txdr->buffer_info);
725 return -ENOMEM; 725 return -ENOMEM;
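The ixgb hunk swaps dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) for dma_zalloc_coherent(), which in this era of the kernel was a thin inline wrapper with identical semantics, roughly as below (from memory; see include/linux/dma-mapping.h in the tree):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_coherent(dev, size, dma_handle,
                                  flag | __GFP_ZERO);
}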
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a6494e5daffe..0ac6b11c6e4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -618,9 +618,8 @@ struct ixgbe_adapter {
618#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) 618#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
619#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) 619#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
620#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) 620#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
621#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10) 621#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
622#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) 622#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11)
623#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12)
624 623
625 /* Tx fast path data */ 624 /* Tx fast path data */
626 int num_tx_queues; 625 int num_tx_queues;
@@ -754,7 +753,7 @@ enum ixgbe_state_t {
754 __IXGBE_DOWN, 753 __IXGBE_DOWN,
755 __IXGBE_SERVICE_SCHED, 754 __IXGBE_SERVICE_SCHED,
756 __IXGBE_IN_SFP_INIT, 755 __IXGBE_IN_SFP_INIT,
757 __IXGBE_READ_I2C, 756 __IXGBE_PTP_RUNNING,
758}; 757};
759 758
760struct ixgbe_cb { 759struct ixgbe_cb {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4a5bfb6b3af0..a26f3fee4f35 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1018 u16 sfp_addr = 0; 1018 u16 sfp_addr = 0;
1019 u16 sfp_data = 0; 1019 u16 sfp_data = 0;
1020 u16 sfp_stat = 0; 1020 u16 sfp_stat = 0;
1021 u16 gssr;
1021 u32 i; 1022 u32 i;
1022 1023
1024 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1025 gssr = IXGBE_GSSR_PHY1_SM;
1026 else
1027 gssr = IXGBE_GSSR_PHY0_SM;
1028
1029 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
1030 return IXGBE_ERR_SWFW_SYNC;
1031
1023 if (hw->phy.type == ixgbe_phy_nl) { 1032 if (hw->phy.type == ixgbe_phy_nl) {
1024 /* 1033 /*
1025 * phy SDA/SCL registers are at addresses 0xC30A to 1034 * phy SDA/SCL registers are at addresses 0xC30A to
@@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1028 */ 1037 */
1029 sfp_addr = (dev_addr << 8) + byte_offset; 1038 sfp_addr = (dev_addr << 8) + byte_offset;
1030 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); 1039 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1031 hw->phy.ops.write_reg(hw, 1040 hw->phy.ops.write_reg_mdi(hw,
1032 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, 1041 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1033 MDIO_MMD_PMAPMD, 1042 MDIO_MMD_PMAPMD,
1034 sfp_addr); 1043 sfp_addr);
1035 1044
1036 /* Poll status */ 1045 /* Poll status */
1037 for (i = 0; i < 100; i++) { 1046 for (i = 0; i < 100; i++) {
1038 hw->phy.ops.read_reg(hw, 1047 hw->phy.ops.read_reg_mdi(hw,
1039 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, 1048 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1040 MDIO_MMD_PMAPMD, 1049 MDIO_MMD_PMAPMD,
1041 &sfp_stat); 1050 &sfp_stat);
1042 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; 1051 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1043 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) 1052 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1044 break; 1053 break;
@@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1052 } 1061 }
1053 1062
1054 /* Read data */ 1063 /* Read data */
1055 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, 1064 hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1056 MDIO_MMD_PMAPMD, &sfp_data); 1065 MDIO_MMD_PMAPMD, &sfp_data);
1057 1066
1058 *eeprom_data = (u8)(sfp_data >> 8); 1067 *eeprom_data = (u8)(sfp_data >> 8);
1059 } else { 1068 } else {
@@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1061 } 1070 }
1062 1071
1063out: 1072out:
1073 hw->mac.ops.release_swfw_sync(hw, gssr);
1064 return status; 1074 return status;
1065} 1075}
1066 1076
@@ -1321,11 +1331,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1321 1331
1322static struct ixgbe_phy_operations phy_ops_82598 = { 1332static struct ixgbe_phy_operations phy_ops_82598 = {
1323 .identify = &ixgbe_identify_phy_generic, 1333 .identify = &ixgbe_identify_phy_generic,
1324 .identify_sfp = &ixgbe_identify_sfp_module_generic, 1334 .identify_sfp = &ixgbe_identify_module_generic,
1325 .init = &ixgbe_init_phy_ops_82598, 1335 .init = &ixgbe_init_phy_ops_82598,
1326 .reset = &ixgbe_reset_phy_generic, 1336 .reset = &ixgbe_reset_phy_generic,
1327 .read_reg = &ixgbe_read_phy_reg_generic, 1337 .read_reg = &ixgbe_read_phy_reg_generic,
1328 .write_reg = &ixgbe_write_phy_reg_generic, 1338 .write_reg = &ixgbe_write_phy_reg_generic,
1339 .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
1340 .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
1329 .setup_link = &ixgbe_setup_phy_link_generic, 1341 .setup_link = &ixgbe_setup_phy_link_generic,
1330 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 1342 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
1331 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, 1343 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
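The 82598 hunks above bracket the whole SFP I2C transaction with the firmware/software semaphore, selecting the PHY0 or PHY1 semaphore by LAN ID, and switch the inner MDIO traffic to the new read_reg_mdi/write_reg_mdi ops so the lock is not re-taken per register access. The bracket, sketched as a fragment:

/* Acquire/release bracket added above (fragment, not a full fn). */
u16 gssr = (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) ?
           IXGBE_GSSR_PHY1_SM : IXGBE_GSSR_PHY0_SM;

if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
        return IXGBE_ERR_SWFW_SYNC;

/* ... MDIO access via read_reg_mdi()/write_reg_mdi() ... */

hw->mac.ops.release_swfw_sync(hw, gssr);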
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 0b82d38bc97d..007a0083a636 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -49,6 +49,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
49static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 49static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
50 ixgbe_link_speed speed, 50 ixgbe_link_speed speed,
51 bool autoneg_wait_to_complete); 51 bool autoneg_wait_to_complete);
52static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
52static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 53static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
53 bool autoneg_wait_to_complete); 54 bool autoneg_wait_to_complete);
54static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 55static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -58,6 +59,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
58 ixgbe_link_speed speed, 59 ixgbe_link_speed speed,
59 bool autoneg_wait_to_complete); 60 bool autoneg_wait_to_complete);
60static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 61static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
62static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
63 u8 dev_addr, u8 *data);
64static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
65 u8 dev_addr, u8 data);
61 66
62static bool ixgbe_mng_enabled(struct ixgbe_hw *hw) 67static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
63{ 68{
@@ -137,11 +142,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
137 goto setup_sfp_out; 142 goto setup_sfp_out;
138 } 143 }
139 144
140 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 145 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
146 goto setup_sfp_err;
141 while (data_value != 0xffff) { 147 while (data_value != 0xffff) {
142 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); 148 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
143 IXGBE_WRITE_FLUSH(hw); 149 IXGBE_WRITE_FLUSH(hw);
144 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 150 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
151 goto setup_sfp_err;
145 } 152 }
146 153
147 /* Release the semaphore */ 154 /* Release the semaphore */
@@ -187,6 +194,17 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
187 194
188setup_sfp_out: 195setup_sfp_out:
189 return ret_val; 196 return ret_val;
197
198setup_sfp_err:
199 /* Release the semaphore */
200 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
201 /* Delay obtaining semaphore again to allow FW access,
202 * semaphore_delay is in ms usleep_range needs us.
203 */
204 usleep_range(hw->eeprom.semaphore_delay * 1000,
205 hw->eeprom.semaphore_delay * 2000);
206 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
207 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
190} 208}
191 209
192static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) 210static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
@@ -219,6 +237,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
219 struct ixgbe_mac_info *mac = &hw->mac; 237 struct ixgbe_mac_info *mac = &hw->mac;
220 struct ixgbe_phy_info *phy = &hw->phy; 238 struct ixgbe_phy_info *phy = &hw->phy;
221 s32 ret_val = 0; 239 s32 ret_val = 0;
240 u32 esdp;
241
242 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
243 /* Store flag indicating I2C bus access control unit. */
244 hw->phy.qsfp_shared_i2c_bus = true;
245
246 /* Initialize access to QSFP+ I2C bus */
247 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
248 esdp |= IXGBE_ESDP_SDP0_DIR;
249 esdp &= ~IXGBE_ESDP_SDP1_DIR;
250 esdp &= ~IXGBE_ESDP_SDP0;
251 esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
252 esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
253 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
254 IXGBE_WRITE_FLUSH(hw);
255
256 phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
257 phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
258 }
222 259
223 /* Identify the PHY or SFP module */ 260 /* Identify the PHY or SFP module */
224 ret_val = phy->ops.identify(hw); 261 ret_val = phy->ops.identify(hw);
@@ -342,8 +379,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
342 379
343 if (hw->phy.multispeed_fiber) { 380 if (hw->phy.multispeed_fiber) {
344 *speed |= IXGBE_LINK_SPEED_10GB_FULL | 381 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
345 IXGBE_LINK_SPEED_1GB_FULL; 382 IXGBE_LINK_SPEED_1GB_FULL;
346 *autoneg = true; 383
384 /* QSFP must not enable auto-negotiation */
385 if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
386 *autoneg = false;
387 else
388 *autoneg = true;
347 } 389 }
348 390
349out: 391out:
@@ -397,6 +439,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
397 case IXGBE_DEV_ID_82599_LS: 439 case IXGBE_DEV_ID_82599_LS:
398 media_type = ixgbe_media_type_fiber_lco; 440 media_type = ixgbe_media_type_fiber_lco;
399 break; 441 break;
442 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
443 media_type = ixgbe_media_type_fiber_qsfp;
444 break;
400 default: 445 default:
401 media_type = ixgbe_media_type_unknown; 446 media_type = ixgbe_media_type_unknown;
402 break; 447 break;
@@ -406,6 +451,24 @@ out:
406} 451}
407 452
408/** 453/**
454 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
455 * @hw: pointer to hardware structure
456 *
457 * Disables link, should be called during D3 power down sequence.
458 *
459 */
460static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
461{
462 u32 autoc2_reg;
463
464 if (!hw->mng_fw_enabled && !hw->wol_enabled) {
465 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
466 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
467 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
468 }
469}
470
471/**
409 * ixgbe_start_mac_link_82599 - Setup MAC link settings 472 * ixgbe_start_mac_link_82599 - Setup MAC link settings
410 * @hw: pointer to hardware structure 473 * @hw: pointer to hardware structure
411 * @autoneg_wait_to_complete: true when waiting for completion is needed 474 * @autoneg_wait_to_complete: true when waiting for completion is needed
@@ -527,6 +590,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
527} 590}
528 591
529/** 592/**
593 * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
594 * @hw: pointer to hardware structure
595 * @speed: link speed to set
596 *
597 * We set the module speed differently for fixed fiber. For other
598 * multi-speed devices we don't have an error value so here if we
599 * detect an error we just log it and exit.
600 */
601static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
602 ixgbe_link_speed speed)
603{
604 s32 status;
605 u8 rs, eeprom_data;
606
607 switch (speed) {
608 case IXGBE_LINK_SPEED_10GB_FULL:
609 /* one bit mask same as setting on */
610 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
611 break;
612 case IXGBE_LINK_SPEED_1GB_FULL:
613 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
614 break;
615 default:
616 hw_dbg(hw, "Invalid fixed module speed\n");
617 return;
618 }
619
620 /* Set RS0 */
621 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
622 IXGBE_I2C_EEPROM_DEV_ADDR2,
623 &eeprom_data);
624 if (status) {
625 hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
626 goto out;
627 }
628
629 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
630
631 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
632 IXGBE_I2C_EEPROM_DEV_ADDR2,
633 eeprom_data);
634 if (status) {
635 hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
636 goto out;
637 }
638
639 /* Set RS1 */
640 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
641 IXGBE_I2C_EEPROM_DEV_ADDR2,
642 &eeprom_data);
643 if (status) {
644 hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
645 goto out;
646 }
647
 648 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
649
650 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
651 IXGBE_I2C_EEPROM_DEV_ADDR2,
652 eeprom_data);
653 if (status) {
654 hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
655 goto out;
656 }
657out:
658 return;
659}
660
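The RS0 and RS1 updates in ixgbe_set_fiber_fixed_speed share one read-modify-write shape. A sketch of a hypothetical helper (ixgbe_set_rs_byte is illustrative, not part of the driver) that would express it once, using the clear-then-OR update:

static s32 ixgbe_set_rs_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 rs)
{
	u8 data;
	s32 status;

	status = hw->phy.ops.read_i2c_byte(hw, byte_offset,
					   IXGBE_I2C_EEPROM_DEV_ADDR2, &data);
	if (status)
		return status;

	/* clear the rate-select field, then set the requested bit */
	data = (data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	return hw->phy.ops.write_i2c_byte(hw, byte_offset,
					  IXGBE_I2C_EEPROM_DEV_ADDR2, data);
}

The function body above would then reduce to two calls, one with IXGBE_SFF_SFF_8472_OSCB and one with IXGBE_SFF_SFF_8472_ESCB.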
661/**
530 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 662 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
531 * @hw: pointer to hardware structure 663 * @hw: pointer to hardware structure
532 * @speed: new link speed 664 * @speed: new link speed
@@ -573,9 +705,19 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
573 goto out; 705 goto out;
574 706
575 /* Set the module link speed */ 707 /* Set the module link speed */
576 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); 708 switch (hw->phy.media_type) {
577 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 709 case ixgbe_media_type_fiber:
578 IXGBE_WRITE_FLUSH(hw); 710 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
711 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
712 IXGBE_WRITE_FLUSH(hw);
713 break;
714 case ixgbe_media_type_fiber_qsfp:
715 /* QSFP module automatically detects MAC link speed */
716 break;
717 default:
718 hw_dbg(hw, "Unexpected media type.\n");
719 break;
720 }
579 721
580 /* Allow module to change analog characteristics (1G->10G) */ 722 /* Allow module to change analog characteristics (1G->10G) */
581 msleep(40); 723 msleep(40);
@@ -625,10 +767,24 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
625 goto out; 767 goto out;
626 768
627 /* Set the module link speed */ 769 /* Set the module link speed */
628 esdp_reg &= ~IXGBE_ESDP_SDP5; 770 switch (hw->phy.media_type) {
629 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 771 case ixgbe_media_type_fiber_fixed:
630 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 772 ixgbe_set_fiber_fixed_speed(hw,
631 IXGBE_WRITE_FLUSH(hw); 773 IXGBE_LINK_SPEED_1GB_FULL);
774 break;
775 case ixgbe_media_type_fiber:
776 esdp_reg &= ~IXGBE_ESDP_SDP5;
777 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
778 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
779 IXGBE_WRITE_FLUSH(hw);
780 break;
781 case ixgbe_media_type_fiber_qsfp:
782 /* QSFP module automatically detects MAC link speed */
783 break;
784 default:
785 hw_dbg(hw, "Unexpected media type.\n");
786 break;
787 }
632 788
633 /* Allow module to change analog characteristics (10G->1G) */ 789 /* Allow module to change analog characteristics (10G->1G) */
634 msleep(40); 790 msleep(40);
@@ -1872,7 +2028,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1872 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) 2028 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1873 goto out; 2029 goto out;
1874 else 2030 else
1875 status = ixgbe_identify_sfp_module_generic(hw); 2031 status = ixgbe_identify_module_generic(hw);
1876 } 2032 }
1877 2033
1878 /* Set PHY type none if no PHY detected */ 2034 /* Set PHY type none if no PHY detected */
@@ -1978,10 +2134,12 @@ sfp_check:
1978 switch (hw->phy.type) { 2134 switch (hw->phy.type) {
1979 case ixgbe_phy_sfp_passive_tyco: 2135 case ixgbe_phy_sfp_passive_tyco:
1980 case ixgbe_phy_sfp_passive_unknown: 2136 case ixgbe_phy_sfp_passive_unknown:
2137 case ixgbe_phy_qsfp_passive_unknown:
1981 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 2138 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1982 break; 2139 break;
1983 case ixgbe_phy_sfp_ftl_active: 2140 case ixgbe_phy_sfp_ftl_active:
1984 case ixgbe_phy_sfp_active_unknown: 2141 case ixgbe_phy_sfp_active_unknown:
2142 case ixgbe_phy_qsfp_active_unknown:
1985 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; 2143 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
1986 break; 2144 break;
1987 case ixgbe_phy_sfp_avago: 2145 case ixgbe_phy_sfp_avago:
@@ -1999,6 +2157,15 @@ sfp_check:
1999 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) 2157 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2000 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; 2158 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2001 break; 2159 break;
2160 case ixgbe_phy_qsfp_intel:
2161 case ixgbe_phy_qsfp_unknown:
2162 hw->phy.ops.read_i2c_eeprom(hw,
2163 IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
2164 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2165 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2166 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2167 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2168 break;
2002 default: 2169 default:
2003 break; 2170 break;
2004 } 2171 }
@@ -2045,6 +2212,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2045{ 2212{
2046 s32 status = IXGBE_ERR_EEPROM_VERSION; 2213 s32 status = IXGBE_ERR_EEPROM_VERSION;
2047 u16 fw_offset, fw_ptp_cfg_offset; 2214 u16 fw_offset, fw_ptp_cfg_offset;
2215 u16 offset;
2048 u16 fw_version = 0; 2216 u16 fw_version = 0;
2049 2217
2050 /* firmware check is only necessary for SFI devices */ 2218 /* firmware check is only necessary for SFI devices */
@@ -2054,29 +2222,35 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2054 } 2222 }
2055 2223
2056 /* get the offset to the Firmware Module block */ 2224 /* get the offset to the Firmware Module block */
2057 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2225 offset = IXGBE_FW_PTR;
2226 if (hw->eeprom.ops.read(hw, offset, &fw_offset))
2227 goto fw_version_err;
2058 2228
2059 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2229 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2060 goto fw_version_out; 2230 goto fw_version_out;
2061 2231
2062 /* get the offset to the Pass Through Patch Configuration block */ 2232 /* get the offset to the Pass Through Patch Configuration block */
2063 hw->eeprom.ops.read(hw, (fw_offset + 2233 offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
2064 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), 2234 if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
2065 &fw_ptp_cfg_offset); 2235 goto fw_version_err;
2066 2236
2067 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2237 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2068 goto fw_version_out; 2238 goto fw_version_out;
2069 2239
2070 /* get the firmware version */ 2240 /* get the firmware version */
2071 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + 2241 offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
2072 IXGBE_FW_PATCH_VERSION_4), 2242 if (hw->eeprom.ops.read(hw, offset, &fw_version))
2073 &fw_version); 2243 goto fw_version_err;
2074 2244
2075 if (fw_version > 0x5) 2245 if (fw_version > 0x5)
2076 status = 0; 2246 status = 0;
2077 2247
2078fw_version_out: 2248fw_version_out:
2079 return status; 2249 return status;
2250
2251fw_version_err:
2252 hw_err(hw, "eeprom read at offset %d failed\n", offset);
2253 return IXGBE_ERR_EEPROM_VERSION;
2080} 2254}
2081 2255
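The pattern this hunk repeats (read a word, log the failing offset, bail out) could be captured once. A sketch, assuming a wrapper like this were acceptable; it is not part of the driver:

static s32 ixgbe_read_ee_word(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* propagate the eeprom op's failure, logging the offset */
	if (hw->eeprom.ops.read(hw, offset, data)) {
		hw_err(hw, "eeprom read at offset %d failed\n", offset);
		return IXGBE_ERR_EEPROM_VERSION;
	}
	return 0;
}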
2082/** 2256/**
@@ -2236,6 +2410,112 @@ reset_pipeline_out:
2236 return ret_val; 2410 return ret_val;
2237} 2411}
2238 2412
2413/**
2414 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2415 * @hw: pointer to hardware structure
2416 * @byte_offset: byte offset to read
2417 * @data: value read
2418 *
2419 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2420 * a specified device address.
2421 **/
2422static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2423 u8 dev_addr, u8 *data)
2424{
2425 u32 esdp;
2426 s32 status;
2427 s32 timeout = 200;
2428
 2429 if (hw->phy.qsfp_shared_i2c_bus) {
2430 /* Acquire I2C bus ownership. */
2431 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2432 esdp |= IXGBE_ESDP_SDP0;
2433 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2434 IXGBE_WRITE_FLUSH(hw);
2435
2436 while (timeout) {
2437 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2438 if (esdp & IXGBE_ESDP_SDP1)
2439 break;
2440
2441 usleep_range(5000, 10000);
2442 timeout--;
2443 }
2444
2445 if (!timeout) {
 2446 hw_dbg(hw, "Driver can't access resource, I2C bus acquisition timed out.\n");
2447 status = IXGBE_ERR_I2C;
2448 goto release_i2c_access;
2449 }
2450 }
2451
2452 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2453
2454release_i2c_access:
 2455 if (hw->phy.qsfp_shared_i2c_bus) {
2456 /* Release I2C bus ownership. */
2457 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2458 esdp &= ~IXGBE_ESDP_SDP0;
2459 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2460 IXGBE_WRITE_FLUSH(hw);
2461 }
2462
2463 return status;
2464}
2465
2466/**
2467 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2468 * @hw: pointer to hardware structure
2469 * @byte_offset: byte offset to write
2470 * @data: value to write
2471 *
2472 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2473 * a specified device address.
2474 **/
2475static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2476 u8 dev_addr, u8 data)
2477{
2478 u32 esdp;
2479 s32 status;
2480 s32 timeout = 200;
2481
 2482 if (hw->phy.qsfp_shared_i2c_bus) {
2483 /* Acquire I2C bus ownership. */
2484 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2485 esdp |= IXGBE_ESDP_SDP0;
2486 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2487 IXGBE_WRITE_FLUSH(hw);
2488
2489 while (timeout) {
2490 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2491 if (esdp & IXGBE_ESDP_SDP1)
2492 break;
2493
2494 usleep_range(5000, 10000);
2495 timeout--;
2496 }
2497
2498 if (!timeout) {
 2499 hw_dbg(hw, "Driver can't access resource, I2C bus acquisition timed out.\n");
2500 status = IXGBE_ERR_I2C;
2501 goto release_i2c_access;
2502 }
2503 }
2504
2505 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2506
2507release_i2c_access:
 2508 if (hw->phy.qsfp_shared_i2c_bus) {
2509 /* Release I2C bus ownership. */
2510 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2511 esdp &= ~IXGBE_ESDP_SDP0;
2512 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2513 IXGBE_WRITE_FLUSH(hw);
2514 }
2515
2516 return status;
2517}
2518
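Both byte accessors above open-code the same SDP0/SDP1 handshake for the shared QSFP I2C bus. A sketch of the acquisition half as a hypothetical helper (release is simply clearing SDP0 again):

static s32 ixgbe_acquire_shared_i2c(struct ixgbe_hw *hw)
{
	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
	s32 timeout = 200;

	/* request the bus by raising SDP0, then poll SDP1 for the grant */
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp | IXGBE_ESDP_SDP0);
	IXGBE_WRITE_FLUSH(hw);

	while (timeout--) {
		if (IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP1)
			return 0;
		usleep_range(5000, 10000);
	}

	return IXGBE_ERR_I2C;
}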
2239static struct ixgbe_mac_operations mac_ops_82599 = { 2519static struct ixgbe_mac_operations mac_ops_82599 = {
2240 .init_hw = &ixgbe_init_hw_generic, 2520 .init_hw = &ixgbe_init_hw_generic,
2241 .reset_hw = &ixgbe_reset_hw_82599, 2521 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2255,6 +2535,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2255 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2535 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
2256 .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, 2536 .read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
2257 .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, 2537 .write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
2538 .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599,
2258 .setup_link = &ixgbe_setup_mac_link_82599, 2539 .setup_link = &ixgbe_setup_mac_link_82599,
2259 .set_rxpba = &ixgbe_set_rxpba_generic, 2540 .set_rxpba = &ixgbe_set_rxpba_generic,
2260 .check_link = &ixgbe_check_mac_link_generic, 2541 .check_link = &ixgbe_check_mac_link_generic,
@@ -2300,7 +2581,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2300 2581
2301static struct ixgbe_phy_operations phy_ops_82599 = { 2582static struct ixgbe_phy_operations phy_ops_82599 = {
2302 .identify = &ixgbe_identify_phy_82599, 2583 .identify = &ixgbe_identify_phy_82599,
2303 .identify_sfp = &ixgbe_identify_sfp_module_generic, 2584 .identify_sfp = &ixgbe_identify_module_generic,
2304 .init = &ixgbe_init_phy_ops_82599, 2585 .init = &ixgbe_init_phy_ops_82599,
2305 .reset = &ixgbe_reset_phy_generic, 2586 .reset = &ixgbe_reset_phy_generic,
2306 .read_reg = &ixgbe_read_phy_reg_generic, 2587 .read_reg = &ixgbe_read_phy_reg_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bcdeb89af5a..b5c434b617b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,17 +65,42 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
 65 * function checks the device id to see if the associated phy supports 65 * function checks the device id to see if the associated phy supports
66 * autoneg flow control. 66 * autoneg flow control.
67 **/ 67 **/
68s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 68bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
69{ 69{
70 bool supported = false;
71 ixgbe_link_speed speed;
72 bool link_up;
70 73
71 switch (hw->device_id) { 74 switch (hw->phy.media_type) {
72 case IXGBE_DEV_ID_X540T: 75 case ixgbe_media_type_fiber_fixed:
73 case IXGBE_DEV_ID_X540T1: 76 case ixgbe_media_type_fiber:
74 case IXGBE_DEV_ID_82599_T3_LOM: 77 hw->mac.ops.check_link(hw, &speed, &link_up, false);
75 return 0; 78 /* if link is down, assume supported */
79 if (link_up)
80 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
81 true : false;
82 else
83 supported = true;
84 break;
85 case ixgbe_media_type_backplane:
86 supported = true;
87 break;
88 case ixgbe_media_type_copper:
89 /* only some copper devices support flow control autoneg */
90 switch (hw->device_id) {
91 case IXGBE_DEV_ID_82599_T3_LOM:
92 case IXGBE_DEV_ID_X540T:
93 case IXGBE_DEV_ID_X540T1:
94 supported = true;
95 break;
96 default:
97 break;
98 }
76 default: 99 default:
77 return IXGBE_ERR_FC_NOT_SUPPORTED; 100 break;
78 } 101 }
102
103 return supported;
79} 104}
80 105
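For the fiber legs above, the link check reduces to one expression; an equivalent sketch:

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	/* supported when the link is down (assume yes) or up at exactly 1G */
	supported = !link_up || speed == IXGBE_LINK_SPEED_1GB_FULL;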
81/** 106/**
@@ -114,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
114 * we link at 10G, the 1G advertisement is harmless and vice versa. 139 * we link at 10G, the 1G advertisement is harmless and vice versa.
115 */ 140 */
116 switch (hw->phy.media_type) { 141 switch (hw->phy.media_type) {
142 case ixgbe_media_type_fiber_fixed:
117 case ixgbe_media_type_fiber: 143 case ixgbe_media_type_fiber:
118 case ixgbe_media_type_backplane: 144 case ixgbe_media_type_backplane:
119 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 145 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -234,7 +260,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
234 IXGBE_GSSR_MAC_CSR_SM); 260 IXGBE_GSSR_MAC_CSR_SM);
235 261
236 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 262 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
237 (ixgbe_device_supports_autoneg_fc(hw) == 0)) { 263 ixgbe_device_supports_autoneg_fc(hw)) {
238 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 264 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
239 MDIO_MMD_AN, reg_cu); 265 MDIO_MMD_AN, reg_cu);
240 } 266 }
@@ -2380,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2380 2406
2381 switch (hw->phy.media_type) { 2407 switch (hw->phy.media_type) {
2382 /* Autoneg flow control on fiber adapters */ 2408 /* Autoneg flow control on fiber adapters */
2409 case ixgbe_media_type_fiber_fixed:
2383 case ixgbe_media_type_fiber: 2410 case ixgbe_media_type_fiber:
2384 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2411 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2385 ret_val = ixgbe_fc_autoneg_fiber(hw); 2412 ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2392,7 +2419,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2392 2419
2393 /* Autoneg flow control on copper adapters */ 2420 /* Autoneg flow control on copper adapters */
2394 case ixgbe_media_type_copper: 2421 case ixgbe_media_type_copper:
2395 if (ixgbe_device_supports_autoneg_fc(hw) == 0) 2422 if (ixgbe_device_supports_autoneg_fc(hw))
2396 ret_val = ixgbe_fc_autoneg_copper(hw); 2423 ret_val = ixgbe_fc_autoneg_copper(hw);
2397 break; 2424 break;
2398 2425
@@ -2479,42 +2506,39 @@ out:
2479 **/ 2506 **/
2480s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2507s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2481{ 2508{
2482 u32 gssr; 2509 u32 gssr = 0;
2483 u32 swmask = mask; 2510 u32 swmask = mask;
2484 u32 fwmask = mask << 5; 2511 u32 fwmask = mask << 5;
2485 s32 timeout = 200; 2512 u32 timeout = 200;
2513 u32 i;
2486 2514
2487 while (timeout) { 2515 for (i = 0; i < timeout; i++) {
2488 /* 2516 /*
2489 * SW EEPROM semaphore bit is used for access to all 2517 * SW NVM semaphore bit is used for access to all
2490 * SW_FW_SYNC/GSSR bits (not just EEPROM) 2518 * SW_FW_SYNC bits (not just NVM)
2491 */ 2519 */
2492 if (ixgbe_get_eeprom_semaphore(hw)) 2520 if (ixgbe_get_eeprom_semaphore(hw))
2493 return IXGBE_ERR_SWFW_SYNC; 2521 return IXGBE_ERR_SWFW_SYNC;
2494 2522
2495 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2523 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2496 if (!(gssr & (fwmask | swmask))) 2524 if (!(gssr & (fwmask | swmask))) {
2497 break; 2525 gssr |= swmask;
2498 2526 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2499 /* 2527 ixgbe_release_eeprom_semaphore(hw);
2500 * Firmware currently using resource (fwmask) or other software 2528 return 0;
2501 * thread currently using resource (swmask) 2529 } else {
2502 */ 2530 /* Resource is currently in use by FW or SW */
2503 ixgbe_release_eeprom_semaphore(hw); 2531 ixgbe_release_eeprom_semaphore(hw);
2504 usleep_range(5000, 10000); 2532 usleep_range(5000, 10000);
2505 timeout--; 2533 }
2506 }
2507
2508 if (!timeout) {
2509 hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
2510 return IXGBE_ERR_SWFW_SYNC;
2511 } 2534 }
2512 2535
2513 gssr |= swmask; 2536 /* If time expired clear the bits holding the lock and retry */
2514 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2537 if (gssr & (fwmask | swmask))
2538 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2515 2539
2516 ixgbe_release_eeprom_semaphore(hw); 2540 usleep_range(5000, 10000);
2517 return 0; 2541 return IXGBE_ERR_SWFW_SYNC;
2518} 2542}
2519 2543
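One consequence of the rework above, sketched from a caller's point of view (the retry itself is hypothetical, not code from this series): because the timed-out path now clears stale lock bits before returning, an immediate second attempt can succeed where the old loop would spin out.

	s32 status = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);

	if (status == IXGBE_ERR_SWFW_SYNC)	/* stale bits were cleared */
		status = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
	if (!status) {
		/* ... access the shared PHY resource ... */
		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
	}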
2520/** 2544/**
@@ -2716,13 +2740,19 @@ out:
2716static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2740static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2717 u16 *san_mac_offset) 2741 u16 *san_mac_offset)
2718{ 2742{
2743 s32 ret_val;
2744
2719 /* 2745 /*
2720 * First read the EEPROM pointer to see if the MAC addresses are 2746 * First read the EEPROM pointer to see if the MAC addresses are
2721 * available. 2747 * available.
2722 */ 2748 */
2723 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); 2749 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
2750 san_mac_offset);
2751 if (ret_val)
2752 hw_err(hw, "eeprom read at offset %d failed\n",
2753 IXGBE_SAN_MAC_ADDR_PTR);
2724 2754
2725 return 0; 2755 return ret_val;
2726} 2756}
2727 2757
2728/** 2758/**
@@ -2739,23 +2769,16 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2739{ 2769{
2740 u16 san_mac_data, san_mac_offset; 2770 u16 san_mac_data, san_mac_offset;
2741 u8 i; 2771 u8 i;
2772 s32 ret_val;
2742 2773
2743 /* 2774 /*
2744 * First read the EEPROM pointer to see if the MAC addresses are 2775 * First read the EEPROM pointer to see if the MAC addresses are
2745 * available. If they're not, no point in calling set_lan_id() here. 2776 * available. If they're not, no point in calling set_lan_id() here.
2746 */ 2777 */
2747 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2778 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2779 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
2748 2780
2749 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2781 goto san_mac_addr_clr;
2750 /*
2751 * No addresses available in this EEPROM. It's not an
2752 * error though, so just wipe the local address and return.
2753 */
2754 for (i = 0; i < 6; i++)
2755 san_mac_addr[i] = 0xFF;
2756
2757 goto san_mac_addr_out;
2758 }
2759 2782
2760 /* make sure we know which port we need to program */ 2783 /* make sure we know which port we need to program */
2761 hw->mac.ops.set_lan_id(hw); 2784 hw->mac.ops.set_lan_id(hw);
@@ -2763,14 +2786,26 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2763 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2786 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2764 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2787 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2765 for (i = 0; i < 3; i++) { 2788 for (i = 0; i < 3; i++) {
2766 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); 2789 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
2790 &san_mac_data);
2791 if (ret_val) {
2792 hw_err(hw, "eeprom read at offset %d failed\n",
2793 san_mac_offset);
2794 goto san_mac_addr_clr;
2795 }
2767 san_mac_addr[i * 2] = (u8)(san_mac_data); 2796 san_mac_addr[i * 2] = (u8)(san_mac_data);
2768 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2797 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2769 san_mac_offset++; 2798 san_mac_offset++;
2770 } 2799 }
2771
2772san_mac_addr_out:
2773 return 0; 2800 return 0;
2801
2802san_mac_addr_clr:
2803 /* No addresses available in this EEPROM. It's not necessarily an
2804 * error though, so just wipe the local address and return.
2805 */
2806 for (i = 0; i < 6; i++)
2807 san_mac_addr[i] = 0xFF;
2808 return ret_val;
2774} 2809}
2775 2810
2776/** 2811/**
@@ -3219,8 +3254,9 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3219 *wwpn_prefix = 0xFFFF; 3254 *wwpn_prefix = 0xFFFF;
3220 3255
3221 /* check if alternative SAN MAC is supported */ 3256 /* check if alternative SAN MAC is supported */
3222 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, 3257 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3223 &alt_san_mac_blk_offset); 3258 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3259 goto wwn_prefix_err;
3224 3260
3225 if ((alt_san_mac_blk_offset == 0) || 3261 if ((alt_san_mac_blk_offset == 0) ||
3226 (alt_san_mac_blk_offset == 0xFFFF)) 3262 (alt_san_mac_blk_offset == 0xFFFF))
@@ -3228,19 +3264,26 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3228 3264
3229 /* check capability in alternative san mac address block */ 3265 /* check capability in alternative san mac address block */
3230 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3266 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3231 hw->eeprom.ops.read(hw, offset, &caps); 3267 if (hw->eeprom.ops.read(hw, offset, &caps))
3268 goto wwn_prefix_err;
3232 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3269 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3233 goto wwn_prefix_out; 3270 goto wwn_prefix_out;
3234 3271
3235 /* get the corresponding prefix for WWNN/WWPN */ 3272 /* get the corresponding prefix for WWNN/WWPN */
3236 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3273 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3237 hw->eeprom.ops.read(hw, offset, wwnn_prefix); 3274 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
3275 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3238 3276
3239 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3277 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3240 hw->eeprom.ops.read(hw, offset, wwpn_prefix); 3278 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3279 goto wwn_prefix_err;
3241 3280
3242wwn_prefix_out: 3281wwn_prefix_out:
3243 return 0; 3282 return 0;
3283
3284wwn_prefix_err:
3285 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3286 return 0;
3244} 3287}
3245 3288
3246/** 3289/**
@@ -3754,7 +3797,11 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3754 u8 sensor_index; 3797 u8 sensor_index;
3755 u8 sensor_location; 3798 u8 sensor_location;
3756 3799
3757 hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor); 3800 if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
3801 hw_err(hw, "eeprom read at offset %d failed\n",
3802 ets_offset + 1 + i);
3803 continue;
3804 }
3758 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3805 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3759 IXGBE_ETS_DATA_INDEX_SHIFT); 3806 IXGBE_ETS_DATA_INDEX_SHIFT);
3760 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 3807 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 22eee38868f1..d259dc76604e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
80s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); 80s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
81s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 81s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
82s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); 82s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
83s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); 83bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
84void ixgbe_fc_autoneg(struct ixgbe_hw *hw); 84void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
85 85
86s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 86s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -143,8 +143,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
143 143
144#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 144#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
145 145
146#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
147
146#define hw_dbg(hw, format, arg...) \ 148#define hw_dbg(hw, format, arg...) \
147 netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg) 149 netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg)
150#define hw_err(hw, format, arg...) \
151 netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg)
148#define e_dev_info(format, arg...) \ 152#define e_dev_info(format, arg...) \
149 dev_info(&adapter->pdev->dev, format, ## arg) 153 dev_info(&adapter->pdev->dev, format, ## arg)
150#define e_dev_warn(format, arg...) \ 154#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 24e2e7aafda2..0e1b973659b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -311,9 +311,6 @@ static int ixgbe_set_settings(struct net_device *netdev,
311 * this function does not support duplex forcing, but can 311 * this function does not support duplex forcing, but can
312 * limit the advertising of the adapter to the specified speed 312 * limit the advertising of the adapter to the specified speed
313 */ 313 */
314 if (ecmd->autoneg == AUTONEG_DISABLE)
315 return -EINVAL;
316
317 if (ecmd->advertising & ~ecmd->supported) 314 if (ecmd->advertising & ~ecmd->supported)
318 return -EINVAL; 315 return -EINVAL;
319 316
@@ -355,10 +352,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
355 struct ixgbe_adapter *adapter = netdev_priv(netdev); 352 struct ixgbe_adapter *adapter = netdev_priv(netdev);
356 struct ixgbe_hw *hw = &adapter->hw; 353 struct ixgbe_hw *hw = &adapter->hw;
357 354
358 if (hw->fc.disable_fc_autoneg) 355 if (ixgbe_device_supports_autoneg_fc(hw) &&
359 pause->autoneg = 0; 356 !hw->fc.disable_fc_autoneg)
360 else
361 pause->autoneg = 1; 357 pause->autoneg = 1;
358 else
359 pause->autoneg = 0;
362 360
363 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 361 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
364 pause->rx_pause = 1; 362 pause->rx_pause = 1;
@@ -384,7 +382,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
384 382
385 /* some devices do not support autoneg of link flow control */ 383 /* some devices do not support autoneg of link flow control */
386 if ((pause->autoneg == AUTONEG_ENABLE) && 384 if ((pause->autoneg == AUTONEG_ENABLE) &&
387 (ixgbe_device_supports_autoneg_fc(hw) != 0)) 385 !ixgbe_device_supports_autoneg_fc(hw))
388 return -EINVAL; 386 return -EINVAL;
389 387
390 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); 388 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
@@ -1048,7 +1046,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1048 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1046 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1049 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1047 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1050 } 1048 }
1051 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1049 for (j = 0; j < netdev->num_tx_queues; j++) {
1052 ring = adapter->tx_ring[j]; 1050 ring = adapter->tx_ring[j];
1053 if (!ring) { 1051 if (!ring) {
1054 data[i] = 0; 1052 data[i] = 0;
@@ -1140,11 +1138,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1140 sprintf(p, "tx_queue_%u_bytes", i); 1138 sprintf(p, "tx_queue_%u_bytes", i);
1141 p += ETH_GSTRING_LEN; 1139 p += ETH_GSTRING_LEN;
1142#ifdef LL_EXTENDED_STATS 1140#ifdef LL_EXTENDED_STATS
1143 sprintf(p, "tx_q_%u_napi_yield", i); 1141 sprintf(p, "tx_queue_%u_ll_napi_yield", i);
1144 p += ETH_GSTRING_LEN; 1142 p += ETH_GSTRING_LEN;
1145 sprintf(p, "tx_q_%u_misses", i); 1143 sprintf(p, "tx_queue_%u_ll_misses", i);
1146 p += ETH_GSTRING_LEN; 1144 p += ETH_GSTRING_LEN;
1147 sprintf(p, "tx_q_%u_cleaned", i); 1145 sprintf(p, "tx_queue_%u_ll_cleaned", i);
1148 p += ETH_GSTRING_LEN; 1146 p += ETH_GSTRING_LEN;
1149#endif /* LL_EXTENDED_STATS */ 1147#endif /* LL_EXTENDED_STATS */
1150 } 1148 }
@@ -1154,11 +1152,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1154 sprintf(p, "rx_queue_%u_bytes", i); 1152 sprintf(p, "rx_queue_%u_bytes", i);
1155 p += ETH_GSTRING_LEN; 1153 p += ETH_GSTRING_LEN;
1156#ifdef LL_EXTENDED_STATS 1154#ifdef LL_EXTENDED_STATS
1157 sprintf(p, "rx_q_%u_ll_poll_yield", i); 1155 sprintf(p, "rx_queue_%u_ll_poll_yield", i);
1158 p += ETH_GSTRING_LEN; 1156 p += ETH_GSTRING_LEN;
1159 sprintf(p, "rx_q_%u_misses", i); 1157 sprintf(p, "rx_queue_%u_ll_misses", i);
1160 p += ETH_GSTRING_LEN; 1158 p += ETH_GSTRING_LEN;
1161 sprintf(p, "rx_q_%u_cleaned", i); 1159 sprintf(p, "rx_queue_%u_ll_cleaned", i);
1162 p += ETH_GSTRING_LEN; 1160 p += ETH_GSTRING_LEN;
1163#endif /* LL_EXTENDED_STATS */ 1161#endif /* LL_EXTENDED_STATS */
1164 } 1162 }
@@ -1884,11 +1882,12 @@ static void ixgbe_diag_test(struct net_device *netdev,
1884 struct ethtool_test *eth_test, u64 *data) 1882 struct ethtool_test *eth_test, u64 *data)
1885{ 1883{
1886 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1884 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1887 struct ixgbe_hw *hw = &adapter->hw;
1888 bool if_running = netif_running(netdev); 1885 bool if_running = netif_running(netdev);
1889 1886
1890 set_bit(__IXGBE_TESTING, &adapter->state); 1887 set_bit(__IXGBE_TESTING, &adapter->state);
1891 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1888 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1889 struct ixgbe_hw *hw = &adapter->hw;
1890
1892 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 1891 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1893 int i; 1892 int i;
1894 for (i = 0; i < adapter->num_vfs; i++) { 1893 for (i = 0; i < adapter->num_vfs; i++) {
@@ -1912,21 +1911,18 @@ static void ixgbe_diag_test(struct net_device *netdev,
1912 /* Offline tests */ 1911 /* Offline tests */
1913 e_info(hw, "offline testing starting\n"); 1912 e_info(hw, "offline testing starting\n");
1914 1913
1915 if (if_running)
1916 /* indicate we're in test mode */
1917 dev_close(netdev);
1918
1919 /* bringing adapter down disables SFP+ optics */
1920 if (hw->mac.ops.enable_tx_laser)
1921 hw->mac.ops.enable_tx_laser(hw);
1922
1923 /* Link test performed before hardware reset so autoneg doesn't 1914 /* Link test performed before hardware reset so autoneg doesn't
1924 * interfere with test result 1915 * interfere with test result
1925 */ 1916 */
1926 if (ixgbe_link_test(adapter, &data[4])) 1917 if (ixgbe_link_test(adapter, &data[4]))
1927 eth_test->flags |= ETH_TEST_FL_FAILED; 1918 eth_test->flags |= ETH_TEST_FL_FAILED;
1928 1919
1929 ixgbe_reset(adapter); 1920 if (if_running)
1921 /* indicate we're in test mode */
1922 dev_close(netdev);
1923 else
1924 ixgbe_reset(adapter);
1925
1930 e_info(hw, "register testing starting\n"); 1926 e_info(hw, "register testing starting\n");
1931 if (ixgbe_reg_test(adapter, &data[0])) 1927 if (ixgbe_reg_test(adapter, &data[0]))
1932 eth_test->flags |= ETH_TEST_FL_FAILED; 1928 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1963,13 +1959,11 @@ skip_loopback:
1963 clear_bit(__IXGBE_TESTING, &adapter->state); 1959 clear_bit(__IXGBE_TESTING, &adapter->state);
1964 if (if_running) 1960 if (if_running)
1965 dev_open(netdev); 1961 dev_open(netdev);
1962 else if (hw->mac.ops.disable_tx_laser)
1963 hw->mac.ops.disable_tx_laser(hw);
1966 } else { 1964 } else {
1967 e_info(hw, "online testing starting\n"); 1965 e_info(hw, "online testing starting\n");
1968 1966
1969 /* if adapter is down, SFP+ optics will be disabled */
1970 if (!if_running && hw->mac.ops.enable_tx_laser)
1971 hw->mac.ops.enable_tx_laser(hw);
1972
1973 /* Online tests */ 1967 /* Online tests */
1974 if (ixgbe_link_test(adapter, &data[4])) 1968 if (ixgbe_link_test(adapter, &data[4]))
1975 eth_test->flags |= ETH_TEST_FL_FAILED; 1969 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1983,9 +1977,6 @@ skip_loopback:
1983 clear_bit(__IXGBE_TESTING, &adapter->state); 1977 clear_bit(__IXGBE_TESTING, &adapter->state);
1984 } 1978 }
1985 1979
1986 /* if adapter was down, ensure SFP+ optics are disabled again */
1987 if (!if_running && hw->mac.ops.disable_tx_laser)
1988 hw->mac.ops.disable_tx_laser(hw);
1989skip_ol_tests: 1980skip_ol_tests:
1990 msleep_interruptible(4 * 1000); 1981 msleep_interruptible(4 * 1000);
1991} 1982}
@@ -2909,33 +2900,21 @@ static int ixgbe_get_module_info(struct net_device *dev,
2909 struct ixgbe_hw *hw = &adapter->hw; 2900 struct ixgbe_hw *hw = &adapter->hw;
2910 u32 status; 2901 u32 status;
2911 u8 sff8472_rev, addr_mode; 2902 u8 sff8472_rev, addr_mode;
2912 int ret_val = 0;
2913 bool page_swap = false; 2903 bool page_swap = false;
2914 2904
2915 /* avoid concurent i2c reads */
2916 while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2917 msleep(100);
2918
2919 /* used by the service task */
2920 set_bit(__IXGBE_READ_I2C, &adapter->state);
2921
2922 /* Check whether we support SFF-8472 or not */ 2905 /* Check whether we support SFF-8472 or not */
2923 status = hw->phy.ops.read_i2c_eeprom(hw, 2906 status = hw->phy.ops.read_i2c_eeprom(hw,
2924 IXGBE_SFF_SFF_8472_COMP, 2907 IXGBE_SFF_SFF_8472_COMP,
2925 &sff8472_rev); 2908 &sff8472_rev);
2926 if (status != 0) { 2909 if (status != 0)
2927 ret_val = -EIO; 2910 return -EIO;
2928 goto err_out;
2929 }
2930 2911
2931 /* addressing mode is not supported */ 2912 /* addressing mode is not supported */
2932 status = hw->phy.ops.read_i2c_eeprom(hw, 2913 status = hw->phy.ops.read_i2c_eeprom(hw,
2933 IXGBE_SFF_SFF_8472_SWAP, 2914 IXGBE_SFF_SFF_8472_SWAP,
2934 &addr_mode); 2915 &addr_mode);
2935 if (status != 0) { 2916 if (status != 0)
2936 ret_val = -EIO; 2917 return -EIO;
2937 goto err_out;
2938 }
2939 2918
2940 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 2919 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
2941 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); 2920 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
@@ -2952,9 +2931,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
2952 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2931 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2953 } 2932 }
2954 2933
2955err_out: 2934 return 0;
2956 clear_bit(__IXGBE_READ_I2C, &adapter->state);
2957 return ret_val;
2958} 2935}
2959 2936
2960static int ixgbe_get_module_eeprom(struct net_device *dev, 2937static int ixgbe_get_module_eeprom(struct net_device *dev,
@@ -2966,51 +2943,27 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
2966 u32 status = IXGBE_ERR_PHY_ADDR_INVALID; 2943 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2967 u8 databyte = 0xFF; 2944 u8 databyte = 0xFF;
2968 int i = 0; 2945 int i = 0;
2969 int ret_val = 0;
2970 2946
2971 /* ixgbe_get_module_info is called before this function in all 2947 if (ee->len == 0)
2972 * cases, so we do not need any checks we already do above, 2948 return -EINVAL;
2973 * and can trust ee->len to be a known value.
2974 */
2975 2949
2976 while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 2950 for (i = ee->offset; i < ee->offset + ee->len; i++) {
2977 msleep(100); 2951 /* I2C reads can take long time */
2978 set_bit(__IXGBE_READ_I2C, &adapter->state); 2952 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2979 2953 return -EBUSY;
2980 /* Read the first block, SFF-8079 */
2981 for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
2982 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
2983 if (status != 0) {
2984 /* Error occured while reading module */
2985 ret_val = -EIO;
2986 goto err_out;
2987 }
2988 data[i] = databyte;
2989 }
2990 2954
2991 /* If the second block is requested, check if SFF-8472 is supported. */ 2955 if (i < ETH_MODULE_SFF_8079_LEN)
2992 if (ee->len == ETH_MODULE_SFF_8472_LEN) { 2956 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
2993 if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP) 2957 else
2994 return -EOPNOTSUPP; 2958 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
2995
2996 /* Read the second block, SFF-8472 */
2997 for (i = ETH_MODULE_SFF_8079_LEN;
2998 i < ETH_MODULE_SFF_8472_LEN; i++) {
2999 status = hw->phy.ops.read_i2c_sff8472(hw,
3000 i - ETH_MODULE_SFF_8079_LEN, &databyte);
3001 if (status != 0) {
3002 /* Error occured while reading module */
3003 ret_val = -EIO;
3004 goto err_out;
3005 }
3006 data[i] = databyte;
3007 }
3008 }
3009 2959
3010err_out: 2960 if (status != 0)
3011 clear_bit(__IXGBE_READ_I2C, &adapter->state); 2961 return -EIO;
3012 2962
3013 return ret_val; 2963 data[i - ee->offset] = databyte;
2964 }
2965
2966 return 0;
3014} 2967}
3015 2968
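The rewritten loop above indexes one flat 0-511 map: bytes below ETH_MODULE_SFF_8079_LEN come from the 0xA0 page, the rest from the SFF-8472 diagnostics page at 0xA2. Note that i is passed to read_i2c_sff8472() unadjusted; assuming the op's byte_offset parameter is a u8, as in the generic implementation, the value wraps to i - 256, which is the intended page-relative offset. The explicit form would be:

	if (i < ETH_MODULE_SFF_8079_LEN)
		status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
	else	/* page-relative offset spelled out, same result */
		status = hw->phy.ops.read_i2c_sff8472(hw,
				i - ETH_MODULE_SFF_8079_LEN, &databyte);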
3016static const struct ethtool_ops ixgbe_ethtool_ops = { 2969static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index be4b1fb3d0d2..7aba452833e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
63static char ixgbe_default_device_descr[] = 63static char ixgbe_default_device_descr[] =
64 "Intel(R) 10 Gigabit Network Connection"; 64 "Intel(R) 10 Gigabit Network Connection";
65#endif 65#endif
66#define DRV_VERSION "3.13.10-k" 66#define DRV_VERSION "3.15.1-k"
67const char ixgbe_driver_version[] = DRV_VERSION; 67const char ixgbe_driver_version[] = DRV_VERSION;
68static const char ixgbe_copyright[] = 68static const char ixgbe_copyright[] =
69 "Copyright (c) 1999-2013 Intel Corporation."; 69 "Copyright (c) 1999-2013 Intel Corporation.";
@@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, 109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, 110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, 111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, 113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, 114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, 115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@ -195,6 +196,86 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
195 return 0; 196 return 0;
196} 197}
197 198
199/**
200 * ixgbe_check_from_parent - Determine whether PCIe info should come from parent
201 * @hw: hw specific details
202 *
203 * This function is used by probe to determine whether a device's PCI-Express
204 * bandwidth details should be gathered from the parent bus instead of from the
205 * device. Used to ensure that various locations all have the correct device ID
206 * checks.
207 */
208static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
209{
210 switch (hw->device_id) {
211 case IXGBE_DEV_ID_82599_SFP_SF_QP:
212 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
213 return true;
214 default:
215 return false;
216 }
217}
218
219static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
220 int expected_gts)
221{
222 int max_gts = 0;
223 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
224 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
225 struct pci_dev *pdev;
226
 227 /* determine whether to use the parent device
228 */
229 if (ixgbe_pcie_from_parent(&adapter->hw))
230 pdev = adapter->pdev->bus->parent->self;
231 else
232 pdev = adapter->pdev;
233
234 if (pcie_get_minimum_link(pdev, &speed, &width) ||
235 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
236 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
237 return;
238 }
239
240 switch (speed) {
241 case PCIE_SPEED_2_5GT:
242 /* 8b/10b encoding reduces max throughput by 20% */
243 max_gts = 2 * width;
244 break;
245 case PCIE_SPEED_5_0GT:
246 /* 8b/10b encoding reduces max throughput by 20% */
247 max_gts = 4 * width;
248 break;
249 case PCIE_SPEED_8_0GT:
250 /* 128b/130b encoding only reduces throughput by 1% */
251 max_gts = 8 * width;
252 break;
253 default:
254 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
255 return;
256 }
257
258 e_dev_info("PCI Express bandwidth of %dGT/s available\n",
259 max_gts);
260 e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
261 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
262 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
263 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
264 "Unknown"),
265 width,
266 (speed == PCIE_SPEED_2_5GT ? "20%" :
267 speed == PCIE_SPEED_5_0GT ? "20%" :
268 speed == PCIE_SPEED_8_0GT ? "N/a" :
269 "Unknown"));
270
271 if (max_gts < expected_gts) {
272 e_dev_warn("This is not sufficient for optimal performance of this card.\n");
273 e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
274 expected_gts);
275 e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
276 }
277}
278
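A worked example of the max_gts arithmetic above, with assumed values: on a Gen2 x4 link, 8b/10b encoding leaves 80% of the 5.0GT/s raw rate, which is where the 4-per-lane multiplier comes from.

	int width = 4;			/* assumed: link trained at x4 */
	int max_gts = 4 * width;	/* 5.0GT/s * 0.8 = 4 per lane -> 16 */
	int expected_gts = 2 * 10;	/* e.g. dual-port 10G part: 20 */
	bool warn = max_gts < expected_gts;	/* true: 16 < 20, so warn */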
198static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) 279static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
199{ 280{
200 if (!test_bit(__IXGBE_DOWN, &adapter->state) && 281 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -3724,8 +3805,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3724 hw->addr_ctrl.user_set_promisc = true; 3805 hw->addr_ctrl.user_set_promisc = true;
3725 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3806 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3726 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); 3807 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3727 /* don't hardware filter vlans in promisc mode */ 3808 /* Only disable hardware filter vlans in promiscuous mode
3728 ixgbe_vlan_filter_disable(adapter); 3809 * if SR-IOV and VMDQ are disabled - otherwise ensure
3810 * that hardware VLAN filters remain enabled.
3811 */
3812 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3813 IXGBE_FLAG_SRIOV_ENABLED)))
3814 ixgbe_vlan_filter_disable(adapter);
3815 else
3816 ixgbe_vlan_filter_enable(adapter);
3729 } else { 3817 } else {
3730 if (netdev->flags & IFF_ALLMULTI) { 3818 if (netdev->flags & IFF_ALLMULTI) {
3731 fctrl |= IXGBE_FCTRL_MPE; 3819 fctrl |= IXGBE_FCTRL_MPE;
@@ -4087,6 +4175,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4087 case ixgbe_phy_sfp_passive_unknown: 4175 case ixgbe_phy_sfp_passive_unknown:
4088 case ixgbe_phy_sfp_active_unknown: 4176 case ixgbe_phy_sfp_active_unknown:
4089 case ixgbe_phy_sfp_ftl_active: 4177 case ixgbe_phy_sfp_ftl_active:
4178 case ixgbe_phy_qsfp_passive_unknown:
4179 case ixgbe_phy_qsfp_active_unknown:
4180 case ixgbe_phy_qsfp_intel:
4181 case ixgbe_phy_qsfp_unknown:
4090 return true; 4182 return true;
4091 case ixgbe_phy_nl: 4183 case ixgbe_phy_nl:
4092 if (hw->mac.type == ixgbe_mac_82598EB) 4184 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4352,7 +4444,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4352 if (hw->mac.san_mac_rar_index) 4444 if (hw->mac.san_mac_rar_index)
4353 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 4445 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4354 4446
4355 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) 4447 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
4356 ixgbe_ptp_reset(adapter); 4448 ixgbe_ptp_reset(adapter);
4357} 4449}
4358 4450
@@ -4714,8 +4806,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4714 ixgbe_pbthresh_setup(adapter); 4806 ixgbe_pbthresh_setup(adapter);
4715 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 4807 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4716 hw->fc.send_xon = true; 4808 hw->fc.send_xon = true;
4717 hw->fc.disable_fc_autoneg = 4809 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
4718 (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
4719 4810
4720#ifdef CONFIG_PCI_IOV 4811#ifdef CONFIG_PCI_IOV
4721 /* assign number of SR-IOV VFs */ 4812 /* assign number of SR-IOV VFs */
@@ -5205,6 +5296,9 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5205 return retval; 5296 return retval;
5206 5297
5207#endif 5298#endif
5299 if (hw->mac.ops.stop_link_on_d3)
5300 hw->mac.ops.stop_link_on_d3(hw);
5301
5208 if (wufc) { 5302 if (wufc) {
5209 ixgbe_set_rx_mode(netdev); 5303 ixgbe_set_rx_mode(netdev);
5210 5304
@@ -5681,7 +5775,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5681 5775
5682 adapter->last_rx_ptp_check = jiffies; 5776 adapter->last_rx_ptp_check = jiffies;
5683 5777
5684 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) 5778 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5685 ixgbe_ptp_start_cyclecounter(adapter); 5779 ixgbe_ptp_start_cyclecounter(adapter);
5686 5780
5687 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 5781 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5727,7 +5821,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5727 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) 5821 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5728 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 5822 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5729 5823
5730 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) 5824 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5731 ixgbe_ptp_start_cyclecounter(adapter); 5825 ixgbe_ptp_start_cyclecounter(adapter);
5732 5826
5733 e_info(drv, "NIC Link is Down\n"); 5827 e_info(drv, "NIC Link is Down\n");
@@ -5826,10 +5920,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5826 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) 5920 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5827 return; 5921 return;
5828 5922
5829 /* concurent i2c reads are not supported */
5830 if (test_bit(__IXGBE_READ_I2C, &adapter->state))
5831 return;
5832
5833 /* someone else is in init, wait until next service event */ 5923 /* someone else is in init, wait until next service event */
5834 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 5924 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5835 return; 5925 return;
@@ -6038,7 +6128,7 @@ static void ixgbe_service_task(struct work_struct *work)
6038 ixgbe_fdir_reinit_subtask(adapter); 6128 ixgbe_fdir_reinit_subtask(adapter);
6039 ixgbe_check_hang_subtask(adapter); 6129 ixgbe_check_hang_subtask(adapter);
6040 6130
6041 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) { 6131 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
6042 ixgbe_ptp_overflow_check(adapter); 6132 ixgbe_ptp_overflow_check(adapter);
6043 ixgbe_ptp_rx_hang(adapter); 6133 ixgbe_ptp_rx_hang(adapter);
6044 } 6134 }
@@ -7247,6 +7337,42 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7247}; 7337};
7248 7338
7249/** 7339/**
7340 * ixgbe_enumerate_functions - Get the number of ports this device has
7341 * @adapter: adapter structure
7342 *
 7343 * This function enumerates the physical functions co-located on a single slot,
 7344 * in order to determine how many ports a device has. This is most useful in
 7345 * determining the PCIe bandwidth, in GT/s, required for optimal
7346 * performance.
7347 **/
7348static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
7349{
7350 struct ixgbe_hw *hw = &adapter->hw;
7351 struct list_head *entry;
7352 int physfns = 0;
7353
 7354 /* Some cards cannot use the generic method of counting PCIe
 7355 * functions, and so must be hardcoded to the correct value.
7356 */
7357 switch (hw->device_id) {
7358 case IXGBE_DEV_ID_82599_SFP_SF_QP:
7359 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
7360 physfns = 4;
7361 break;
7362 default:
 7363 list_for_each(entry, &adapter->pdev->bus->devices) {
7364 struct pci_dev *pdev =
7365 list_entry(entry, struct pci_dev, bus_list);
7366 /* don't count virtual functions */
7367 if (!pdev->is_virtfn)
7368 physfns++;
7369 }
7370 }
7371
7372 return physfns;
7373}
7374
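The default leg above can also be written with the list_for_each_entry() idiom over the bus's device list; behavior is unchanged (sketch):

	struct pci_dev *entry;
	int physfns = 0;

	/* walk every function on this bus, skipping virtual functions */
	list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list)
		if (!entry->is_virtfn)
			physfns++;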
7375/**
7250 * ixgbe_wol_supported - Check whether device supports WoL 7376 * ixgbe_wol_supported - Check whether device supports WoL
7251 * @hw: hw specific details 7377 * @hw: hw specific details
7252 * @device_id: the device ID 7378 * @device_id: the device ID
@@ -7328,7 +7454,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7328 struct ixgbe_hw *hw; 7454 struct ixgbe_hw *hw;
7329 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 7455 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
7330 static int cards_found; 7456 static int cards_found;
7331 int i, err, pci_using_dac; 7457 int i, err, pci_using_dac, expected_gts;
7332 unsigned int indices = MAX_TX_QUEUES; 7458 unsigned int indices = MAX_TX_QUEUES;
7333 u8 part_str[IXGBE_PBANUM_LENGTH]; 7459 u8 part_str[IXGBE_PBANUM_LENGTH];
7334#ifdef IXGBE_FCOE 7460#ifdef IXGBE_FCOE
@@ -7483,10 +7609,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7483 hw->mac.type == ixgbe_mac_82598EB) { 7609 hw->mac.type == ixgbe_mac_82598EB) {
7484 err = 0; 7610 err = 0;
7485 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 7611 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
7486 e_dev_err("failed to load because an unsupported SFP+ " 7612 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
7487 "module type was detected.\n"); 7613 e_dev_err("Reload the driver after installing a supported module.\n");
7488 e_dev_err("Reload the driver after installing a supported "
7489 "module.\n");
7490 goto err_sw_init; 7614 goto err_sw_init;
7491 } else if (err) { 7615 } else if (err) {
7492 e_dev_err("HW Init failed: %d\n", err); 7616 e_dev_err("HW Init failed: %d\n", err);
@@ -7617,7 +7741,7 @@ skip_sriov:
7617 7741
7618 /* pick up the PCI bus settings for reporting later */ 7742 /* pick up the PCI bus settings for reporting later */
7619 hw->mac.ops.get_bus_info(hw); 7743 hw->mac.ops.get_bus_info(hw);
7620 if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) 7744 if (ixgbe_pcie_from_parent(hw))
7621 ixgbe_get_parent_bus_info(adapter); 7745 ixgbe_get_parent_bus_info(adapter);
7622 7746
7623 /* print bus type/speed/width info */ 7747 /* print bus type/speed/width info */
@@ -7643,12 +7767,20 @@ skip_sriov:
7643 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", 7767 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
7644 hw->mac.type, hw->phy.type, part_str); 7768 hw->mac.type, hw->phy.type, part_str);
7645 7769
7646 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 7770 /* calculate the expected PCIe bandwidth required for optimal
7647 e_dev_warn("PCI-Express bandwidth available for this card is " 7771 * performance. Note that some older parts will never have enough
7648 "not sufficient for optimal performance.\n"); 7772 * bandwidth due to being older generation PCIe parts. We clamp these
7649 e_dev_warn("For optimal performance a x8 PCI-Express slot " 7773 * parts to ensure no warning is displayed if it can't be fixed.
7650 "is required.\n"); 7774 */
7775 switch (hw->mac.type) {
7776 case ixgbe_mac_82598EB:
7777 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
7778 break;
7779 default:
7780 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
7781 break;
7651 } 7782 }
7783 ixgbe_check_minimum_link(adapter, expected_gts);
7652 7784
7653 /* reset the hardware with the new settings */ 7785 /* reset the hardware with the new settings */
7654 err = hw->mac.ops.start_hw(hw); 7786 err = hw->mac.ops.start_hw(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e5691ccbce9d..e4c676006be9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -204,7 +204,83 @@ out:
204} 204}
205 205
206/** 206/**
 207 * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
208 * the SWFW lock
209 * @hw: pointer to hardware structure
210 * @reg_addr: 32 bit address of PHY register to read
211 * @phy_data: Pointer to read data from PHY register
212 **/
213s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
214 u16 *phy_data)
215{
216 u32 i, data, command;
217
218 /* Setup and write the address cycle command */
219 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
220 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
221 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
222 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
223
224 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
225
226 /* Check every 10 usec to see if the address cycle completed.
227 * The MDI Command bit will clear when the operation is
228 * complete
229 */
230 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
231 udelay(10);
232
233 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
234 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
235 break;
236 }
237
238
239 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
240 hw_dbg(hw, "PHY address command did not complete.\n");
241 return IXGBE_ERR_PHY;
242 }
243
244 /* Address cycle complete, setup and write the read
245 * command
246 */
247 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
248 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
249 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
250 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
251
252 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
253
254 /* Check every 10 usec to see if the address cycle
255 * completed. The MDI Command bit will clear when the
256 * operation is complete
257 */
258 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
259 udelay(10);
260
261 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
262 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
263 break;
264 }
265
266 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
267 hw_dbg(hw, "PHY read command didn't complete\n");
268 return IXGBE_ERR_PHY;
269 }
270
271 /* Read operation is complete. Get the data
272 * from MSRWD
273 */
274 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
275 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
276 *phy_data = (u16)(data);
277
278 return 0;
279}
280
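The address and read cycles in ixgbe_read_phy_reg_mdi share a "kick MSCA, poll until MDI_COMMAND clears" primitive. A sketch of a hypothetical helper (not part of the driver) capturing it:

static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 command)
{
	u32 i;

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* poll every 10 usec; the MDI Command bit clears on completion */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);
		if (!(IXGBE_READ_REG(hw, IXGBE_MSCA) & IXGBE_MSCA_MDI_COMMAND))
			return 0;
	}

	return IXGBE_ERR_PHY;
}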
281/**
207 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register 282 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
283 * using the SWFW lock - this function is needed in most cases
208 * @hw: pointer to hardware structure 284 * @hw: pointer to hardware structure
209 * @reg_addr: 32 bit address of PHY register to read 285 * @reg_addr: 32 bit address of PHY register to read
210 * @phy_data: Pointer to read data from PHY register 286 * @phy_data: Pointer to read data from PHY register
@@ -212,10 +288,7 @@ out:
212s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 288s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
213 u32 device_type, u16 *phy_data) 289 u32 device_type, u16 *phy_data)
214{ 290{
215 u32 command; 291 s32 status;
216 u32 i;
217 u32 data;
218 s32 status = 0;
219 u16 gssr; 292 u16 gssr;
220 293
221 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) 294 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
223 else 296 else
224 gssr = IXGBE_GSSR_PHY0_SM; 297 gssr = IXGBE_GSSR_PHY0_SM;
225 298
226 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) 299 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
300 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
301 phy_data);
302 hw->mac.ops.release_swfw_sync(hw, gssr);
303 } else {
227 status = IXGBE_ERR_SWFW_SYNC; 304 status = IXGBE_ERR_SWFW_SYNC;
305 }
228 306
229 if (status == 0) { 307 return status;
230 /* Setup and write the address cycle command */ 308}
231 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
232 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
233 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
234 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
235 309
236 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 310/**
311 * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
312 * without SWFW lock
313 * @hw: pointer to hardware structure
314 * @reg_addr: 32 bit PHY register to write
315 * @device_type: 5 bit device type
316 * @phy_data: Data to write to the PHY register
317 **/
318s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
319 u32 device_type, u16 phy_data)
320{
321 u32 i, command;
237 322
238 /* 323 /* Put the data in the MDI single read and write data register*/
239 * Check every 10 usec to see if the address cycle completed. 324 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
240 * The MDI Command bit will clear when the operation is
241 * complete
242 */
243 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
244 udelay(10);
245 325
246 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 326 /* Setup and write the address cycle command */
327 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
328 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
329 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
330 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
247 331
248 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) 332 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
249 break;
250 }
251 333
252 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 334 /*
253 hw_dbg(hw, "PHY address command did not complete.\n"); 335 * Check every 10 usec to see if the address cycle completed.
254 status = IXGBE_ERR_PHY; 336 * The MDI Command bit will clear when the operation is
255 } 337 * complete
338 */
339 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
340 udelay(10);
256 341
257 if (status == 0) { 342 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
258 /* 343 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
259 * Address cycle complete, setup and write the read 344 break;
260 * command 345 }
261 */
262 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
263 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
264 (hw->phy.mdio.prtad <<
265 IXGBE_MSCA_PHY_ADDR_SHIFT) |
266 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
267
268 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
269
270 /*
271 * Check every 10 usec to see if the address cycle
272 * completed. The MDI Command bit will clear when the
273 * operation is complete
274 */
275 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
276 udelay(10);
277 346
278 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 347 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
348 hw_dbg(hw, "PHY address cmd didn't complete\n");
349 return IXGBE_ERR_PHY;
350 }
279 351
280 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) 352 /*
281 break; 353 * Address cycle complete, setup and write the write
282 } 354 * command
355 */
356 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
357 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
358 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
359 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
283 360
284 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 361 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
285 hw_dbg(hw, "PHY read command didn't complete\n");
286 status = IXGBE_ERR_PHY;
287 } else {
288 /*
289 * Read operation is complete. Get the data
290 * from MSRWD
291 */
292 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
293 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
294 *phy_data = (u16)(data);
295 }
296 }
297 362
298 hw->mac.ops.release_swfw_sync(hw, gssr); 363 /* Check every 10 usec to see if the write command
364 * completed. The MDI Command bit will clear when the
365 * operation is complete
366 */
367 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
368 udelay(10);
369
370 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
371 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
372 break;
299 } 373 }
300 374
301 return status; 375 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
376 hw_dbg(hw, "PHY write cmd didn't complete\n");
377 return IXGBE_ERR_PHY;
378 }
379
380 return 0;
302} 381}
303 382
304/** 383/**
305 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register 384 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
385 * using SWFW lock- this function is needed in most cases
306 * @hw: pointer to hardware structure 386 * @hw: pointer to hardware structure
307 * @reg_addr: 32 bit PHY register to write 387 * @reg_addr: 32 bit PHY register to write
308 * @device_type: 5 bit device type 388 * @device_type: 5 bit device type
@@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
311s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 391s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
312 u32 device_type, u16 phy_data) 392 u32 device_type, u16 phy_data)
313{ 393{
314 u32 command; 394 s32 status;
315 u32 i;
316 s32 status = 0;
317 u16 gssr; 395 u16 gssr;
318 396
319 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) 397 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
321 else 399 else
322 gssr = IXGBE_GSSR_PHY0_SM; 400 gssr = IXGBE_GSSR_PHY0_SM;
323 401
324 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) 402 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
325 status = IXGBE_ERR_SWFW_SYNC; 403 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
326 404 phy_data);
327 if (status == 0) {
328 /* Put the data in the MDI single read and write data register*/
329 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
330
331 /* Setup and write the address cycle command */
332 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
333 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
334 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
335 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
336
337 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
338
339 /*
340 * Check every 10 usec to see if the address cycle completed.
341 * The MDI Command bit will clear when the operation is
342 * complete
343 */
344 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
345 udelay(10);
346
347 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
348
349 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
350 break;
351 }
352
353 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
354 hw_dbg(hw, "PHY address cmd didn't complete\n");
355 status = IXGBE_ERR_PHY;
356 }
357
358 if (status == 0) {
359 /*
360 * Address cycle complete, setup and write the write
361 * command
362 */
363 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
364 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
365 (hw->phy.mdio.prtad <<
366 IXGBE_MSCA_PHY_ADDR_SHIFT) |
367 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
368
369 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
370
371 /*
372 * Check every 10 usec to see if the address cycle
373 * completed. The MDI Command bit will clear when the
374 * operation is complete
375 */
376 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
377 udelay(10);
378
379 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
380
381 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
382 break;
383 }
384
385 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
386 hw_dbg(hw, "PHY address cmd didn't complete\n");
387 status = IXGBE_ERR_PHY;
388 }
389 }
390
391 hw->mac.ops.release_swfw_sync(hw, gssr); 405 hw->mac.ops.release_swfw_sync(hw, gssr);
406 } else {
407 status = IXGBE_ERR_SWFW_SYNC;
392 } 408 }
393 409
394 return status; 410 return status;
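After the refactor both _generic entry points share one shape: select the per-port SWFW semaphore from the LAN ID, acquire it, delegate to the lock-free _mdi worker, release. That split lets callers that already hold the semaphore use the _mdi variants directly. A userspace sketch of the locked-wrapper/unlocked-worker pattern, with a pthread mutex standing in for the SWFW semaphore:

#include <pthread.h>

static pthread_mutex_t swfw = PTHREAD_MUTEX_INITIALIZER;

/* Lock-free worker: plays the role of ixgbe_read_phy_reg_mdi(). */
static int phy_read_unlocked(unsigned int reg, unsigned short *out)
{
	*out = 0;		/* real MDI transaction elided */
	return 0;
}

/* Locked wrapper: plays the role of ixgbe_read_phy_reg_generic(). */
int phy_read(unsigned int reg, unsigned short *out)
{
	int status;

	if (pthread_mutex_lock(&swfw) != 0)
		return -1;	/* maps to IXGBE_ERR_SWFW_SYNC */
	status = phy_read_unlocked(reg, out);
	pthread_mutex_unlock(&swfw);
	return status;
}
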
@@ -775,6 +791,8 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
775 * Read control word from PHY init contents offset 791 * Read control word from PHY init contents offset
776 */ 792 */
777 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); 793 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
794 if (ret_val)
795 goto err_eeprom;
778 control = (eword & IXGBE_CONTROL_MASK_NL) >> 796 control = (eword & IXGBE_CONTROL_MASK_NL) >>
779 IXGBE_CONTROL_SHIFT_NL; 797 IXGBE_CONTROL_SHIFT_NL;
780 edata = eword & IXGBE_DATA_MASK_NL; 798 edata = eword & IXGBE_DATA_MASK_NL;
@@ -787,10 +805,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
787 case IXGBE_DATA_NL: 805 case IXGBE_DATA_NL:
788 hw_dbg(hw, "DATA:\n"); 806 hw_dbg(hw, "DATA:\n");
789 data_offset++; 807 data_offset++;
790 hw->eeprom.ops.read(hw, data_offset++, 808 ret_val = hw->eeprom.ops.read(hw, data_offset++,
791 &phy_offset); 809 &phy_offset);
810 if (ret_val)
811 goto err_eeprom;
792 for (i = 0; i < edata; i++) { 812 for (i = 0; i < edata; i++) {
793 hw->eeprom.ops.read(hw, data_offset, &eword); 813 ret_val = hw->eeprom.ops.read(hw, data_offset,
814 &eword);
815 if (ret_val)
816 goto err_eeprom;
794 hw->phy.ops.write_reg(hw, phy_offset, 817 hw->phy.ops.write_reg(hw, phy_offset,
795 MDIO_MMD_PMAPMD, eword); 818 MDIO_MMD_PMAPMD, eword);
796 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, 819 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
@@ -822,12 +845,42 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
822 845
823out: 846out:
824 return ret_val; 847 return ret_val;
848
849err_eeprom:
850 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
851 return IXGBE_ERR_PHY;
825} 852}
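The hunk above turns fire-and-forget eeprom.ops.read() calls into checked reads that funnel into a single err_eeprom label, so the failing offset is logged exactly once. A standalone sketch of that checked-read/goto shape; eeprom_read16() is a stand-in for hw->eeprom.ops.read():

#include <stdio.h>
#include <stdint.h>

extern int eeprom_read16(uint16_t offset, uint16_t *word); /* stand-in */

int load_init_words(uint16_t offset, uint16_t *buf, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++, offset++)
		if (eeprom_read16(offset, &buf[i]))
			goto err_eeprom;
	return 0;

err_eeprom:
	fprintf(stderr, "eeprom read at offset %u failed\n", (unsigned)offset);
	return -1;	/* the driver returns IXGBE_ERR_PHY here */
}
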
826 853
827/** 854/**
828 * ixgbe_identify_sfp_module_generic - Identifies SFP modules 855 * ixgbe_identify_module_generic - Identifies module type
829 * @hw: pointer to hardware structure 856 * @hw: pointer to hardware structure
830 * 857 *
858 * Determines HW type and calls appropriate function.
859 **/
860s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
861{
862 s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
863
864 switch (hw->mac.ops.get_media_type(hw)) {
865 case ixgbe_media_type_fiber:
866 status = ixgbe_identify_sfp_module_generic(hw);
867 break;
868 case ixgbe_media_type_fiber_qsfp:
869 status = ixgbe_identify_qsfp_module_generic(hw);
870 break;
871 default:
872 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
873 status = IXGBE_ERR_SFP_NOT_PRESENT;
874 break;
875 }
876
877 return status;
878}
879
880/**
881 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
882 * @hw: pointer to hardware structure
883 *
831 * Searches for and identifies the SFP module and assigns appropriate PHY type. 884 * Searches for and identifies the SFP module and assigns appropriate PHY type.
832 **/ 885 **/
833s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 886s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
@@ -1106,6 +1159,197 @@ err_read_i2c_eeprom:
1106} 1159}
1107 1160
1108/** 1161/**
1162 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
1163 * @hw: pointer to hardware structure
1164 *
1165 * Searches for and identifies the QSFP module and assigns appropriate PHY type
1166 **/
1167s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1168{
1169 struct ixgbe_adapter *adapter = hw->back;
1170 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1171 u32 vendor_oui = 0;
1172 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1173 u8 identifier = 0;
1174 u8 comp_codes_1g = 0;
1175 u8 comp_codes_10g = 0;
1176 u8 oui_bytes[3] = {0, 0, 0};
1177 u16 enforce_sfp = 0;
1178 u8 connector = 0;
1179 u8 cable_length = 0;
1180 u8 device_tech = 0;
1181 bool active_cable = false;
1182
1183 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
1184 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1185 status = IXGBE_ERR_SFP_NOT_PRESENT;
1186 goto out;
1187 }
1188
1189 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
1190 &identifier);
1191
1192 if (status != 0)
1193 goto err_read_i2c_eeprom;
1194
1195 if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
1196 hw->phy.type = ixgbe_phy_sfp_unsupported;
1197 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1198 goto out;
1199 }
1200
1201 hw->phy.id = identifier;
1202
1203 /* LAN ID is needed for sfp_type determination */
1204 hw->mac.ops.set_lan_id(hw);
1205
1206 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
1207 &comp_codes_10g);
1208
1209 if (status != 0)
1210 goto err_read_i2c_eeprom;
1211
1212 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
1213 &comp_codes_1g);
1214
1215 if (status != 0)
1216 goto err_read_i2c_eeprom;
1217
1218 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
1219 hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
1220 if (hw->bus.lan_id == 0)
1221 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
1222 else
1223 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
1224 } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1225 IXGBE_SFF_10GBASELR_CAPABLE)) {
1226 if (hw->bus.lan_id == 0)
1227 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
1228 else
1229 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
1230 } else {
1231 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
1232 active_cable = true;
1233
1234 if (!active_cable) {
1235 /* check for active DA cables that pre-date
1236 * SFF-8436 v3.6
1237 */
1238 hw->phy.ops.read_i2c_eeprom(hw,
1239 IXGBE_SFF_QSFP_CONNECTOR,
1240 &connector);
1241
1242 hw->phy.ops.read_i2c_eeprom(hw,
1243 IXGBE_SFF_QSFP_CABLE_LENGTH,
1244 &cable_length);
1245
1246 hw->phy.ops.read_i2c_eeprom(hw,
1247 IXGBE_SFF_QSFP_DEVICE_TECH,
1248 &device_tech);
1249
1250 if ((connector ==
1251 IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
1252 (cable_length > 0) &&
1253 ((device_tech >> 4) ==
1254 IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
1255 active_cable = true;
1256 }
1257
1258 if (active_cable) {
1259 hw->phy.type = ixgbe_phy_qsfp_active_unknown;
1260 if (hw->bus.lan_id == 0)
1261 hw->phy.sfp_type =
1262 ixgbe_sfp_type_da_act_lmt_core0;
1263 else
1264 hw->phy.sfp_type =
1265 ixgbe_sfp_type_da_act_lmt_core1;
1266 } else {
1267 /* unsupported module type */
1268 hw->phy.type = ixgbe_phy_sfp_unsupported;
1269 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1270 goto out;
1271 }
1272 }
1273
1274 if (hw->phy.sfp_type != stored_sfp_type)
1275 hw->phy.sfp_setup_needed = true;
1276
1277 /* Determine if the QSFP+ PHY is dual speed or not. */
1278 hw->phy.multispeed_fiber = false;
1279 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1280 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1281 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1282 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1283 hw->phy.multispeed_fiber = true;
1284
1285 /* Determine PHY vendor for optical modules */
1286 if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1287 IXGBE_SFF_10GBASELR_CAPABLE)) {
1288 status = hw->phy.ops.read_i2c_eeprom(hw,
1289 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
1290 &oui_bytes[0]);
1291
1292 if (status != 0)
1293 goto err_read_i2c_eeprom;
1294
1295 status = hw->phy.ops.read_i2c_eeprom(hw,
1296 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
1297 &oui_bytes[1]);
1298
1299 if (status != 0)
1300 goto err_read_i2c_eeprom;
1301
1302 status = hw->phy.ops.read_i2c_eeprom(hw,
1303 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
1304 &oui_bytes[2]);
1305
1306 if (status != 0)
1307 goto err_read_i2c_eeprom;
1308
1309 vendor_oui =
1310 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1311 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1312 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1313
1314 if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
1315 hw->phy.type = ixgbe_phy_qsfp_intel;
1316 else
1317 hw->phy.type = ixgbe_phy_qsfp_unknown;
1318
1319 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1320 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
1321 /* Make sure we're a supported PHY type */
1322 if (hw->phy.type == ixgbe_phy_qsfp_intel) {
1323 status = 0;
1324 } else {
1325 if (hw->allow_unsupported_sfp == true) {
1326 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1327 status = 0;
1328 } else {
1329 hw_dbg(hw,
1330 "QSFP module not supported\n");
1331 hw->phy.type =
1332 ixgbe_phy_sfp_unsupported;
1333 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1334 }
1335 }
1336 } else {
1337 status = 0;
1338 }
1339 }
1340
1341out:
1342 return status;
1343
1344err_read_i2c_eeprom:
1345 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1346 hw->phy.id = 0;
1347 hw->phy.type = ixgbe_phy_unknown;
1348
1349 return IXGBE_ERR_SFP_NOT_PRESENT;
1350}
1351
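Stripped of its error paths, the QSFP identification above reduces to a few SFF-8436 EEPROM reads: byte 0 must carry the QSFP+ identifier (0xD), then the 10G compliance byte at 0x83 decides between passive DA, SR/LR optics, and (via the extra heuristics) active DA. A condensed sketch of that decision tree; offsets and bits mirror the IXGBE_SFF_QSFP_* defines added below, and read_eeprom_byte() stands in for hw->phy.ops.read_i2c_eeprom():

#include <stdint.h>

#define OFF_IDENTIFIER   0x00	/* IXGBE_SFF_IDENTIFIER */
#define OFF_10G_COMP     0x83	/* IXGBE_SFF_QSFP_10GBE_COMP */
#define ID_QSFP_PLUS     0x0D	/* IXGBE_SFF_IDENTIFIER_QSFP_PLUS */
#define COMP_DA_PASSIVE  0x08	/* IXGBE_SFF_QSFP_DA_PASSIVE_CABLE */
#define COMP_10G_SR      0x10	/* IXGBE_SFF_10GBASESR_CAPABLE */
#define COMP_10G_LR      0x20	/* IXGBE_SFF_10GBASELR_CAPABLE */

extern int read_eeprom_byte(uint8_t offset, uint8_t *val); /* stand-in */

enum module { MOD_NOT_PRESENT, MOD_DA_PASSIVE, MOD_OPTICAL, MOD_UNSUPPORTED };

enum module classify_qsfp(void)
{
	uint8_t id, comp;

	if (read_eeprom_byte(OFF_IDENTIFIER, &id))
		return MOD_NOT_PRESENT;
	if (id != ID_QSFP_PLUS)
		return MOD_UNSUPPORTED;
	if (read_eeprom_byte(OFF_10G_COMP, &comp))
		return MOD_NOT_PRESENT;
	if (comp & COMP_DA_PASSIVE)
		return MOD_DA_PASSIVE;
	if (comp & (COMP_10G_SR | COMP_10G_LR))
		return MOD_OPTICAL;
	return MOD_UNSUPPORTED;	/* the active-DA heuristics would run here */
}
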
1352/**
1109 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence 1353 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
1110 * @hw: pointer to hardware structure 1354 * @hw: pointer to hardware structure
1111 * @list_offset: offset to the SFP ID list 1355 * @list_offset: offset to the SFP ID list
@@ -1147,7 +1391,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1147 sfp_type = ixgbe_sfp_type_srlr_core1; 1391 sfp_type = ixgbe_sfp_type_srlr_core1;
1148 1392
1149 /* Read offset to PHY init contents */ 1393 /* Read offset to PHY init contents */
1150 hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); 1394 if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
1395 hw_err(hw, "eeprom read at %d failed\n",
1396 IXGBE_PHY_INIT_OFFSET_NL);
1397 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1398 }
1151 1399
1152 if ((!*list_offset) || (*list_offset == 0xFFFF)) 1400 if ((!*list_offset) || (*list_offset == 0xFFFF))
1153 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; 1401 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
@@ -1159,12 +1407,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1159 * Find the matching SFP ID in the EEPROM 1407 * Find the matching SFP ID in the EEPROM
1160 * and program the init sequence 1408 * and program the init sequence
1161 */ 1409 */
1162 hw->eeprom.ops.read(hw, *list_offset, &sfp_id); 1410 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1411 goto err_phy;
1163 1412
1164 while (sfp_id != IXGBE_PHY_INIT_END_NL) { 1413 while (sfp_id != IXGBE_PHY_INIT_END_NL) {
1165 if (sfp_id == sfp_type) { 1414 if (sfp_id == sfp_type) {
1166 (*list_offset)++; 1415 (*list_offset)++;
1167 hw->eeprom.ops.read(hw, *list_offset, data_offset); 1416 if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
1417 goto err_phy;
1168 if ((!*data_offset) || (*data_offset == 0xFFFF)) { 1418 if ((!*data_offset) || (*data_offset == 0xFFFF)) {
1169 hw_dbg(hw, "SFP+ module not supported\n"); 1419 hw_dbg(hw, "SFP+ module not supported\n");
1170 return IXGBE_ERR_SFP_NOT_SUPPORTED; 1420 return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1174,7 +1424,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1174 } else { 1424 } else {
1175 (*list_offset) += 2; 1425 (*list_offset) += 2;
1176 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) 1426 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1177 return IXGBE_ERR_PHY; 1427 goto err_phy;
1178 } 1428 }
1179 } 1429 }
1180 1430
@@ -1184,6 +1434,10 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1184 } 1434 }
1185 1435
1186 return 0; 1436 return 0;
1437
1438err_phy:
1439 hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
1440 return IXGBE_ERR_PHY;
1187} 1441}
1188 1442
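For context, the EEPROM layout this function walks: *list_offset points at (sfp_id, data_offset) pairs; a matching entry yields the init-sequence offset, a non-matching entry is skipped two words at a time, and the list ends at an IXGBE_PHY_INIT_END_NL sentinel (its value is not in this hunk; 0xFFFF is assumed below). Standalone sketch of the walk, with eeprom_read16() again a stand-in:

#include <stdint.h>

#define LIST_END 0xFFFF	/* assumed sentinel, per IXGBE_PHY_INIT_END_NL */

extern int eeprom_read16(uint16_t offset, uint16_t *word); /* stand-in */

/* Returns 1 and fills *data when 'wanted' is found, 0 when the list
 * ends first, -1 on a failed EEPROM read. */
int find_init_offset(uint16_t list, uint16_t wanted, uint16_t *data)
{
	uint16_t id;

	if (eeprom_read16(list, &id))
		return -1;
	while (id != LIST_END) {
		if (id == wanted)
			return eeprom_read16(++list, data) ? -1 : 1;
		list += 2;			/* skip this entry's pair */
		if (eeprom_read16(list, &id))
			return -1;
	}
	return 0;
}
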
1189/** 1443/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 886a3431cf5b..24af12e3719e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -33,17 +33,28 @@
33#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 33#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
34 34
35/* EEPROM byte offsets */ 35/* EEPROM byte offsets */
36#define IXGBE_SFF_IDENTIFIER 0x0 36#define IXGBE_SFF_IDENTIFIER 0x0
37#define IXGBE_SFF_IDENTIFIER_SFP 0x3 37#define IXGBE_SFF_IDENTIFIER_SFP 0x3
38#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 38#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
39#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 39#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
40#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 40#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
41#define IXGBE_SFF_1GBE_COMP_CODES 0x6 41#define IXGBE_SFF_1GBE_COMP_CODES 0x6
42#define IXGBE_SFF_10GBE_COMP_CODES 0x3 42#define IXGBE_SFF_10GBE_COMP_CODES 0x3
43#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 43#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
44#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C 44#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
45#define IXGBE_SFF_SFF_8472_SWAP 0x5C 45#define IXGBE_SFF_SFF_8472_SWAP 0x5C
46#define IXGBE_SFF_SFF_8472_COMP 0x5E 46#define IXGBE_SFF_SFF_8472_COMP 0x5E
47#define IXGBE_SFF_SFF_8472_OSCB 0x6E
48#define IXGBE_SFF_SFF_8472_ESCB 0x76
49#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
50#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
51#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
52#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
53#define IXGBE_SFF_QSFP_CONNECTOR 0x82
54#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
55#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
56#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
57#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
47 58
48/* Bitmasks */ 59/* Bitmasks */
49#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 60#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -54,7 +65,14 @@
54#define IXGBE_SFF_1GBASET_CAPABLE 0x8 65#define IXGBE_SFF_1GBASET_CAPABLE 0x8
55#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 66#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
56#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 67#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
68#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
69#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
70#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
57#define IXGBE_SFF_ADDRESSING_MODE 0x4 71#define IXGBE_SFF_ADDRESSING_MODE 0x4
72#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
73#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
74#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
75#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
58#define IXGBE_I2C_EEPROM_READ_MASK 0x100 76#define IXGBE_I2C_EEPROM_READ_MASK 0x100
59#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 77#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
60#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 78#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
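The QSFP defines above feed the pre-SFF-8436 v3.6 heuristic in ixgbe_identify_qsfp_module_generic(): an active DA cable that predates that spec revision advertises no compliance bit, so the driver infers it from a non-separable connector (0x23), a non-zero cable length, and an 850 nm VCSEL transmitter nibble. The same test in isolation (constant names shortened; the header spells the last one IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL):

#include <stdbool.h>
#include <stdint.h>

#define QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define QSFP_TX_850NM_VCSEL          0x0

static bool is_legacy_active_da(uint8_t connector, uint8_t cable_length,
				uint8_t device_tech)
{
	return connector == QSFP_CONNECTOR_NOT_SEPARABLE &&
	       cable_length > 0 &&
	       (device_tech >> 4) == QSFP_TX_850NM_VCSEL; /* high nibble */
}
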
@@ -102,6 +120,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
102 u32 device_type, u16 *phy_data); 120 u32 device_type, u16 *phy_data);
103s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 121s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
104 u32 device_type, u16 phy_data); 122 u32 device_type, u16 phy_data);
123s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
124 u32 device_type, u16 *phy_data);
125s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
126 u32 device_type, u16 phy_data);
105s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); 127s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
106s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 128s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
107 ixgbe_link_speed speed, 129 ixgbe_link_speed speed,
@@ -121,7 +143,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
121 u16 *firmware_version); 143 u16 *firmware_version);
122 144
123s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
124s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
148s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
125s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 149s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
126 u16 *list_offset, 150 u16 *list_offset,
127 u16 *data_offset); 151 u16 *data_offset);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 331987d6815c..5184e2a1a7d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
885 885
886 ixgbe_ptp_reset(adapter); 886 ixgbe_ptp_reset(adapter);
887 887
888 /* set the flag that PTP has been enabled */ 888 /* enter the IXGBE_PTP_RUNNING state */
889 adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED; 889 set_bit(__IXGBE_PTP_RUNNING, &adapter->state);
890 890
891 return; 891 return;
892} 892}
@@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
899 */ 899 */
900void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) 900void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
901{ 901{
902 /* stop the overflow check task */ 902 /* Leave the IXGBE_PTP_RUNNING state. */
903 adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED | 903 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
904 IXGBE_FLAG2_PTP_PPS_ENABLED); 904 return;
905 905
906 /* stop the PPS signal */
907 adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
906 ixgbe_ptp_setup_sdp(adapter); 908 ixgbe_ptp_setup_sdp(adapter);
907 909
908 cancel_work_sync(&adapter->ptp_tx_work); 910 cancel_work_sync(&adapter->ptp_tx_work);
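The PTP change swaps a flags2 bitmask for an atomic state bit: test_and_clear_bit() returns the previous value, so only the one caller that actually clears __IXGBE_PTP_RUNNING proceeds with teardown, making ixgbe_ptp_stop() safe to call more than once. A C11 sketch of the same idempotent-stop shape:

#include <stdatomic.h>

#define PTP_RUNNING (1u << 0)

static atomic_uint state;

void ptp_start(void)
{
	atomic_fetch_or(&state, PTP_RUNNING);
}

void ptp_stop(void)
{
	/* atomic_fetch_and() returns the old value, like test_and_clear_bit() */
	if (!(atomic_fetch_and(&state, ~PTP_RUNNING) & PTP_RUNNING))
		return;		/* already stopped: nothing to tear down */
	/* ...stop the PPS signal, cancel work, unregister the clock... */
}
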
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1e7d587c4e57..276d7b135332 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
173 ixgbe_disable_sriov(adapter); 173 ixgbe_disable_sriov(adapter);
174} 174}
175 175
176static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
177{
178 struct pci_dev *pdev = adapter->pdev;
179 struct pci_dev *vfdev;
180 int dev_id;
181
182 switch (adapter->hw.mac.type) {
183 case ixgbe_mac_82599EB:
184 dev_id = IXGBE_DEV_ID_82599_VF;
185 break;
186 case ixgbe_mac_X540:
187 dev_id = IXGBE_DEV_ID_X540_VF;
188 break;
189 default:
190 return false;
191 }
192
193 /* loop through all the VFs to see if we own any that are assigned */
194 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
195 while (vfdev) {
196 /* if we don't own it we don't care */
197 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
198 /* if it is assigned we cannot release it */
199 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
200 return true;
201 }
202
203 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
204 }
205
206 return false;
207}
208
209#endif /* #ifdef CONFIG_PCI_IOV */ 176#endif /* #ifdef CONFIG_PCI_IOV */
210int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) 177int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
211{ 178{
@@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
235 * without causing issues, so just leave the hardware 202 * without causing issues, so just leave the hardware
236 * available but disabled 203 * available but disabled
237 */ 204 */
238 if (ixgbe_vfs_are_assigned(adapter)) { 205 if (pci_vfs_assigned(adapter->pdev)) {
239 e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); 206 e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
240 return -EPERM; 207 return -EPERM;
241 } 208 }
@@ -672,8 +639,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
672{ 639{
673 struct ixgbe_hw *hw = &adapter->hw; 640 struct ixgbe_hw *hw = &adapter->hw;
674 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; 641 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
675 u32 reg, msgbuf[4]; 642 u32 reg, reg_offset, vf_shift;
676 u32 reg_offset, vf_shift; 643 u32 msgbuf[4] = {0, 0, 0, 0};
677 u8 *addr = (u8 *)(&msgbuf[1]); 644 u8 *addr = (u8 *)(&msgbuf[1]);
678 645
679 e_info(probe, "VF Reset msg received from vf %d\n", vf); 646 e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -768,6 +735,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
768 return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; 735 return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
769} 736}
770 737
738static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
739{
740 u32 vlvf;
741 s32 regindex;
742
743 /* short cut the special case */
744 if (vlan == 0)
745 return 0;
746
747 /* Search for the vlan id in the VLVF entries */
748 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
749 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
750 if ((vlvf & VLAN_VID_MASK) == vlan)
751 break;
752 }
753
754 /* Return a negative value if not found */
755 if (regindex >= IXGBE_VLVF_ENTRIES)
756 regindex = -1;
757
758 return regindex;
759}
760
771static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, 761static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
772 u32 *msgbuf, u32 vf) 762 u32 *msgbuf, u32 vf)
773{ 763{
@@ -775,6 +765,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
775 int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; 765 int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
776 int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); 766 int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
777 int err; 767 int err;
768 s32 reg_ndx;
769 u32 vlvf;
770 u32 bits;
778 u8 tcs = netdev_get_num_tc(adapter->netdev); 771 u8 tcs = netdev_get_num_tc(adapter->netdev);
779 772
780 if (adapter->vfinfo[vf].pf_vlan || tcs) { 773 if (adapter->vfinfo[vf].pf_vlan || tcs) {
@@ -790,10 +783,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
790 else if (adapter->vfinfo[vf].vlan_count) 783 else if (adapter->vfinfo[vf].vlan_count)
791 adapter->vfinfo[vf].vlan_count--; 784 adapter->vfinfo[vf].vlan_count--;
792 785
786 /* in case of promiscuous mode any VLAN filter set for a VF must
787 * also have the PF pool added to it.
788 */
789 if (add && adapter->netdev->flags & IFF_PROMISC)
790 err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
791
793 err = ixgbe_set_vf_vlan(adapter, add, vid, vf); 792 err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
794 if (!err && adapter->vfinfo[vf].spoofchk_enabled) 793 if (!err && adapter->vfinfo[vf].spoofchk_enabled)
795 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); 794 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
796 795
796 /* Go through all the checks to see if the VLAN filter should
797 * be wiped completely.
798 */
799 if (!add && adapter->netdev->flags & IFF_PROMISC) {
800 reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
801 if (reg_ndx < 0)
802 goto out;
803 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
804 /* See if any other pools are set for this VLAN filter
805 * entry other than the PF.
806 */
807 if (VMDQ_P(0) < 32) {
808 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
809 bits &= ~(1 << VMDQ_P(0));
810 bits |= IXGBE_READ_REG(hw,
811 IXGBE_VLVFB(reg_ndx * 2) + 1);
812 } else {
813 bits = IXGBE_READ_REG(hw,
814 IXGBE_VLVFB(reg_ndx * 2) + 1);
815 bits &= ~(1 << (VMDQ_P(0) - 32));
816 bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
817 }
818
819 /* If the filter was removed then ensure PF pool bit
820 * is cleared if the PF only added itself to the pool
821 * because the PF is in promiscuous mode.
822 */
823 if ((vlvf & VLAN_VID_MASK) == vid &&
824 !test_bit(vid, adapter->active_vlans) && !bits)
825 ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
826 }
827
828out:
829
797 return err; 830 return err;
798} 831}
799 832
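Each VLVF entry carries a 64-bit pool-membership bitmap split across two 32-bit VLVFB registers; the hunk above masks the PF's own bit out of its half and ORs in the other half to ask whether any other pool still references the VLAN before wiping the filter. The bit arithmetic in isolation:

#include <stdbool.h>
#include <stdint.h>

/* vlvfb_lo covers pools 0-31, vlvfb_hi pools 32-63. */
static bool other_pools_active(uint32_t vlvfb_lo, uint32_t vlvfb_hi,
			       unsigned int pf_pool)
{
	if (pf_pool < 32)
		vlvfb_lo &= ~(1u << pf_pool);		/* drop the PF's bit */
	else
		vlvfb_hi &= ~(1u << (pf_pool - 32));

	return (vlvfb_lo | vlvfb_hi) != 0;
}
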
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 70c6aa3d3f95..6442cf8f9dce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -69,6 +69,7 @@
69#define IXGBE_DEV_ID_82599_LS 0x154F 69#define IXGBE_DEV_ID_82599_LS 0x154F
70#define IXGBE_DEV_ID_X540T 0x1528 70#define IXGBE_DEV_ID_X540T 0x1528
71#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A 71#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
72#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
72#define IXGBE_DEV_ID_X540T1 0x1560 73#define IXGBE_DEV_ID_X540T1 0x1560
73 74
74/* VF Device IDs */ 75/* VF Device IDs */
@@ -1520,9 +1521,11 @@ enum {
1520#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ 1521#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
1521#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ 1522#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
1522#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ 1523#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
1524#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
1523#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ 1525#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
1524#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ 1526#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
1525#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ 1527#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */
1528#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
1526 1529
1527/* LEDCTL Bit Masks */ 1530/* LEDCTL Bit Masks */
1528#define IXGBE_LED_IVRT_BASE 0x00000040 1531#define IXGBE_LED_IVRT_BASE 0x00000040
@@ -1593,6 +1596,7 @@ enum {
1593#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1596#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1594#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1597#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1595#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1598#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1599#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
1596#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 1600#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
1597 1601
1598#define IXGBE_MACC_FLU 0x00000001 1602#define IXGBE_MACC_FLU 0x00000001
@@ -2582,6 +2586,10 @@ enum ixgbe_phy_type {
2582 ixgbe_phy_sfp_ftl_active, 2586 ixgbe_phy_sfp_ftl_active,
2583 ixgbe_phy_sfp_unknown, 2587 ixgbe_phy_sfp_unknown,
2584 ixgbe_phy_sfp_intel, 2588 ixgbe_phy_sfp_intel,
2589 ixgbe_phy_qsfp_passive_unknown,
2590 ixgbe_phy_qsfp_active_unknown,
2591 ixgbe_phy_qsfp_intel,
2592 ixgbe_phy_qsfp_unknown,
2585 ixgbe_phy_sfp_unsupported, 2593 ixgbe_phy_sfp_unsupported,
2586 ixgbe_phy_generic 2594 ixgbe_phy_generic
2587}; 2595};
@@ -2622,6 +2630,8 @@ enum ixgbe_sfp_type {
2622enum ixgbe_media_type { 2630enum ixgbe_media_type {
2623 ixgbe_media_type_unknown = 0, 2631 ixgbe_media_type_unknown = 0,
2624 ixgbe_media_type_fiber, 2632 ixgbe_media_type_fiber,
2633 ixgbe_media_type_fiber_fixed,
2634 ixgbe_media_type_fiber_qsfp,
2625 ixgbe_media_type_fiber_lco, 2635 ixgbe_media_type_fiber_lco,
2626 ixgbe_media_type_copper, 2636 ixgbe_media_type_copper,
2627 ixgbe_media_type_backplane, 2637 ixgbe_media_type_backplane,
@@ -2838,6 +2848,7 @@ struct ixgbe_mac_operations {
2838 void (*disable_tx_laser)(struct ixgbe_hw *); 2848 void (*disable_tx_laser)(struct ixgbe_hw *);
2839 void (*enable_tx_laser)(struct ixgbe_hw *); 2849 void (*enable_tx_laser)(struct ixgbe_hw *);
2840 void (*flap_tx_laser)(struct ixgbe_hw *); 2850 void (*flap_tx_laser)(struct ixgbe_hw *);
2851 void (*stop_link_on_d3)(struct ixgbe_hw *);
2841 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2852 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2842 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2853 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
2843 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2854 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2885,6 +2896,8 @@ struct ixgbe_phy_operations {
2885 s32 (*reset)(struct ixgbe_hw *); 2896 s32 (*reset)(struct ixgbe_hw *);
2886 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); 2897 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
2887 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); 2898 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
2899 s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
2900 s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
2888 s32 (*setup_link)(struct ixgbe_hw *); 2901 s32 (*setup_link)(struct ixgbe_hw *);
2889 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2902 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2890 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); 2903 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
@@ -2953,6 +2966,7 @@ struct ixgbe_phy_info {
2953 bool smart_speed_active; 2966 bool smart_speed_active;
2954 bool multispeed_fiber; 2967 bool multispeed_fiber;
2955 bool reset_if_overtemp; 2968 bool reset_if_overtemp;
2969 bool qsfp_shared_i2c_bus;
2956}; 2970};
2957 2971
2958#include "ixgbe_mbx.h" 2972#include "ixgbe_mbx.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f5166ad6bb5..59a62bbfb371 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -488,8 +488,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
488 * source pruning. 488 * source pruning.
489 */ 489 */
490 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 490 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
491 !(compare_ether_addr(adapter->netdev->dev_addr, 491 ether_addr_equal(adapter->netdev->dev_addr,
492 eth_hdr(skb)->h_source))) { 492 eth_hdr(skb)->h_source)) {
493 dev_kfree_skb_irq(skb); 493 dev_kfree_skb_irq(skb);
494 goto next_desc; 494 goto next_desc;
495 } 495 }
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c35db735958f..7fb5677451f9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2641,7 +2641,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2641 ret = mv643xx_eth_shared_of_probe(pdev); 2641 ret = mv643xx_eth_shared_of_probe(pdev);
2642 if (ret) 2642 if (ret)
2643 return ret; 2643 return ret;
2644 pd = pdev->dev.platform_data; 2644 pd = dev_get_platdata(&pdev->dev);
2645 2645
2646 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2646 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2647 pd->tx_csum_limit : 9 * 1024; 2647 pd->tx_csum_limit : 9 * 1024;
@@ -2833,7 +2833,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2833 struct resource *res; 2833 struct resource *res;
2834 int err; 2834 int err;
2835 2835
2836 pd = pdev->dev.platform_data; 2836 pd = dev_get_platdata(&pdev->dev);
2837 if (pd == NULL) { 2837 if (pd == NULL) {
2838 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); 2838 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2839 return -ENODEV; 2839 return -ENODEV;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b017818bccae..e35bac7cfdf1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -79,10 +79,10 @@
79#define MVNETA_MAC_ADDR_HIGH 0x2418 79#define MVNETA_MAC_ADDR_HIGH 0x2418
80#define MVNETA_SDMA_CONFIG 0x241c 80#define MVNETA_SDMA_CONFIG 0x241c
81#define MVNETA_SDMA_BRST_SIZE_16 4 81#define MVNETA_SDMA_BRST_SIZE_16 4
82#define MVNETA_NO_DESC_SWAP 0x0
83#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) 82#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
84#define MVNETA_RX_NO_DATA_SWAP BIT(4) 83#define MVNETA_RX_NO_DATA_SWAP BIT(4)
85#define MVNETA_TX_NO_DATA_SWAP BIT(5) 84#define MVNETA_TX_NO_DATA_SWAP BIT(5)
85#define MVNETA_DESC_SWAP BIT(6)
86#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) 86#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
87#define MVNETA_PORT_STATUS 0x2444 87#define MVNETA_PORT_STATUS 0x2444
88#define MVNETA_TX_IN_PRGRS BIT(1) 88#define MVNETA_TX_IN_PRGRS BIT(1)
@@ -138,7 +138,9 @@
138#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 138#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
139#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 139#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
140#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 140#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
141#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
141#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 142#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
143#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
142#define MVNETA_MIB_COUNTERS_BASE 0x3080 144#define MVNETA_MIB_COUNTERS_BASE 0x3080
143#define MVNETA_MIB_LATE_COLLISION 0x7c 145#define MVNETA_MIB_LATE_COLLISION 0x7c
144#define MVNETA_DA_FILT_SPEC_MCAST 0x3400 146#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -264,8 +266,7 @@ struct mvneta_port {
264 * layout of the transmit and reception DMA descriptors, and their 266 * layout of the transmit and reception DMA descriptors, and their
265 * layout is therefore defined by the hardware design 267 * layout is therefore defined by the hardware design
266 */ 268 */
267struct mvneta_tx_desc { 269
268 u32 command; /* Options used by HW for packet transmitting.*/
269#define MVNETA_TX_L3_OFF_SHIFT 0 270#define MVNETA_TX_L3_OFF_SHIFT 0
270#define MVNETA_TX_IP_HLEN_SHIFT 8 271#define MVNETA_TX_IP_HLEN_SHIFT 8
271#define MVNETA_TX_L4_UDP BIT(16) 272#define MVNETA_TX_L4_UDP BIT(16)
@@ -280,15 +281,6 @@ struct mvneta_tx_desc {
280#define MVNETA_TX_L4_CSUM_FULL BIT(30) 281#define MVNETA_TX_L4_CSUM_FULL BIT(30)
281#define MVNETA_TX_L4_CSUM_NOT BIT(31) 282#define MVNETA_TX_L4_CSUM_NOT BIT(31)
282 283
283 u16 reserverd1; /* csum_l4 (for future use) */
284 u16 data_size; /* Data size of transmitted packet in bytes */
285 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
286 u32 reserved2; /* hw_cmd - (for future use, PMT) */
287 u32 reserved3[4]; /* Reserved - (for future use) */
288};
289
290struct mvneta_rx_desc {
291 u32 status; /* Info about received packet */
292#define MVNETA_RXD_ERR_CRC 0x0 284#define MVNETA_RXD_ERR_CRC 0x0
293#define MVNETA_RXD_ERR_SUMMARY BIT(16) 285#define MVNETA_RXD_ERR_SUMMARY BIT(16)
294#define MVNETA_RXD_ERR_OVERRUN BIT(17) 286#define MVNETA_RXD_ERR_OVERRUN BIT(17)
@@ -299,16 +291,57 @@ struct mvneta_rx_desc {
299#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) 291#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
300#define MVNETA_RXD_L4_CSUM_OK BIT(30) 292#define MVNETA_RXD_L4_CSUM_OK BIT(30)
301 293
294#if defined(__LITTLE_ENDIAN)
295struct mvneta_tx_desc {
296 u32 command; /* Options used by HW for packet transmitting.*/
297 u16 reserverd1; /* csum_l4 (for future use) */
298 u16 data_size; /* Data size of transmitted packet in bytes */
299 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
300 u32 reserved2; /* hw_cmd - (for future use, PMT) */
301 u32 reserved3[4]; /* Reserved - (for future use) */
302};
303
304struct mvneta_rx_desc {
305 u32 status; /* Info about received packet */
302 u16 reserved1; /* pnc_info - (for future use, PnC) */ 306 u16 reserved1; /* pnc_info - (for future use, PnC) */
303 u16 data_size; /* Size of received packet in bytes */ 307 u16 data_size; /* Size of received packet in bytes */
308
304 u32 buf_phys_addr; /* Physical address of the buffer */ 309 u32 buf_phys_addr; /* Physical address of the buffer */
305 u32 reserved2; /* pnc_flow_id (for future use, PnC) */ 310 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
311
306 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 312 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
307 u16 reserved3; /* prefetch_cmd, for future use */ 313 u16 reserved3; /* prefetch_cmd, for future use */
308 u16 reserved4; /* csum_l4 - (for future use, PnC) */ 314 u16 reserved4; /* csum_l4 - (for future use, PnC) */
315
316 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
317 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
318};
319#else
320struct mvneta_tx_desc {
321 u16 data_size; /* Data size of transmitted packet in bytes */
322 u16 reserverd1; /* csum_l4 (for future use) */
323 u32 command; /* Options used by HW for packet transmitting.*/
324 u32 reserved2; /* hw_cmd - (for future use, PMT) */
325 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
326 u32 reserved3[4]; /* Reserved - (for future use) */
327};
328
329struct mvneta_rx_desc {
330 u16 data_size; /* Size of received packet in bytes */
331 u16 reserved1; /* pnc_info - (for future use, PnC) */
332 u32 status; /* Info about received packet */
333
334 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
335 u32 buf_phys_addr; /* Physical address of the buffer */
336
337 u16 reserved4; /* csum_l4 - (for future use, PnC) */
338 u16 reserved3; /* prefetch_cmd, for future use */
339 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
340
309 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ 341 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
310 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ 342 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
311}; 343};
344#endif
312 345
313struct mvneta_tx_queue { 346struct mvneta_tx_queue {
314 /* Number of this TX queue, in the range 0-7 */ 347 /* Number of this TX queue, in the range 0-7 */
@@ -908,13 +941,22 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
908 /* Default burst size */ 941 /* Default burst size */
909 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 942 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
910 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 943 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
944 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
911 945
912 val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP | 946#if defined(__BIG_ENDIAN)
913 MVNETA_NO_DESC_SWAP); 947 val |= MVNETA_DESC_SWAP;
948#endif
914 949
915 /* Assign port SDMA configuration */ 950 /* Assign port SDMA configuration */
916 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); 951 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
917 952
953 /* Disable PHY polling in hardware, since we're using the
954 * kernel phylib to do this.
955 */
956 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
957 val &= ~MVNETA_PHY_POLLING_ENABLE;
958 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
959
918 mvneta_set_ucast_table(pp, -1); 960 mvneta_set_ucast_table(pp, -1);
919 mvneta_set_special_mcast_table(pp, -1); 961 mvneta_set_special_mcast_table(pp, -1);
920 mvneta_set_other_mcast_table(pp, -1); 962 mvneta_set_other_mcast_table(pp, -1);
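Disabling hardware PHY polling above is a textbook read-modify-write: read MVNETA_UNIT_CONTROL, clear just MVNETA_PHY_POLLING_ENABLE, write the word back so every other field survives. Standalone sketch with stand-in MMIO helpers and hypothetical offset/bit values (the real ones are not in this hunk):

#include <stdint.h>

#define UNIT_CONTROL       0x24b0u	/* hypothetical register offset */
#define PHY_POLLING_ENABLE (1u << 1)	/* hypothetical bit position */

extern uint32_t mmio_read32(uintptr_t reg);
extern void mmio_write32(uintptr_t reg, uint32_t val);

static void disable_phy_polling(uintptr_t base)
{
	uint32_t val = mmio_read32(base + UNIT_CONTROL);

	val &= ~PHY_POLLING_ENABLE;	/* touch only this bit */
	mmio_write32(base + UNIT_CONTROL, val);
}
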
@@ -2307,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
2307 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2349 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2308 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2350 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2309 MVNETA_GMAC_CONFIG_GMII_SPEED | 2351 MVNETA_GMAC_CONFIG_GMII_SPEED |
2310 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 2352 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2353 MVNETA_GMAC_AN_SPEED_EN |
2354 MVNETA_GMAC_AN_DUPLEX_EN);
2311 2355
2312 if (phydev->duplex) 2356 if (phydev->duplex)
2313 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 2357 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2440,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
2440 return 0; 2484 return 0;
2441} 2485}
2442 2486
2487static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2488{
2489 struct mvneta_port *pp = netdev_priv(dev);
2490 int ret;
2491
2492 if (!pp->phy_dev)
2493 return -ENOTSUPP;
2494
2495 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2496 if (!ret)
2497 mvneta_adjust_link(dev);
2498
2499 return ret;
2500}
2501
2443/* Ethtool methods */ 2502/* Ethtool methods */
2444 2503
2445/* Get settings (phy address, speed) for ethtools */ 2504/* Get settings (phy address, speed) for ethtools */
@@ -2558,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
2558 .ndo_change_mtu = mvneta_change_mtu, 2617 .ndo_change_mtu = mvneta_change_mtu,
2559 .ndo_tx_timeout = mvneta_tx_timeout, 2618 .ndo_tx_timeout = mvneta_tx_timeout,
2560 .ndo_get_stats64 = mvneta_get_stats64, 2619 .ndo_get_stats64 = mvneta_get_stats64,
2620 .ndo_do_ioctl = mvneta_ioctl,
2561}; 2621};
2562 2622
2563const struct ethtool_ops mvneta_eth_tool_ops = { 2623const struct ethtool_ops mvneta_eth_tool_ops = {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index db481477bcc5..4ae0c7426010 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -583,10 +583,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
583 * table is full. 583 * table is full.
584 */ 584 */
585 if (pep->htpr == NULL) { 585 if (pep->htpr == NULL) {
586 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, 586 pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
587 HASH_ADDR_TABLE_SIZE, 587 HASH_ADDR_TABLE_SIZE,
588 &pep->htpr_dma, 588 &pep->htpr_dma, GFP_KERNEL);
589 GFP_KERNEL | __GFP_ZERO);
590 if (pep->htpr == NULL) 589 if (pep->htpr == NULL)
591 return -ENOMEM; 590 return -ENOMEM;
592 } else { 591 } else {
@@ -1024,9 +1023,9 @@ static int rxq_init(struct net_device *dev)
1024 pep->rx_desc_count = 0; 1023 pep->rx_desc_count = 0;
1025 size = pep->rx_ring_size * sizeof(struct rx_desc); 1024 size = pep->rx_ring_size * sizeof(struct rx_desc);
1026 pep->rx_desc_area_size = size; 1025 pep->rx_desc_area_size = size;
1027 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1026 pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
1028 &pep->rx_desc_dma, 1027 &pep->rx_desc_dma,
1029 GFP_KERNEL | __GFP_ZERO); 1028 GFP_KERNEL);
1030 if (!pep->p_rx_desc_area) 1029 if (!pep->p_rx_desc_area)
1031 goto out; 1030 goto out;
1032 1031
@@ -1085,9 +1084,9 @@ static int txq_init(struct net_device *dev)
1085 pep->tx_desc_count = 0; 1084 pep->tx_desc_count = 0;
1086 size = pep->tx_ring_size * sizeof(struct tx_desc); 1085 size = pep->tx_ring_size * sizeof(struct tx_desc);
1087 pep->tx_desc_area_size = size; 1086 pep->tx_desc_area_size = size;
1088 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1087 pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
1089 &pep->tx_desc_dma, 1088 &pep->tx_desc_dma,
1090 GFP_KERNEL | __GFP_ZERO); 1089 GFP_KERNEL);
1091 if (!pep->p_tx_desc_area) 1090 if (!pep->p_tx_desc_area)
1092 goto out; 1091 goto out;
1093 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1092 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
@@ -1517,7 +1516,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1517 printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME); 1516 printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
1518 eth_hw_addr_random(dev); 1517 eth_hw_addr_random(dev);
1519 1518
1520 pep->pd = pdev->dev.platform_data; 1519 pep->pd = dev_get_platdata(&pdev->dev);
1521 pep->rx_ring_size = NUM_RX_DESCS; 1520 pep->rx_ring_size = NUM_RX_DESCS;
1522 if (pep->pd->rx_queue_size) 1521 if (pep->pd->rx_queue_size)
1523 pep->rx_ring_size = pep->pd->rx_queue_size; 1522 pep->rx_ring_size = pep->pd->rx_queue_size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 299d0184f983..ea20182c6969 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,7 +800,16 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
800 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 800 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
801} 801}
802 802
803int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, 803static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
804 struct mlx4_vhcr *vhcr,
805 struct mlx4_cmd_mailbox *inbox,
806 struct mlx4_cmd_mailbox *outbox,
807 struct mlx4_cmd_info *cmd)
808{
809 return -EPERM;
810}
811
812static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
804 struct mlx4_vhcr *vhcr, 813 struct mlx4_vhcr *vhcr,
805 struct mlx4_cmd_mailbox *inbox, 814 struct mlx4_cmd_mailbox *inbox,
806 struct mlx4_cmd_mailbox *outbox, 815 struct mlx4_cmd_mailbox *outbox,
@@ -1252,6 +1261,15 @@ static struct mlx4_cmd_info cmd_info[] = {
1252 .wrapper = MLX4_CMD_UPDATE_QP_wrapper 1261 .wrapper = MLX4_CMD_UPDATE_QP_wrapper
1253 }, 1262 },
1254 { 1263 {
1264 .opcode = MLX4_CMD_GET_OP_REQ,
1265 .has_inbox = false,
1266 .has_outbox = false,
1267 .out_is_imm = false,
1268 .encode_slave_id = false,
1269 .verify = NULL,
1270 .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
1271 },
1272 {
1255 .opcode = MLX4_CMD_CONF_SPECIAL_QP, 1273 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1256 .has_inbox = false, 1274 .has_inbox = false,
1257 .has_outbox = false, 1275 .has_outbox = false,
@@ -1526,7 +1544,7 @@ static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
1526 return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); 1544 return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
1527} 1545}
1528 1546
1529int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, 1547static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1530 int slave, int port) 1548 int slave, int port)
1531{ 1549{
1532 struct mlx4_vport_oper_state *vp_oper; 1550 struct mlx4_vport_oper_state *vp_oper;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 9d4a1ea030d8..b4881b686159 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -160,6 +160,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
160 struct ieee_pfc *pfc) 160 struct ieee_pfc *pfc)
161{ 161{
162 struct mlx4_en_priv *priv = netdev_priv(dev); 162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct mlx4_en_port_profile *prof = priv->prof;
163 struct mlx4_en_dev *mdev = priv->mdev; 164 struct mlx4_en_dev *mdev = priv->mdev;
164 int err; 165 int err;
165 166
@@ -169,15 +170,17 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
169 pfc->mbc, 170 pfc->mbc,
170 pfc->delay); 171 pfc->delay);
171 172
172 priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en; 173 prof->rx_pause = !pfc->pfc_en;
173 priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en; 174 prof->tx_pause = !pfc->pfc_en;
175 prof->rx_ppp = pfc->pfc_en;
176 prof->tx_ppp = pfc->pfc_en;
174 177
175 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 178 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
176 priv->rx_skb_size + ETH_FCS_LEN, 179 priv->rx_skb_size + ETH_FCS_LEN,
177 priv->prof->tx_pause, 180 prof->tx_pause,
178 priv->prof->tx_ppp, 181 prof->tx_ppp,
179 priv->prof->rx_pause, 182 prof->rx_pause,
180 priv->prof->rx_ppp); 183 prof->rx_ppp);
181 if (err) 184 if (err)
182 en_err(priv, "Failed setting pause params\n"); 185 en_err(priv, "Failed setting pause params\n");
183 186
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 7c492382da09..0698c82d6ff1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
191 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); 191 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
192} 192}
193 193
194static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
195 struct mlx4_en_tx_ring *ring, int index,
196 u8 owner)
197{
198 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
199 struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
200 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
201 void *end = ring->buf + ring->buf_size;
202 __be32 *ptr = (__be32 *)tx_desc;
203 int i;
204
205 /* Optimize the common case when there are no wraparounds */
206 if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
207 /* Stamp the freed descriptor */
208 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
209 i += STAMP_STRIDE) {
210 *ptr = stamp;
211 ptr += STAMP_DWORDS;
212 }
213 } else {
214 /* Stamp the freed descriptor */
215 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
216 i += STAMP_STRIDE) {
217 *ptr = stamp;
218 ptr += STAMP_DWORDS;
219 if ((void *)ptr >= end) {
220 ptr = ring->buf;
221 stamp ^= cpu_to_be32(0x80000000);
222 }
223 }
224 }
225}
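mlx4_en_stamp_wqe() writes an ownership token every STAMP_STRIDE bytes of a freed descriptor so the producer can recognize reusable slots; when the walk wraps past the end of the ring buffer the token's top bit is flipped, matching the ownership convention for the next lap. A generic sketch of that wrap-and-flip walk (ring assumed 4-byte aligned and len a multiple of the stride):

#include <stdint.h>

#define RING_BYTES   4096
#define STAMP_STRIDE 64

static void stamp_range(uint8_t *ring, unsigned int start,
			unsigned int len, uint32_t stamp)
{
	unsigned int pos = start, off;

	for (off = 0; off < len; off += STAMP_STRIDE) {
		*(uint32_t *)(ring + pos) = stamp;
		pos += STAMP_STRIDE;
		if (pos >= RING_BYTES) {	/* wrapped onto the next lap */
			pos = 0;
			stamp ^= 0x80000000u;	/* flip the ownership bit */
		}
	}
}
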
226
194 227
195static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, 228static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
196 struct mlx4_en_tx_ring *ring, 229 struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
205 void *end = ring->buf + ring->buf_size; 238 void *end = ring->buf + ring->buf_size;
206 int frags = skb_shinfo(skb)->nr_frags; 239 int frags = skb_shinfo(skb)->nr_frags;
207 int i; 240 int i;
208 __be32 *ptr = (__be32 *)tx_desc;
209 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
210 struct skb_shared_hwtstamps hwts; 241 struct skb_shared_hwtstamps hwts;
211 242
212 if (timestamp) { 243 if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
232 skb_frag_size(frag), PCI_DMA_TODEVICE); 263 skb_frag_size(frag), PCI_DMA_TODEVICE);
233 } 264 }
234 } 265 }
235 /* Stamp the freed descriptor */
236 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
237 *ptr = stamp;
238 ptr += STAMP_DWORDS;
239 }
240
241 } else { 266 } else {
242 if (!tx_info->inl) { 267 if (!tx_info->inl) {
243 if ((void *) data >= end) { 268 if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
263 ++data; 288 ++data;
264 } 289 }
265 } 290 }
266 /* Stamp the freed descriptor */
267 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
268 *ptr = stamp;
269 ptr += STAMP_DWORDS;
270 if ((void *) ptr >= end) {
271 ptr = ring->buf;
272 stamp ^= cpu_to_be32(0x80000000);
273 }
274 }
275
276 } 291 }
277 dev_kfree_skb_any(skb); 292 dev_kfree_skb_any(skb);
278 return tx_info->nr_txbb; 293 return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
318 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; 333 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
319 struct mlx4_cqe *cqe; 334 struct mlx4_cqe *cqe;
320 u16 index; 335 u16 index;
321 u16 new_index, ring_index; 336 u16 new_index, ring_index, stamp_index;
322 u32 txbbs_skipped = 0; 337 u32 txbbs_skipped = 0;
338 u32 txbbs_stamp = 0;
323 u32 cons_index = mcq->cons_index; 339 u32 cons_index = mcq->cons_index;
324 int size = cq->size; 340 int size = cq->size;
325 u32 size_mask = ring->size_mask; 341 u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
335 index = cons_index & size_mask; 351 index = cons_index & size_mask;
336 cqe = &buf[(index << factor) + factor]; 352 cqe = &buf[(index << factor) + factor];
337 ring_index = ring->cons & size_mask; 353 ring_index = ring->cons & size_mask;
354 stamp_index = ring_index;
338 355
339 /* Process all completed CQEs */ 356 /* Process all completed CQEs */
340 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, 357 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -345,6 +362,15 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
345 */ 362 */
346 rmb(); 363 rmb();
347 364
365 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
366 MLX4_CQE_OPCODE_ERROR)) {
367 struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
368
369 en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
370 cqe_err->vendor_err_syndrome,
371 cqe_err->syndrome);
372 }
373
348 /* Skip over last polled CQE */ 374 /* Skip over last polled CQE */
349 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; 375 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
350 376
@@ -359,6 +385,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
359 priv, ring, ring_index, 385 priv, ring, ring_index,
360 !!((ring->cons + txbbs_skipped) & 386 !!((ring->cons + txbbs_skipped) &
361 ring->size), timestamp); 387 ring->size), timestamp);
388
389 mlx4_en_stamp_wqe(priv, ring, stamp_index,
390 !!((ring->cons + txbbs_stamp) &
391 ring->size));
392 stamp_index = ring_index;
393 txbbs_stamp = txbbs_skipped;
362 packets++; 394 packets++;
363 bytes += ring->tx_info[ring_index].nr_bytes; 395 bytes += ring->tx_info[ring_index].nr_bytes;
364 } while (ring_index != new_index); 396 } while (ring_index != new_index);
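The new mlx4_en_stamp_wqe() is deliberately called one step behind mlx4_en_free_tx_desc(): stamp_index always names the slot handled on the previous iteration, so the final slot of a batch is only stamped on the next completion pass. A toy model of that trailing order, with free_slot/stamp_slot as stand-ins rather than driver functions:

#include <stdio.h>

#define RING_SIZE 8

static void free_slot(int idx)  { printf("free  slot %d\n", idx); }
static void stamp_slot(int idx) { printf("stamp slot %d\n", idx); }

int main(void)
{
	int cons = 0, last = 5;		/* completions reported up to slot 5 */
	int idx = cons, stamp_idx = cons;

	while (idx != last) {
		idx = (idx + 1) % RING_SIZE;	/* advance to the completed slot */
		free_slot(idx);			/* unmap buffers, free the skb */
		stamp_slot(stamp_idx);		/* stamp the slot handled previously */
		stamp_idx = idx;		/* stamping trails freeing by one */
	}
	return 0;
}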
@@ -556,17 +588,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
556{ 588{
557 struct mlx4_en_priv *priv = netdev_priv(dev); 589 struct mlx4_en_priv *priv = netdev_priv(dev);
558 struct mlx4_en_dev *mdev = priv->mdev; 590 struct mlx4_en_dev *mdev = priv->mdev;
591 struct device *ddev = priv->ddev;
559 struct mlx4_en_tx_ring *ring; 592 struct mlx4_en_tx_ring *ring;
560 struct mlx4_en_tx_desc *tx_desc; 593 struct mlx4_en_tx_desc *tx_desc;
561 struct mlx4_wqe_data_seg *data; 594 struct mlx4_wqe_data_seg *data;
562 struct skb_frag_struct *frag;
563 struct mlx4_en_tx_info *tx_info; 595 struct mlx4_en_tx_info *tx_info;
564 struct ethhdr *ethh;
565 int tx_ind = 0; 596 int tx_ind = 0;
566 int nr_txbb; 597 int nr_txbb;
567 int desc_size; 598 int desc_size;
568 int real_size; 599 int real_size;
569 dma_addr_t dma;
570 u32 index, bf_index; 600 u32 index, bf_index;
571 __be32 op_own; 601 __be32 op_own;
572 u16 vlan_tag = 0; 602 u16 vlan_tag = 0;
@@ -642,6 +672,61 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
642 tx_info->skb = skb; 672 tx_info->skb = skb;
643 tx_info->nr_txbb = nr_txbb; 673 tx_info->nr_txbb = nr_txbb;
644 674
675 if (lso_header_size)
676 data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
677 DS_SIZE));
678 else
679 data = &tx_desc->data;
680
681 /* valid only for non-inline segments */
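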
682 tx_info->data_offset = (void *)data - (void *)tx_desc;
683
684 tx_info->linear = (lso_header_size < skb_headlen(skb) &&
685 !is_inline(skb, NULL)) ? 1 : 0;
686
687 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
688
689 if (is_inline(skb, &fragptr)) {
690 tx_info->inl = 1;
691 } else {
692 /* Map fragments */
693 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
694 struct skb_frag_struct *frag;
695 dma_addr_t dma;
696
697 frag = &skb_shinfo(skb)->frags[i];
698 dma = skb_frag_dma_map(ddev, frag,
699 0, skb_frag_size(frag),
700 DMA_TO_DEVICE);
701 if (dma_mapping_error(ddev, dma))
702 goto tx_drop_unmap;
703
704 data->addr = cpu_to_be64(dma);
705 data->lkey = cpu_to_be32(mdev->mr.key);
706 wmb();
707 data->byte_count = cpu_to_be32(skb_frag_size(frag));
708 --data;
709 }
710
711 /* Map linear part */
712 if (tx_info->linear) {
713 u32 byte_count = skb_headlen(skb) - lso_header_size;
714 dma_addr_t dma;
715
716 dma = dma_map_single(ddev, skb->data +
717 lso_header_size, byte_count,
718 PCI_DMA_TODEVICE);
719 if (dma_mapping_error(ddev, dma))
720 goto tx_drop_unmap;
721
722 data->addr = cpu_to_be64(dma);
723 data->lkey = cpu_to_be32(mdev->mr.key);
724 wmb();
725 data->byte_count = cpu_to_be32(byte_count);
726 }
727 tx_info->inl = 0;
728 }
729
645 /* 730 /*
646 * For timestamping add flag to skb_shinfo and 731 * For timestamping add flag to skb_shinfo and
647 * set flag for further reference 732 * set flag for further reference
@@ -666,6 +751,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
666 } 751 }
667 752
668 if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) { 753 if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
754 struct ethhdr *ethh;
755
669 /* Copy dst mac address to wqe. This allows loopback in eSwitch, 756 /* Copy dst mac address to wqe. This allows loopback in eSwitch,
670 * so that VFs and PF can communicate with each other 757 * so that VFs and PF can communicate with each other
671 */ 758 */
@@ -688,8 +775,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
688 /* Copy headers; 775 /* Copy headers;
689 * note that we already verified that it is linear */ 776 * note that we already verified that it is linear */
690 memcpy(tx_desc->lso.header, skb->data, lso_header_size); 777 memcpy(tx_desc->lso.header, skb->data, lso_header_size);
691 data = ((void *) &tx_desc->lso +
692 ALIGN(lso_header_size + 4, DS_SIZE));
693 778
694 priv->port_stats.tso_packets++; 779 priv->port_stats.tso_packets++;
695 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + 780 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -701,7 +786,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
701 op_own = cpu_to_be32(MLX4_OPCODE_SEND) | 786 op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
702 ((ring->prod & ring->size) ? 787 ((ring->prod & ring->size) ?
703 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); 788 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
704 data = &tx_desc->data;
705 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); 789 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
706 ring->packets++; 790 ring->packets++;
707 791
@@ -710,38 +794,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
710 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); 794 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
711 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); 795 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
712 796
713 797 if (tx_info->inl) {
714 /* valid only for none inline segments */
715 tx_info->data_offset = (void *) data - (void *) tx_desc;
716
717 tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
718 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
719
720 if (!is_inline(skb, &fragptr)) {
721 /* Map fragments */
722 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
723 frag = &skb_shinfo(skb)->frags[i];
724 dma = skb_frag_dma_map(priv->ddev, frag,
725 0, skb_frag_size(frag),
726 DMA_TO_DEVICE);
727 data->addr = cpu_to_be64(dma);
728 data->lkey = cpu_to_be32(mdev->mr.key);
729 wmb();
730 data->byte_count = cpu_to_be32(skb_frag_size(frag));
731 --data;
732 }
733
734 /* Map linear part */
735 if (tx_info->linear) {
736 dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
737 skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
738 data->addr = cpu_to_be64(dma);
739 data->lkey = cpu_to_be32(mdev->mr.key);
740 wmb();
741 data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
742 }
743 tx_info->inl = 0;
744 } else {
745 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); 798 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
746 tx_info->inl = 1; 799 tx_info->inl = 1;
747 } 800 }
@@ -781,6 +834,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
781 834
782 return NETDEV_TX_OK; 835 return NETDEV_TX_OK;
783 836
837tx_drop_unmap:
838 en_err(priv, "DMA mapping error\n");
839
840 for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
841 data++;
842 dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
843 be32_to_cpu(data->byte_count),
844 PCI_DMA_TODEVICE);
845 }
846
784tx_drop: 847tx_drop:
785 dev_kfree_skb_any(skb); 848 dev_kfree_skb_any(skb);
786 priv->stats.tx_dropped++; 849 priv->stats.tx_dropped++;
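The new tx_drop_unmap label in the hunk above unwinds a partial mapping: fragments are mapped from the last one down to the first, so when mapping fragment i fails, the already-mapped entries are i+1 through nr_frags-1 and are walked forward. A small standalone model of that unwind (the failure at index 1 is contrived for the demo):

#include <stdio.h>
#include <stdbool.h>

#define NR_FRAGS 4

static bool map_frag(int i)   { printf("map   %d\n", i); return i != 1; }
static void unmap_frag(int i) { printf("unmap %d\n", i); }

int main(void)
{
	int i;

	/* fragments are mapped from the last one down to the first... */
	for (i = NR_FRAGS - 1; i >= 0; i--) {
		if (!map_frag(i))
			goto unwind;
	}
	return 0;

unwind:
	/* ...so on failure the already-mapped ones are i+1 .. NR_FRAGS-1 */
	for (i++; i < NR_FRAGS; i++)
		unmap_frag(i);
	return 1;
}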
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 7e042869ef0c..0416c5b3b35c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -79,6 +79,7 @@ enum {
79 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ 79 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
80 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ 80 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
81 (1ull << MLX4_EVENT_TYPE_CMD) | \ 81 (1ull << MLX4_EVENT_TYPE_CMD) | \
82 (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \
82 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ 83 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
83 (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ 84 (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
84 (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) 85 (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
@@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
629 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); 630 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
630 break; 631 break;
631 632
633 case MLX4_EVENT_TYPE_OP_REQUIRED:
634 atomic_inc(&priv->opreq_count);
 635 /* FW commands can't be executed from interrupt context,
 636 * so defer the work to a task
637 */
638 queue_work(mlx4_wq, &priv->opreq_task);
639 break;
640
632 case MLX4_EVENT_TYPE_COMM_CHANNEL: 641 case MLX4_EVENT_TYPE_COMM_CHANNEL:
633 if (!mlx4_is_master(dev)) { 642 if (!mlx4_is_master(dev)) {
634 mlx4_warn(dev, "Received comm channel event " 643 mlx4_warn(dev, "Received comm channel event "
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 6fc6dabc78d5..0d63daa2f422 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1696,3 +1696,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
1696 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1696 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1697} 1697}
1698EXPORT_SYMBOL_GPL(mlx4_wol_write); 1698EXPORT_SYMBOL_GPL(mlx4_wol_write);
1699
1700enum {
1701 ADD_TO_MCG = 0x26,
1702};
1703
1704
1705void mlx4_opreq_action(struct work_struct *work)
1706{
1707 struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
1708 opreq_task);
1709 struct mlx4_dev *dev = &priv->dev;
1710 int num_tasks = atomic_read(&priv->opreq_count);
1711 struct mlx4_cmd_mailbox *mailbox;
1712 struct mlx4_mgm *mgm;
1713 u32 *outbox;
1714 u32 modifier;
1715 u16 token;
1716 u16 type_m;
1717 u16 type;
1718 int err;
1719 u32 num_qps;
1720 struct mlx4_qp qp;
1721 int i;
1722 u8 rem_mcg;
1723 u8 prot;
1724
1725#define GET_OP_REQ_MODIFIER_OFFSET 0x08
1726#define GET_OP_REQ_TOKEN_OFFSET 0x14
1727#define GET_OP_REQ_TYPE_OFFSET 0x1a
1728#define GET_OP_REQ_DATA_OFFSET 0x20
1729
1730 mailbox = mlx4_alloc_cmd_mailbox(dev);
1731 if (IS_ERR(mailbox)) {
1732 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
1733 return;
1734 }
1735 outbox = mailbox->buf;
1736
1737 while (num_tasks) {
1738 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1739 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1740 MLX4_CMD_NATIVE);
1741 if (err) {
1742 mlx4_err(dev, "Failed to retrieve required operation: %d\n",
1743 err);
1744 goto out; /* free the mailbox on the error path too */
1745 }
1746 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
1747 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
1748 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
1749 type_m = type >> 12;
1750 type &= 0xfff;
1751
1752 switch (type) {
1753 case ADD_TO_MCG:
1754 if (dev->caps.steering_mode ==
1755 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1756 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
1757 err = EPERM;
1758 break;
1759 }
1760 mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
1761 GET_OP_REQ_DATA_OFFSET);
1762 num_qps = be32_to_cpu(mgm->members_count) &
1763 MGM_QPN_MASK;
1764 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
1765 prot = ((u8 *)(&mgm->members_count))[0] >> 6;
1766
1767 for (i = 0; i < num_qps; i++) {
1768 qp.qpn = be32_to_cpu(mgm->qp[i]);
1769 if (rem_mcg)
1770 err = mlx4_multicast_detach(dev, &qp,
1771 mgm->gid,
1772 prot, 0);
1773 else
1774 err = mlx4_multicast_attach(dev, &qp,
1775 mgm->gid,
1776 mgm->gid[5],
1777 0, prot,
1778 NULL);
1779 if (err)
1780 break;
1781 }
1782 break;
1783 default:
1784 mlx4_warn(dev, "Bad type for required operation\n");
1785 err = EINVAL;
1786 break;
1787 }
1788 err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
1789 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1790 MLX4_CMD_NATIVE);
1791 if (err) {
1792 mlx4_err(dev, "Failed to acknowledge required request: %d\n",
1793 err);
1794 goto out;
1795 }
1796 memset(outbox, 0, 0xffc);
1797 num_tasks = atomic_dec_return(&priv->opreq_count);
1798 }
1799
1800out:
1801 mlx4_free_cmd_mailbox(dev, mailbox);
1802}
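mlx4_opreq_action() exists because firmware commands may sleep and therefore cannot be issued from the event-queue interrupt handler; the handler only bumps a counter and queues work. A hedged sketch of that defer-to-task pattern, assuming illustrative names (opreq_ctx and friends are not the driver's, and schedule_work() stands in for the driver's private workqueue):

#include <linux/workqueue.h>
#include <linux/atomic.h>

struct opreq_ctx {
	atomic_t pending;
	struct work_struct work;
};

static void opreq_worker(struct work_struct *work)
{
	struct opreq_ctx *ctx = container_of(work, struct opreq_ctx, work);

	while (atomic_read(&ctx->pending)) {
		/* issue the (possibly sleeping) firmware command here */
		atomic_dec(&ctx->pending);
	}
}

static void opreq_init(struct opreq_ctx *ctx)
{
	atomic_set(&ctx->pending, 0);
	INIT_WORK(&ctx->work, opreq_worker);
}

/* called from the interrupt handler: count the request and defer */
static void opreq_event(struct opreq_ctx *ctx)
{
	atomic_inc(&ctx->pending);
	schedule_work(&ctx->work);
}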
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index fdf41665a059..a0a368b7c939 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
220int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); 220int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
221int mlx4_NOP(struct mlx4_dev *dev); 221int mlx4_NOP(struct mlx4_dev *dev);
222int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); 222int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
223void mlx4_opreq_action(struct work_struct *work);
223 224
224#endif /* MLX4_FW_H */ 225#endif /* MLX4_FW_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 36be3208786a..60c9f4f103fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1692 goto err_xrcd_table_free; 1692 goto err_xrcd_table_free;
1693 } 1693 }
1694 1694
1695 if (!mlx4_is_slave(dev)) {
1696 err = mlx4_init_mcg_table(dev);
1697 if (err) {
1698 mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
1699 goto err_mr_table_free;
1700 }
1701 }
1702
1695 err = mlx4_init_eq_table(dev); 1703 err = mlx4_init_eq_table(dev);
1696 if (err) { 1704 if (err) {
1697 mlx4_err(dev, "Failed to initialize " 1705 mlx4_err(dev, "Failed to initialize "
1698 "event queue table, aborting.\n"); 1706 "event queue table, aborting.\n");
1699 goto err_mr_table_free; 1707 goto err_mcg_table_free;
1700 } 1708 }
1701 1709
1702 err = mlx4_cmd_use_events(dev); 1710 err = mlx4_cmd_use_events(dev);
@@ -1746,19 +1754,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1746 goto err_srq_table_free; 1754 goto err_srq_table_free;
1747 } 1755 }
1748 1756
1749 if (!mlx4_is_slave(dev)) {
1750 err = mlx4_init_mcg_table(dev);
1751 if (err) {
1752 mlx4_err(dev, "Failed to initialize "
1753 "multicast group table, aborting.\n");
1754 goto err_qp_table_free;
1755 }
1756 }
1757
1758 err = mlx4_init_counters_table(dev); 1757 err = mlx4_init_counters_table(dev);
1759 if (err && err != -ENOENT) { 1758 if (err && err != -ENOENT) {
1760 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1759 mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
1761 goto err_mcg_table_free; 1760 goto err_qp_table_free;
1762 } 1761 }
1763 1762
1764 if (!mlx4_is_slave(dev)) { 1763 if (!mlx4_is_slave(dev)) {
@@ -1803,9 +1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1803err_counters_table_free: 1802err_counters_table_free:
1804 mlx4_cleanup_counters_table(dev); 1803 mlx4_cleanup_counters_table(dev);
1805 1804
1806err_mcg_table_free:
1807 mlx4_cleanup_mcg_table(dev);
1808
1809err_qp_table_free: 1805err_qp_table_free:
1810 mlx4_cleanup_qp_table(dev); 1806 mlx4_cleanup_qp_table(dev);
1811 1807
@@ -1821,6 +1817,10 @@ err_cmd_poll:
1821err_eq_table_free: 1817err_eq_table_free:
1822 mlx4_cleanup_eq_table(dev); 1818 mlx4_cleanup_eq_table(dev);
1823 1819
1820err_mcg_table_free:
1821 if (!mlx4_is_slave(dev))
1822 mlx4_cleanup_mcg_table(dev);
1823
1824err_mr_table_free: 1824err_mr_table_free:
1825 mlx4_cleanup_mr_table(dev); 1825 mlx4_cleanup_mr_table(dev);
1826 1826
@@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2197 } 2197 }
2198 } 2198 }
2199 2199
2200 atomic_set(&priv->opreq_count, 0);
2201 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
2202
2200 /* 2203 /*
2201 * Now reset the HCA before we touch the PCI capabilities or 2204 * Now reset the HCA before we touch the PCI capabilities or
2202 * attempt a firmware command, since a boot ROM may have left 2205 * attempt a firmware command, since a boot ROM may have left
@@ -2315,12 +2318,12 @@ err_port:
2315 mlx4_cleanup_port_info(&priv->port[port]); 2318 mlx4_cleanup_port_info(&priv->port[port]);
2316 2319
2317 mlx4_cleanup_counters_table(dev); 2320 mlx4_cleanup_counters_table(dev);
2318 mlx4_cleanup_mcg_table(dev);
2319 mlx4_cleanup_qp_table(dev); 2321 mlx4_cleanup_qp_table(dev);
2320 mlx4_cleanup_srq_table(dev); 2322 mlx4_cleanup_srq_table(dev);
2321 mlx4_cleanup_cq_table(dev); 2323 mlx4_cleanup_cq_table(dev);
2322 mlx4_cmd_use_polling(dev); 2324 mlx4_cmd_use_polling(dev);
2323 mlx4_cleanup_eq_table(dev); 2325 mlx4_cleanup_eq_table(dev);
2326 mlx4_cleanup_mcg_table(dev);
2324 mlx4_cleanup_mr_table(dev); 2327 mlx4_cleanup_mr_table(dev);
2325 mlx4_cleanup_xrcd_table(dev); 2328 mlx4_cleanup_xrcd_table(dev);
2326 mlx4_cleanup_pd_table(dev); 2329 mlx4_cleanup_pd_table(dev);
@@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2403 RES_TR_FREE_SLAVES_ONLY); 2406 RES_TR_FREE_SLAVES_ONLY);
2404 2407
2405 mlx4_cleanup_counters_table(dev); 2408 mlx4_cleanup_counters_table(dev);
2406 mlx4_cleanup_mcg_table(dev);
2407 mlx4_cleanup_qp_table(dev); 2409 mlx4_cleanup_qp_table(dev);
2408 mlx4_cleanup_srq_table(dev); 2410 mlx4_cleanup_srq_table(dev);
2409 mlx4_cleanup_cq_table(dev); 2411 mlx4_cleanup_cq_table(dev);
2410 mlx4_cmd_use_polling(dev); 2412 mlx4_cmd_use_polling(dev);
2411 mlx4_cleanup_eq_table(dev); 2413 mlx4_cleanup_eq_table(dev);
2414 mlx4_cleanup_mcg_table(dev);
2412 mlx4_cleanup_mr_table(dev); 2415 mlx4_cleanup_mr_table(dev);
2413 mlx4_cleanup_xrcd_table(dev); 2416 mlx4_cleanup_xrcd_table(dev);
2414 mlx4_cleanup_pd_table(dev); 2417 mlx4_cleanup_pd_table(dev);
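The main.c hunks move mlx4_init_mcg_table() ahead of the EQ setup and relocate its cleanup label so the error unwind still runs in exact reverse order of initialization. A compact standalone illustration of that goto-ladder idiom (init_a, init_b and fini_a are placeholders, not mlx4 functions):

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; /* simulate failure */ }
static void fini_a(void) { puts("fini a"); }

/* resources are released in the exact reverse order of acquisition,
 * so moving an init step also moves its cleanup label
 */
static int setup(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto err_a;	/* unwind everything done so far */
	return 0;

err_a:
	fini_a();
out:
	return err;
}

int main(void) { return setup() ? 1 : 0; }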
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f3e804f2a35f..55f6245efb6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,19 +39,8 @@
39 39
40#include "mlx4.h" 40#include "mlx4.h"
41 41
42#define MGM_QPN_MASK 0x00FFFFFF
43#define MGM_BLCK_LB_BIT 30
44
45static const u8 zero_gid[16]; /* automatically initialized to 0 */ 42static const u8 zero_gid[16]; /* automatically initialized to 0 */
46 43
47struct mlx4_mgm {
48 __be32 next_gid_index;
49 __be32 members_count;
50 u32 reserved[2];
51 u8 gid[16];
52 __be32 qp[MLX4_MAX_QP_PER_MGM];
53};
54
55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 44int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
56{ 45{
57 return 1 << dev->oper_log_mgm_entry_size; 46 return 1 << dev->oper_log_mgm_entry_size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 17d9277e33ef..348bb8c7d9a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -554,6 +554,17 @@ struct mlx4_mfunc {
554 struct mlx4_mfunc_master_ctx master; 554 struct mlx4_mfunc_master_ctx master;
555}; 555};
556 556
557#define MGM_QPN_MASK 0x00FFFFFF
558#define MGM_BLCK_LB_BIT 30
559
560struct mlx4_mgm {
561 __be32 next_gid_index;
562 __be32 members_count;
563 u32 reserved[2];
564 u8 gid[16];
565 __be32 qp[MLX4_MAX_QP_PER_MGM];
566};
567
557struct mlx4_cmd { 568struct mlx4_cmd {
558 struct pci_pool *pool; 569 struct pci_pool *pool;
559 void __iomem *hcr; 570 void __iomem *hcr;
@@ -802,6 +813,8 @@ struct mlx4_priv {
802 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 813 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
803 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 814 __be64 slave_node_guids[MLX4_MFUNC_MAX];
804 815
816 atomic_t opreq_count;
817 struct work_struct opreq_task;
805}; 818};
806 819
807static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 820static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index e393d998be89..0951f7aca1ef 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -705,7 +705,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
705 ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); 705 ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
706} 706}
707 707
708void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) 708static void ks8842_handle_rx(struct net_device *netdev,
709 struct ks8842_adapter *adapter)
709{ 710{
710 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 711 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
711 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); 712 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
@@ -715,7 +716,8 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
715 } 716 }
716} 717}
717 718
718void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) 719static void ks8842_handle_tx(struct net_device *netdev,
720 struct ks8842_adapter *adapter)
719{ 721{
720 u16 sr = ks8842_read16(adapter, 16, REG_TXSR); 722 u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
721 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); 723 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
@@ -724,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
724 netif_wake_queue(netdev); 726 netif_wake_queue(netdev);
725} 727}
726 728
727void ks8842_handle_rx_overrun(struct net_device *netdev, 729static void ks8842_handle_rx_overrun(struct net_device *netdev,
728 struct ks8842_adapter *adapter) 730 struct ks8842_adapter *adapter)
729{ 731{
730 netdev_dbg(netdev, "%s: entry\n", __func__); 732 netdev_dbg(netdev, "%s: entry\n", __func__);
@@ -732,7 +734,7 @@ void ks8842_handle_rx_overrun(struct net_device *netdev,
732 netdev->stats.rx_fifo_errors++; 734 netdev->stats.rx_fifo_errors++;
733} 735}
734 736
735void ks8842_tasklet(unsigned long arg) 737static void ks8842_tasklet(unsigned long arg)
736{ 738{
737 struct net_device *netdev = (struct net_device *)arg; 739 struct net_device *netdev = (struct net_device *)arg;
738 struct ks8842_adapter *adapter = netdev_priv(netdev); 740 struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1146,7 +1148,7 @@ static int ks8842_probe(struct platform_device *pdev)
1146 struct resource *iomem; 1148 struct resource *iomem;
1147 struct net_device *netdev; 1149 struct net_device *netdev;
1148 struct ks8842_adapter *adapter; 1150 struct ks8842_adapter *adapter;
1149 struct ks8842_platform_data *pdata = pdev->dev.platform_data; 1151 struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
1150 u16 id; 1152 u16 id;
1151 unsigned i; 1153 unsigned i;
1152 1154
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ac20098b542a..0fba1532d326 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -688,7 +688,7 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
688} 688}
689 689
690 690
691void ks_enable_qmu(struct ks_net *ks) 691static void ks_enable_qmu(struct ks_net *ks)
692{ 692{
693 u16 w; 693 u16 w;
694 694
@@ -1636,7 +1636,7 @@ static int ks8851_probe(struct platform_device *pdev)
1636 } else { 1636 } else {
1637 struct ks8851_mll_platform_data *pdata; 1637 struct ks8851_mll_platform_data *pdata;
1638 1638
1639 pdata = pdev->dev.platform_data; 1639 pdata = dev_get_platdata(&pdev->dev);
1640 if (!pdata) { 1640 if (!pdata) {
1641 netdev_err(netdev, "No platform data\n"); 1641 netdev_err(netdev, "No platform data\n");
1642 err = -ENODEV; 1642 err = -ENODEV;
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
new file mode 100644
index 000000000000..1731e050fa27
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Kconfig
@@ -0,0 +1,30 @@
1#
2# MOXART device configuration
3#
4
5config NET_VENDOR_MOXART
6 bool "MOXA ART devices"
7 default y
8 depends on (ARM && ARCH_MOXART)
9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly affect the
15 kernel: saying N will just cause the configurator to skip all
16 the questions about MOXA ART devices. If you say Y, you will be asked
17 for your specific card in the following questions.
18
19if NET_VENDOR_MOXART
20
21config ARM_MOXART_ETHER
22 tristate "MOXART Ethernet support"
23 depends on ARM && ARCH_MOXART
24 select NET_CORE
25 ---help---
 26 If you wish to compile a kernel for hardware with a MOXA ART SoC and
 27 want to use its internal Ethernet controller, answer Y here.
28
29
30endif # NET_VENDOR_MOXART
diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile
new file mode 100644
index 000000000000..aa3c73e9e952
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the MOXART network device drivers.
3#
4
5obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
new file mode 100644
index 000000000000..83c2091c9c23
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -0,0 +1,559 @@
1/* MOXA ART Ethernet (RTL8201CP) driver.
2 *
3 * Copyright (C) 2013 Jonas Jensen
4 *
5 * Jonas Jensen <jonas.jensen@gmail.com>
6 *
7 * Based on code from
8 * Moxa Technology Co., Ltd. <www.moxa.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/skbuff.h>
20#include <linux/dma-mapping.h>
21#include <linux/ethtool.h>
22#include <linux/platform_device.h>
23#include <linux/interrupt.h>
24#include <linux/irq.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/crc32.h>
28#include <linux/crc32c.h>
 29#include <linux/io.h>
30
31#include "moxart_ether.h"
32
33static inline void moxart_emac_write(struct net_device *ndev,
34 unsigned int reg, unsigned long value)
35{
36 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
37
38 writel(value, priv->base + reg);
39}
40
41static void moxart_update_mac_address(struct net_device *ndev)
42{
43 moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
44 ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
45 moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
46 ((ndev->dev_addr[2] << 24) |
47 (ndev->dev_addr[3] << 16) |
48 (ndev->dev_addr[4] << 8) |
49 (ndev->dev_addr[5])));
50}
51
52static int moxart_set_mac_address(struct net_device *ndev, void *addr)
53{
54 struct sockaddr *address = addr;
55
56 if (!is_valid_ether_addr(address->sa_data))
57 return -EADDRNOTAVAIL;
58
59 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
60 moxart_update_mac_address(ndev);
61
62 return 0;
63}
64
65static void moxart_mac_free_memory(struct net_device *ndev)
66{
67 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
68 int i;
69
70 for (i = 0; i < RX_DESC_NUM; i++)
71 dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
72 priv->rx_buf_size, DMA_FROM_DEVICE);
73
74 if (priv->tx_desc_base)
75 dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
76 priv->tx_desc_base, priv->tx_base);
77
78 if (priv->rx_desc_base)
79 dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
80 priv->rx_desc_base, priv->rx_base);
81
82 kfree(priv->tx_buf_base);
83 kfree(priv->rx_buf_base);
84}
85
86static void moxart_mac_reset(struct net_device *ndev)
87{
88 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
89
90 writel(SW_RST, priv->base + REG_MAC_CTRL);
91 while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
92 mdelay(10);
93
94 writel(0, priv->base + REG_INTERRUPT_MASK);
95
96 priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
97}
98
99static void moxart_mac_enable(struct net_device *ndev)
100{
101 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
102
103 writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
104 writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
105 writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);
106
107 priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
108 writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
109
110 priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
111 writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
112}
113
114static void moxart_mac_setup_desc_ring(struct net_device *ndev)
115{
116 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
117 void __iomem *desc;
118 int i;
119
120 for (i = 0; i < TX_DESC_NUM; i++) {
121 desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
122 memset(desc, 0, TX_REG_DESC_SIZE);
123
124 priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
125 }
126 writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
127
128 priv->tx_head = 0;
129 priv->tx_tail = 0;
130
131 for (i = 0; i < RX_DESC_NUM; i++) {
132 desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
133 memset(desc, 0, RX_REG_DESC_SIZE);
134 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
135 writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
136 desc + RX_REG_OFFSET_DESC1);
137
138 priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
139 priv->rx_mapping[i] = dma_map_single(&ndev->dev,
140 priv->rx_buf[i],
141 priv->rx_buf_size,
142 DMA_FROM_DEVICE);
143 if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
144 netdev_err(ndev, "DMA mapping error\n");
145
146 writel(priv->rx_mapping[i],
147 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
148 writel(priv->rx_buf[i],
149 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
150 }
151 writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
152
153 priv->rx_head = 0;
154
 155 /* reset the MAC controller TX/RX descriptor base address */
156 writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
157 writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
158}
159
160static int moxart_mac_open(struct net_device *ndev)
161{
162 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
163
164 if (!is_valid_ether_addr(ndev->dev_addr))
165 return -EADDRNOTAVAIL;
166
167 napi_enable(&priv->napi);
168
169 moxart_mac_reset(ndev);
170 moxart_update_mac_address(ndev);
171 moxart_mac_setup_desc_ring(ndev);
172 moxart_mac_enable(ndev);
173 netif_start_queue(ndev);
174
175 netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
176 __func__, readl(priv->base + REG_INTERRUPT_MASK),
177 readl(priv->base + REG_MAC_CTRL));
178
179 return 0;
180}
181
182static int moxart_mac_stop(struct net_device *ndev)
183{
184 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
185
186 napi_disable(&priv->napi);
187
188 netif_stop_queue(ndev);
189
190 /* disable all interrupts */
191 writel(0, priv->base + REG_INTERRUPT_MASK);
192
193 /* disable all functions */
194 writel(0, priv->base + REG_MAC_CTRL);
195
196 return 0;
197}
198
199static int moxart_rx_poll(struct napi_struct *napi, int budget)
200{
201 struct moxart_mac_priv_t *priv = container_of(napi,
202 struct moxart_mac_priv_t,
203 napi);
204 struct net_device *ndev = priv->ndev;
205 struct sk_buff *skb;
206 void __iomem *desc;
207 unsigned int desc0, len;
208 int rx_head = priv->rx_head;
209 int rx = 0;
210
211 while (1) {
212 desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
213 desc0 = readl(desc + RX_REG_OFFSET_DESC0);
214
215 if (desc0 & RX_DESC0_DMA_OWN)
216 break;
217
218 if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
219 RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
220 net_dbg_ratelimited("packet error\n");
221 priv->stats.rx_dropped++;
222 priv->stats.rx_errors++;
 223 goto rx_next; /* re-arm the descriptor and keep the ring moving */
224 }
225
226 len = desc0 & RX_DESC0_FRAME_LEN_MASK;
227
228 if (len > RX_BUF_SIZE)
229 len = RX_BUF_SIZE;
230
231 skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
232 if (unlikely(!skb)) {
233 net_dbg_ratelimited("build_skb failed\n");
234 priv->stats.rx_dropped++;
235 priv->stats.rx_errors++;
 236 goto rx_next; /* drop the packet, don't dereference a NULL skb below */
 237 }
238 skb_put(skb, len);
239 skb->protocol = eth_type_trans(skb, ndev);
240 napi_gro_receive(&priv->napi, skb);
241 rx++;
242
243 ndev->last_rx = jiffies;
244 priv->stats.rx_packets++;
245 priv->stats.rx_bytes += len;
246 if (desc0 & RX_DESC0_MULTICAST)
247 priv->stats.multicast++;
 248rx_next:
249 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
250
251 rx_head = RX_NEXT(rx_head);
252 priv->rx_head = rx_head;
253
254 if (rx >= budget)
255 break;
256 }
257
258 if (rx < budget) {
259 napi_gro_flush(napi, false);
260 __napi_complete(napi);
261 }
262
263 priv->reg_imr |= RPKT_FINISH_M;
264 writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
265
266 return rx;
267}
268
269static void moxart_tx_finished(struct net_device *ndev)
270{
271 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
272 unsigned tx_head = priv->tx_head;
273 unsigned tx_tail = priv->tx_tail;
274
275 while (tx_tail != tx_head) {
276 dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
277 priv->tx_len[tx_tail], DMA_TO_DEVICE);
278
279 priv->stats.tx_packets++;
280 priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
281
282 dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
283 priv->tx_skb[tx_tail] = NULL;
284
285 tx_tail = TX_NEXT(tx_tail);
286 }
287 priv->tx_tail = tx_tail;
288}
289
290static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
291{
292 struct net_device *ndev = (struct net_device *) dev_id;
293 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
294 unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);
295
296 if (ists & XPKT_OK_INT_STS)
297 moxart_tx_finished(ndev);
298
299 if (ists & RPKT_FINISH) {
300 if (napi_schedule_prep(&priv->napi)) {
301 priv->reg_imr &= ~RPKT_FINISH_M;
302 writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
303 __napi_schedule(&priv->napi);
304 }
305 }
306
307 return IRQ_HANDLED;
308}
309
310static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
311{
312 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
313 void __iomem *desc;
314 unsigned int len;
315 unsigned int tx_head = priv->tx_head;
316 u32 txdes1;
317 int ret = NETDEV_TX_BUSY;
318
319 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
320
321 spin_lock_irq(&priv->txlock);
322 if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
323 net_dbg_ratelimited("no TX space for packet\n");
324 priv->stats.tx_dropped++;
325 goto out_unlock;
326 }
327
 328 len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 329
 330 /* pad short frames before mapping so the device sees the zeroes */
 331 if (skb->len < ETH_ZLEN) {
 332 memset(&skb->data[skb->len], 0, ETH_ZLEN - skb->len);
 333 len = ETH_ZLEN;
 334 }
 335
 336 priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
 337 len, DMA_TO_DEVICE);
 338 if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
 339 netdev_err(ndev, "DMA mapping error\n");
 340 goto out_unlock;
 341 }
 342
 343 priv->tx_len[tx_head] = len;
 344 priv->tx_skb[tx_head] = skb;
 345
 346 writel(priv->tx_mapping[tx_head],
 347 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
 348 writel(skb->data,
 349 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
350
351 txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
352 txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
353 txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
354 txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
355 writel(txdes1, desc + TX_REG_OFFSET_DESC1);
356 writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
357
358 /* start to send packet */
359 writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
360
361 priv->tx_head = TX_NEXT(tx_head);
362
363 ndev->trans_start = jiffies;
364 ret = NETDEV_TX_OK;
365out_unlock:
366 spin_unlock_irq(&priv->txlock);
367
368 return ret;
369}
370
371static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
372{
373 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
374
375 return &priv->stats;
376}
377
378static void moxart_mac_setmulticast(struct net_device *ndev)
379{
380 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
381 struct netdev_hw_addr *ha;
382 int crc_val;
383
384 netdev_for_each_mc_addr(ha, ndev) {
385 crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
386 crc_val = (crc_val >> 26) & 0x3f;
387 if (crc_val >= 32) {
388 writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
389 (1UL << (crc_val - 32)),
390 priv->base + REG_MCAST_HASH_TABLE1);
391 } else {
392 writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
393 (1UL << crc_val),
394 priv->base + REG_MCAST_HASH_TABLE0);
395 }
396 }
397}
398
399static void moxart_mac_set_rx_mode(struct net_device *ndev)
400{
401 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
402
403 spin_lock_irq(&priv->txlock);
404
405 (ndev->flags & IFF_PROMISC) ? (priv->reg_maccr |= RCV_ALL) :
406 (priv->reg_maccr &= ~RCV_ALL);
407
408 (ndev->flags & IFF_ALLMULTI) ? (priv->reg_maccr |= RX_MULTIPKT) :
409 (priv->reg_maccr &= ~RX_MULTIPKT);
410
411 if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
412 priv->reg_maccr |= HT_MULTI_EN;
413 moxart_mac_setmulticast(ndev);
414 } else {
415 priv->reg_maccr &= ~HT_MULTI_EN;
416 }
417
418 writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
419
420 spin_unlock_irq(&priv->txlock);
421}
422
423static struct net_device_ops moxart_netdev_ops = {
424 .ndo_open = moxart_mac_open,
425 .ndo_stop = moxart_mac_stop,
426 .ndo_start_xmit = moxart_mac_start_xmit,
427 .ndo_get_stats = moxart_mac_get_stats,
428 .ndo_set_rx_mode = moxart_mac_set_rx_mode,
429 .ndo_set_mac_address = moxart_set_mac_address,
430 .ndo_validate_addr = eth_validate_addr,
431 .ndo_change_mtu = eth_change_mtu,
432};
433
434static int moxart_mac_probe(struct platform_device *pdev)
435{
436 struct device *p_dev = &pdev->dev;
437 struct device_node *node = p_dev->of_node;
438 struct net_device *ndev;
439 struct moxart_mac_priv_t *priv;
440 struct resource *res;
441 unsigned int irq;
442 int ret;
443
444 ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
445 if (!ndev)
446 return -ENOMEM;
447
448 irq = irq_of_parse_and_map(node, 0);
 449 if (irq <= 0) {
 450 netdev_err(ndev, "irq_of_parse_and_map failed\n");
 451 free_netdev(ndev);
 452 return -EINVAL;
 453 }
454 priv = netdev_priv(ndev);
455 priv->ndev = ndev;
456
457 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
458 ndev->base_addr = res->start;
 459 priv->base = devm_ioremap_resource(p_dev, res);
 460 if (IS_ERR(priv->base)) {
 461 dev_err(p_dev, "devm_ioremap_resource failed\n");
 462 ret = PTR_ERR(priv->base);
 463 goto init_fail;
 464 }
465
466 spin_lock_init(&priv->txlock);
467
468 priv->tx_buf_size = TX_BUF_SIZE;
469 priv->rx_buf_size = RX_BUF_SIZE +
470 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 471 ret = -ENOMEM; /* default for the allocation-failure gotos below */
472 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
473 TX_DESC_NUM, &priv->tx_base,
474 GFP_DMA | GFP_KERNEL);
475 if (priv->tx_desc_base == NULL)
476 goto init_fail;
477
478 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
479 RX_DESC_NUM, &priv->rx_base,
480 GFP_DMA | GFP_KERNEL);
481 if (priv->rx_desc_base == NULL)
482 goto init_fail;
483
484 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
 485 GFP_KERNEL);
486 if (!priv->tx_buf_base)
487 goto init_fail;
488
489 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
 490 GFP_KERNEL);
491 if (!priv->rx_buf_base)
492 goto init_fail;
493
494 platform_set_drvdata(pdev, ndev);
495
496 ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
497 pdev->name, ndev);
498 if (ret) {
499 netdev_err(ndev, "devm_request_irq failed\n");
500 goto init_fail;
501 }
502
503 ether_setup(ndev);
504 ndev->netdev_ops = &moxart_netdev_ops;
505 netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
506 ndev->priv_flags |= IFF_UNICAST_FLT;
507 ndev->irq = irq;
508
509 SET_NETDEV_DEV(ndev, &pdev->dev);
510
511 ret = register_netdev(ndev);
512 if (ret) {
 513 /* init_fail frees ndev; freeing it here too would be a double free */
514 goto init_fail;
515 }
516
517 netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
518 __func__, ndev->irq, ndev->dev_addr);
519
520 return 0;
521
522init_fail:
523 netdev_err(ndev, "init failed\n");
524 moxart_mac_free_memory(ndev);
 525 free_netdev(ndev);
526 return ret;
527}
528
529static int moxart_remove(struct platform_device *pdev)
530{
531 struct net_device *ndev = platform_get_drvdata(pdev);
532
533 unregister_netdev(ndev);
 534 /* the IRQ was requested with devm_request_irq() and is freed automatically */
535 moxart_mac_free_memory(ndev);
536 free_netdev(ndev);
537
538 return 0;
539}
540
541static const struct of_device_id moxart_mac_match[] = {
542 { .compatible = "moxa,moxart-mac" },
543 { }
544};
545
 546static struct platform_driver moxart_mac_driver = {
547 .probe = moxart_mac_probe,
548 .remove = moxart_remove,
549 .driver = {
550 .name = "moxart-ethernet",
551 .owner = THIS_MODULE,
552 .of_match_table = moxart_mac_match,
553 },
554};
555module_platform_driver(moxart_mac_driver);
556
557MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
558MODULE_LICENSE("GPL v2");
559MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
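moxart_mac_setmulticast() selects one of 64 hash bits from the top six bits of a little-endian CRC-32 of the MAC address. A standalone check of that selection logic; the bitwise crc32_le() below is believed to match the kernel's crc32_le(~0, addr, ETH_ALEN) (seeded with all ones, no final inversion), but that equivalence is an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

/* bitwise reflected CRC-32, polynomial 0xEDB88320, no final inversion */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int bit = (crc32_le(~0u, mac, 6) >> 26) & 0x3f; /* top 6 bits pick 1 of 64 */

	printf("hash table %d, bit %d\n", bit >= 32, bit & 31);
	return 0;
}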
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
new file mode 100644
index 000000000000..2be9280d608c
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -0,0 +1,330 @@
1/* MOXA ART Ethernet (RTL8201CP) driver.
2 *
3 * Copyright (C) 2013 Jonas Jensen
4 *
5 * Jonas Jensen <jonas.jensen@gmail.com>
6 *
7 * Based on code from
8 * Moxa Technology Co., Ltd. <www.moxa.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#ifndef _MOXART_ETHERNET_H
16#define _MOXART_ETHERNET_H
17
18#define TX_REG_OFFSET_DESC0 0
19#define TX_REG_OFFSET_DESC1 4
20#define TX_REG_OFFSET_DESC2 8
21#define TX_REG_DESC_SIZE 16
22
23#define RX_REG_OFFSET_DESC0 0
24#define RX_REG_OFFSET_DESC1 4
25#define RX_REG_OFFSET_DESC2 8
26#define RX_REG_DESC_SIZE 16
27
28#define TX_DESC0_PKT_LATE_COL 0x1 /* abort, late collision */
29#define TX_DESC0_RX_PKT_EXS_COL 0x2 /* abort, >16 collisions */
30#define TX_DESC0_DMA_OWN 0x80000000 /* owned by controller */
31#define TX_DESC1_BUF_SIZE_MASK 0x7ff
32#define TX_DESC1_LTS 0x8000000 /* last TX packet */
33#define TX_DESC1_FTS 0x10000000 /* first TX packet */
34#define TX_DESC1_FIFO_COMPLETE 0x20000000
35#define TX_DESC1_INTR_COMPLETE 0x40000000
36#define TX_DESC1_END 0x80000000
37#define TX_DESC2_ADDRESS_PHYS 0
38#define TX_DESC2_ADDRESS_VIRT 4
39
40#define RX_DESC0_FRAME_LEN 0
41#define RX_DESC0_FRAME_LEN_MASK 0x7FF
42#define RX_DESC0_MULTICAST 0x10000
43#define RX_DESC0_BROADCAST 0x20000
44#define RX_DESC0_ERR 0x40000
45#define RX_DESC0_CRC_ERR 0x80000
46#define RX_DESC0_FTL 0x100000
47#define RX_DESC0_RUNT 0x200000 /* packet less than 64 bytes */
48#define RX_DESC0_ODD_NB 0x400000 /* receive odd nibbles */
49#define RX_DESC0_LRS 0x10000000 /* last receive segment */
50#define RX_DESC0_FRS 0x20000000 /* first receive segment */
51#define RX_DESC0_DMA_OWN 0x80000000
52#define RX_DESC1_BUF_SIZE_MASK 0x7FF
53#define RX_DESC1_END 0x80000000
54#define RX_DESC2_ADDRESS_PHYS 0
55#define RX_DESC2_ADDRESS_VIRT 4
56
57#define TX_DESC_NUM 64
58#define TX_DESC_NUM_MASK (TX_DESC_NUM-1)
59#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
60#define TX_BUF_SIZE 1600
61#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
62
63#define RX_DESC_NUM 64
64#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
65#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK))
66#define RX_BUF_SIZE 1600
67#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1)
68
69#define REG_INTERRUPT_STATUS 0
70#define REG_INTERRUPT_MASK 4
71#define REG_MAC_MS_ADDRESS 8
72#define REG_MAC_LS_ADDRESS 12
73#define REG_MCAST_HASH_TABLE0 16
74#define REG_MCAST_HASH_TABLE1 20
75#define REG_TX_POLL_DEMAND 24
76#define REG_RX_POLL_DEMAND 28
77#define REG_TXR_BASE_ADDRESS 32
78#define REG_RXR_BASE_ADDRESS 36
79#define REG_INT_TIMER_CTRL 40
80#define REG_APOLL_TIMER_CTRL 44
81#define REG_DMA_BLEN_CTRL 48
82#define REG_RESERVED1 52
83#define REG_MAC_CTRL 136
84#define REG_MAC_STATUS 140
85#define REG_PHY_CTRL 144
86#define REG_PHY_WRITE_DATA 148
87#define REG_FLOW_CTRL 152
88#define REG_BACK_PRESSURE 156
89#define REG_RESERVED2 160
90#define REG_TEST_SEED 196
91#define REG_DMA_FIFO_STATE 200
92#define REG_TEST_MODE 204
93#define REG_RESERVED3 208
94#define REG_TX_COL_COUNTER 212
95#define REG_RPF_AEP_COUNTER 216
96#define REG_XM_PG_COUNTER 220
97#define REG_RUNT_TLC_COUNTER 224
98#define REG_CRC_FTL_COUNTER 228
99#define REG_RLC_RCC_COUNTER 232
100#define REG_BROC_COUNTER 236
101#define REG_MULCA_COUNTER 240
102#define REG_RP_COUNTER 244
103#define REG_XP_COUNTER 248
104
105#define REG_PHY_CTRL_OFFSET 0x0
106#define REG_PHY_STATUS 0x1
107#define REG_PHY_ID1 0x2
108#define REG_PHY_ID2 0x3
109#define REG_PHY_ANA 0x4
110#define REG_PHY_ANLPAR 0x5
111#define REG_PHY_ANE 0x6
112#define REG_PHY_ECTRL1 0x10
113#define REG_PHY_QPDS 0x11
114#define REG_PHY_10BOP 0x12
115#define REG_PHY_ECTRL2 0x13
116#define REG_PHY_FTMAC100_WRITE 0x8000000
117#define REG_PHY_FTMAC100_READ 0x4000000
118
119/* REG_INTERRUPT_STATUS */
120#define RPKT_FINISH BIT(0) /* DMA data received */
121#define NORXBUF BIT(1) /* receive buffer unavailable */
122#define XPKT_FINISH BIT(2) /* DMA moved data to TX FIFO */
123#define NOTXBUF BIT(3) /* transmit buffer unavailable */
124#define XPKT_OK_INT_STS BIT(4) /* transmit to ethernet success */
125#define XPKT_LOST_INT_STS BIT(5) /* transmit ethernet lost (collision) */
126#define RPKT_SAV BIT(6) /* FIFO receive success */
127#define RPKT_LOST_INT_STS BIT(7) /* FIFO full, receive failed */
128#define AHB_ERR BIT(8) /* AHB error */
129#define PHYSTS_CHG BIT(9) /* PHY link status change */
130
131/* REG_INTERRUPT_MASK */
132#define RPKT_FINISH_M BIT(0)
133#define NORXBUF_M BIT(1)
134#define XPKT_FINISH_M BIT(2)
135#define NOTXBUF_M BIT(3)
136#define XPKT_OK_M BIT(4)
137#define XPKT_LOST_M BIT(5)
138#define RPKT_SAV_M BIT(6)
139#define RPKT_LOST_M BIT(7)
140#define AHB_ERR_M BIT(8)
141#define PHYSTS_CHG_M BIT(9)
142
143/* REG_MAC_MS_ADDRESS */
144#define MAC_MADR_MASK 0xffff /* 2 MSB MAC address */
145
146/* REG_INT_TIMER_CTRL */
147#define TXINT_TIME_SEL BIT(15) /* TX cycle time period */
148#define TXINT_THR_MASK 0x7000
149#define TXINT_CNT_MASK 0xf00
150#define RXINT_TIME_SEL BIT(7) /* RX cycle time period */
151#define RXINT_THR_MASK 0x70
152#define RXINT_CNT_MASK 0xF
153
154/* REG_APOLL_TIMER_CTRL */
155#define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */
156#define TXPOLL_CNT_MASK 0xf00
157#define TXPOLL_CNT_SHIFT_BIT 8
158#define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */
159#define RXPOLL_CNT_MASK 0xF
160#define RXPOLL_CNT_SHIFT_BIT 0
161
162/* REG_DMA_BLEN_CTRL */
163#define RX_THR_EN BIT(9) /* RX FIFO threshold arbitration */
164#define RXFIFO_HTHR_MASK 0x1c0
165#define RXFIFO_LTHR_MASK 0x38
166#define INCR16_EN BIT(2) /* AHB bus INCR16 burst command */
167#define INCR8_EN BIT(1) /* AHB bus INCR8 burst command */
168#define INCR4_EN BIT(0) /* AHB bus INCR4 burst command */
169
170/* REG_MAC_CTRL */
171#define RX_BROADPKT BIT(17) /* receive broadcast packets */
172#define RX_MULTIPKT BIT(16) /* receive all multicast packets */
173#define FULLDUP BIT(15) /* full duplex */
174#define CRC_APD BIT(14) /* append CRC to transmitted packet */
175#define RCV_ALL BIT(12) /* ignore incoming packet destination */
176#define RX_FTL BIT(11) /* accept packets larger than 1518 B */
177#define RX_RUNT BIT(10) /* accept packets smaller than 64 B */
178#define HT_MULTI_EN BIT(9) /* accept on hash and mcast pass */
179#define RCV_EN BIT(8) /* receiver enable */
180#define ENRX_IN_HALFTX BIT(6) /* enable receive in half duplex mode */
181#define XMT_EN BIT(5) /* transmit enable */
182#define CRC_DIS BIT(4) /* disable CRC check when receiving */
183#define LOOP_EN BIT(3) /* internal loop-back */
184#define SW_RST BIT(2) /* software reset, last 64 AHB clocks */
185#define RDMA_EN BIT(1) /* enable receive DMA chan */
186#define XDMA_EN BIT(0) /* enable transmit DMA chan */
187
188/* REG_MAC_STATUS */
189#define COL_EXCEED BIT(11) /* more than 16 collisions */
190#define LATE_COL BIT(10) /* transmit late collision detected */
191#define XPKT_LOST BIT(9) /* transmit to ethernet lost */
192#define XPKT_OK BIT(8) /* transmit to ethernet success */
193#define RUNT_MAC_STS BIT(7) /* receive runt detected */
194#define FTL_MAC_STS BIT(6) /* receive frame too long detected */
195#define CRC_ERR_MAC_STS BIT(5)
196#define RPKT_LOST BIT(4) /* RX FIFO full, receive failed */
197#define RPKT_SAVE BIT(3) /* RX FIFO receive success */
198#define COL BIT(2) /* collision, incoming packet dropped */
199#define MCPU_BROADCAST BIT(1)
200#define MCPU_MULTICAST BIT(0)
201
202/* REG_PHY_CTRL */
203#define MIIWR BIT(27) /* init write sequence (auto cleared)*/
204#define MIIRD BIT(26)
205#define REGAD_MASK 0x3e00000
206#define PHYAD_MASK 0x1f0000
207#define MIIRDATA_MASK 0xffff
208
209/* REG_PHY_WRITE_DATA */
210#define MIIWDATA_MASK 0xffff
211
212/* REG_FLOW_CTRL */
213#define PAUSE_TIME_MASK 0xffff0000
214#define FC_HIGH_MASK 0xf000
215#define FC_LOW_MASK 0xf00
216#define RX_PAUSE BIT(4) /* receive pause frame */
217#define TX_PAUSED BIT(3) /* transmit pause due to receive */
218#define FCTHR_EN BIT(2) /* enable threshold mode. */
219#define TX_PAUSE BIT(1) /* transmit pause frame */
220#define FC_EN BIT(0) /* flow control mode enable */
221
222/* REG_BACK_PRESSURE */
223#define BACKP_LOW_MASK 0xf00
224#define BACKP_JAM_LEN_MASK 0xf0
225#define BACKP_MODE BIT(1) /* address mode */
226#define BACKP_ENABLE BIT(0)
227
228/* REG_TEST_SEED */
229#define TEST_SEED_MASK 0x3fff
230
231/* REG_DMA_FIFO_STATE */
232#define TX_DMA_REQUEST BIT(31)
233#define RX_DMA_REQUEST BIT(30)
234#define TX_DMA_GRANT BIT(29)
235#define RX_DMA_GRANT BIT(28)
236#define TX_FIFO_EMPTY BIT(27)
237#define RX_FIFO_EMPTY BIT(26)
238#define TX_DMA2_SM_MASK 0x7000
239#define TX_DMA1_SM_MASK 0xf00
240#define RX_DMA2_SM_MASK 0x70
241#define RX_DMA1_SM_MASK 0xF
242
243/* REG_TEST_MODE */
244#define SINGLE_PKT BIT(26) /* single packet mode */
245#define PTIMER_TEST BIT(25) /* automatic polling timer test mode */
246#define ITIMER_TEST BIT(24) /* interrupt timer test mode */
247#define TEST_SEED_SELECT BIT(22)
248#define SEED_SELECT BIT(21)
249#define TEST_MODE BIT(20)
250#define TEST_TIME_MASK 0xffc00
251#define TEST_EXCEL_MASK 0x3e0
252
253/* REG_TX_COL_COUNTER */
254#define TX_MCOL_MASK 0xffff0000
255#define TX_MCOL_SHIFT_BIT 16
256#define TX_SCOL_MASK 0xffff
257#define TX_SCOL_SHIFT_BIT 0
258
259/* REG_RPF_AEP_COUNTER */
260#define RPF_MASK 0xffff0000
261#define RPF_SHIFT_BIT 16
262#define AEP_MASK 0xffff
263#define AEP_SHIFT_BIT 0
264
265/* REG_XM_PG_COUNTER */
266#define XM_MASK 0xffff0000
267#define XM_SHIFT_BIT 16
268#define PG_MASK 0xffff
269#define PG_SHIFT_BIT 0
270
271/* REG_RUNT_TLC_COUNTER */
272#define RUNT_CNT_MASK 0xffff0000
273#define RUNT_CNT_SHIFT_BIT 16
274#define TLCC_MASK 0xffff
275#define TLCC_SHIFT_BIT 0
276
277/* REG_CRC_FTL_COUNTER */
278#define CRCER_CNT_MASK 0xffff0000
279#define CRCER_CNT_SHIFT_BIT 16
280#define FTL_CNT_MASK 0xffff
281#define FTL_CNT_SHIFT_BIT 0
282
283/* REG_RLC_RCC_COUNTER */
284#define RLC_MASK 0xffff0000
285#define RLC_SHIFT_BIT 16
286#define RCC_MASK 0xffff
287#define RCC_SHIFT_BIT 0
288
289/* REG_PHY_STATUS */
290#define AN_COMPLETE 0x20
291#define LINK_STATUS 0x4
292
293struct moxart_mac_priv_t {
294 void __iomem *base;
295 struct net_device_stats stats;
296 unsigned int reg_maccr;
297 unsigned int reg_imr;
298 struct napi_struct napi;
299 struct net_device *ndev;
300
301 dma_addr_t rx_base;
302 dma_addr_t rx_mapping[RX_DESC_NUM];
303 void __iomem *rx_desc_base;
304 unsigned char *rx_buf_base;
305 unsigned char *rx_buf[RX_DESC_NUM];
306 unsigned int rx_head;
307 unsigned int rx_buf_size;
308
309 dma_addr_t tx_base;
310 dma_addr_t tx_mapping[TX_DESC_NUM];
311 void __iomem *tx_desc_base;
312 unsigned char *tx_buf_base;
313 unsigned char *tx_buf[RX_DESC_NUM];
314 unsigned int tx_head;
315 unsigned int tx_buf_size;
316
317 spinlock_t txlock;
318 unsigned int tx_len[TX_DESC_NUM];
319 struct sk_buff *tx_skb[TX_DESC_NUM];
320 unsigned int tx_tail;
321};
322
323#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX
324#error MOXA ART Ethernet device driver TX buffer is too large!
325#endif
326#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX
327#error MOXA ART Ethernet device driver RX buffer is too large!
328#endif
329
330#endif
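The TXDES1 flag and length fields defined above are combined in moxart_mac_start_xmit() with a read-modify-write of the descriptor word. A small standalone sketch of that composition; unlike the driver, it also clears the stale length bits before OR-ing the new length in, which a robust implementation needs:

#include <stdint.h>
#include <stdio.h>

#define TX_DESC1_BUF_SIZE_MASK 0x7ffu
#define TX_DESC1_LTS           0x08000000u
#define TX_DESC1_FTS           0x10000000u
#define TX_DESC1_FIFO_COMPLETE 0x20000000u
#define TX_DESC1_INTR_COMPLETE 0x40000000u

static uint32_t build_txdes1(uint32_t old, unsigned int len)
{
	uint32_t v = old;

	v |= TX_DESC1_LTS | TX_DESC1_FTS;	/* frame fits in one buffer */
	v &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
	v &= ~TX_DESC1_BUF_SIZE_MASK;		/* drop any previous length */
	v |= len & TX_DESC1_BUF_SIZE_MASK;
	return v;
}

int main(void)
{
	printf("TXDES1 = 0x%08x\n", (unsigned)build_txdes1(0, 60));
	return 0;
}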
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 967bae8b85c5..149355b52ad0 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -74,6 +74,7 @@
74#ifdef CONFIG_MTRR 74#ifdef CONFIG_MTRR
75#include <asm/mtrr.h> 75#include <asm/mtrr.h>
76#endif 76#endif
77#include <net/busy_poll.h>
77 78
78#include "myri10ge_mcp.h" 79#include "myri10ge_mcp.h"
79#include "myri10ge_mcp_gen_header.h" 80#include "myri10ge_mcp_gen_header.h"
@@ -194,6 +195,21 @@ struct myri10ge_slice_state {
194 int cpu; 195 int cpu;
195 __be32 __iomem *dca_tag; 196 __be32 __iomem *dca_tag;
196#endif 197#endif
198#ifdef CONFIG_NET_RX_BUSY_POLL
199 unsigned int state;
200#define SLICE_STATE_IDLE 0
201#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
202#define SLICE_STATE_POLL 2 /* poll owns this slice */
203#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
204#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
205#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
206#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
207 spinlock_t lock;
208 unsigned long lock_napi_yield;
209 unsigned long lock_poll_yield;
210 unsigned long busy_poll_miss;
211 unsigned long busy_poll_cnt;
212#endif /* CONFIG_NET_RX_BUSY_POLL */
197 char irq_desc[32]; 213 char irq_desc[32];
198}; 214};
199 215
@@ -244,7 +260,7 @@ struct myri10ge_priv {
244 int fw_ver_minor; 260 int fw_ver_minor;
245 int fw_ver_tiny; 261 int fw_ver_tiny;
246 int adopted_rx_filter_bug; 262 int adopted_rx_filter_bug;
247 u8 mac_addr[6]; /* eeprom mac address */ 263 u8 mac_addr[ETH_ALEN]; /* eeprom mac address */
248 unsigned long serial_number; 264 unsigned long serial_number;
249 int vendor_specific_offset; 265 int vendor_specific_offset;
250 int fw_multicast_support; 266 int fw_multicast_support;
@@ -909,6 +925,92 @@ abort:
909 return status; 925 return status;
910} 926}
911 927
928#ifdef CONFIG_NET_RX_BUSY_POLL
929static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
930{
931 spin_lock_init(&ss->lock);
932 ss->state = SLICE_STATE_IDLE;
933}
934
935static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
936{
937 bool rc = true;
938 spin_lock(&ss->lock);
939 if ((ss->state & SLICE_LOCKED)) {
940 WARN_ON((ss->state & SLICE_STATE_NAPI));
941 ss->state |= SLICE_STATE_NAPI_YIELD;
942 rc = false;
943 ss->lock_napi_yield++;
944 } else
945 ss->state = SLICE_STATE_NAPI;
946 spin_unlock(&ss->lock);
947 return rc;
948}
949
950static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
951{
952 spin_lock(&ss->lock);
953 WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
954 ss->state = SLICE_STATE_IDLE;
955 spin_unlock(&ss->lock);
956}
957
958static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
959{
960 bool rc = true;
961 spin_lock_bh(&ss->lock);
962 if ((ss->state & SLICE_LOCKED)) {
963 ss->state |= SLICE_STATE_POLL_YIELD;
964 rc = false;
965 ss->lock_poll_yield++;
966 } else
967 ss->state |= SLICE_STATE_POLL;
968 spin_unlock_bh(&ss->lock);
969 return rc;
970}
971
972static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
973{
974 spin_lock_bh(&ss->lock);
975 WARN_ON((ss->state & SLICE_STATE_NAPI));
976 ss->state = SLICE_STATE_IDLE;
977 spin_unlock_bh(&ss->lock);
978}
979
980static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
981{
982 WARN_ON(!(ss->state & SLICE_LOCKED));
983 return (ss->state & SLICE_USER_PEND);
984}
985#else /* CONFIG_NET_RX_BUSY_POLL */
986static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
987{
988}
989
990static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
991{
992 return true;
993}
994
995static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
996{
997}
998
999static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
1000{
1001 return false;
1002}
1003
1004static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
1005{
1006}
1007
1008static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
1009{
1010 return false;
1011}
1012#endif
1013
912static int myri10ge_reset(struct myri10ge_priv *mgp) 1014static int myri10ge_reset(struct myri10ge_priv *mgp)
913{ 1015{
914 struct myri10ge_cmd cmd; 1016 struct myri10ge_cmd cmd;
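
The helpers above implement a small mutual-exclusion protocol over ss->state: exactly one of NAPI (softirq context) or a busy-polling socket (process context) may own the RX ring at a time, and the loser sets a *_YIELD bit and bumps a counter instead of spinning. The poll side uses the _bh lock variants because it runs in process context and must keep the NAPI softirq from preempting it on the same CPU. A rough caller-side sketch of the contract (illustrative only):

static int example_busy_poll_clean(struct myri10ge_slice_state *ss)
{
	int done = 0;

	if (!myri10ge_ss_lock_poll(ss))
		return 0;		/* NAPI owns the slice; back off */

	/* ... harvest a handful of RX completions here ... */

	myri10ge_ss_unlock_poll(ss);
	return done;
}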
@@ -1300,6 +1402,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
1300 } 1402 }
1301} 1403}
1302 1404
1405#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
1406
1303static inline int 1407static inline int
1304myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) 1408myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1305{ 1409{
@@ -1311,6 +1415,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1311 struct pci_dev *pdev = mgp->pdev; 1415 struct pci_dev *pdev = mgp->pdev;
1312 struct net_device *dev = mgp->dev; 1416 struct net_device *dev = mgp->dev;
1313 u8 *va; 1417 u8 *va;
1418 bool polling;
1314 1419
1315 if (len <= mgp->small_bytes) { 1420 if (len <= mgp->small_bytes) {
1316 rx = &ss->rx_small; 1421 rx = &ss->rx_small;
@@ -1325,7 +1430,15 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1325 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; 1430 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
1326 prefetch(va); 1431 prefetch(va);
1327 1432
1328 skb = napi_get_frags(&ss->napi); 1433 /* When busy polling in user context, allocate skb and copy headers to
1434 * skb's linear memory ourselves. When not busy polling, use the napi
1435 * gro api.
1436 */
1437 polling = myri10ge_ss_busy_polling(ss);
1438 if (polling)
1439 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
1440 else
1441 skb = napi_get_frags(&ss->napi);
1329 if (unlikely(skb == NULL)) { 1442 if (unlikely(skb == NULL)) {
1330 ss->stats.rx_dropped++; 1443 ss->stats.rx_dropped++;
1331 for (i = 0, remainder = len; remainder > 0; i++) { 1444 for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1364,8 +1477,29 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1364 } 1477 }
1365 myri10ge_vlan_rx(mgp->dev, va, skb); 1478 myri10ge_vlan_rx(mgp->dev, va, skb);
1366 skb_record_rx_queue(skb, ss - &mgp->ss[0]); 1479 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1480 skb_mark_napi_id(skb, &ss->napi);
1481
1482 if (polling) {
1483 int hlen;
1484
1485 /* myri10ge_vlan_rx might have moved the header, so compute
1486 * length and address again.
1487 */
1488 hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
1489 va = page_address(skb_frag_page(&rx_frags[0])) +
1490 rx_frags[0].page_offset;
1491 /* Copy header into the skb linear memory */
1492 skb_copy_to_linear_data(skb, va, hlen);
1493 rx_frags[0].page_offset += hlen;
1494 rx_frags[0].size -= hlen;
1495 skb->data_len -= hlen;
1496 skb->tail += hlen;
1497 skb->protocol = eth_type_trans(skb, dev);
1498 netif_receive_skb(skb);
1499 }
1500 else
1501 napi_gro_frags(&ss->napi);
1367 1502
1368 napi_gro_frags(&ss->napi);
1369 return 1; 1503 return 1;
1370} 1504}
1371 1505
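
In the polling branch the frame cannot go through napi_get_frags()/napi_gro_frags(), so the driver builds an ordinary skb and copies the leading bytes into its linear area, which is what eth_type_trans() and the protocol demux expect to parse. The open-coded conditional above is simply min(); a condensed, illustrative equivalent of the header pull:

static void example_pull_header(struct sk_buff *skb, skb_frag_t *frag)
{
	int hlen = min_t(int, MYRI10GE_HLEN, skb->len);
	u8 *va = page_address(skb_frag_page(frag)) + frag->page_offset;

	skb_copy_to_linear_data(skb, va, hlen);	/* headers now linear */
	frag->page_offset += hlen;
	frag->size -= hlen;		/* paged bytes shrink ... */
	skb->data_len -= hlen;
	skb->tail += hlen;		/* ... linear bytes grow */
}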
@@ -1524,10 +1658,14 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1524 if (ss->mgp->dca_enabled) 1658 if (ss->mgp->dca_enabled)
1525 myri10ge_update_dca(ss); 1659 myri10ge_update_dca(ss);
1526#endif 1660#endif
1661 /* Try later if the busy_poll handler is running. */
1662 if (!myri10ge_ss_lock_napi(ss))
1663 return budget;
1527 1664
1528 /* process as many rx events as NAPI will allow */ 1665 /* process as many rx events as NAPI will allow */
1529 work_done = myri10ge_clean_rx_done(ss, budget); 1666 work_done = myri10ge_clean_rx_done(ss, budget);
1530 1667
1668 myri10ge_ss_unlock_napi(ss);
1531 if (work_done < budget) { 1669 if (work_done < budget) {
1532 napi_complete(napi); 1670 napi_complete(napi);
1533 put_be32(htonl(3), ss->irq_claim); 1671 put_be32(htonl(3), ss->irq_claim);
@@ -1535,6 +1673,34 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1535 return work_done; 1673 return work_done;
1536} 1674}
1537 1675
1676#ifdef CONFIG_NET_RX_BUSY_POLL
1677static int myri10ge_busy_poll(struct napi_struct *napi)
1678{
1679 struct myri10ge_slice_state *ss =
1680 container_of(napi, struct myri10ge_slice_state, napi);
1681 struct myri10ge_priv *mgp = ss->mgp;
1682 int work_done;
1683
1684 /* Poll only when the link is up */
1685 if (mgp->link_state != MXGEFW_LINK_UP)
1686 return LL_FLUSH_FAILED;
1687
1688 if (!myri10ge_ss_lock_poll(ss))
1689 return LL_FLUSH_BUSY;
1690
1691 /* Process a small number of packets */
1692 work_done = myri10ge_clean_rx_done(ss, 4);
1693 if (work_done)
1694 ss->busy_poll_cnt += work_done;
1695 else
1696 ss->busy_poll_miss++;
1697
1698 myri10ge_ss_unlock_poll(ss);
1699
1700 return work_done;
1701}
1702#endif /* CONFIG_NET_RX_BUSY_POLL */
1703
1538static irqreturn_t myri10ge_intr(int irq, void *arg) 1704static irqreturn_t myri10ge_intr(int irq, void *arg)
1539{ 1705{
1540 struct myri10ge_slice_state *ss = arg; 1706 struct myri10ge_slice_state *ss = arg;
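
myri10ge_busy_poll() is installed as ndo_busy_poll further down; the contract is to return the number of packets processed, or LL_FLUSH_FAILED / LL_FLUSH_BUSY from <net/busy_poll.h> when polling is impossible or the slice is already owned. Only sockets that opt in ever reach this hook; a sketch of that opt-in from userspace (the 50us budget is an assumed value, not from this patch):

#include <sys/socket.h>

static int enable_busy_poll(int fd)
{
	int busy_poll_usecs = 50;	/* assumed budget; tune per workload */

	/* with SO_BUSY_POLL set, a blocking receive spins on the
	 * device's ndo_busy_poll for up to this many microseconds
	 * before falling back to sleeping */
	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
			  &busy_poll_usecs, sizeof(busy_poll_usecs));
}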
@@ -1742,6 +1908,10 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1742 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", 1908 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
1743 "rx_small_cnt", "rx_big_cnt", 1909 "rx_small_cnt", "rx_big_cnt",
1744 "wake_queue", "stop_queue", "tx_linearized", 1910 "wake_queue", "stop_queue", "tx_linearized",
1911#ifdef CONFIG_NET_RX_BUSY_POLL
1912 "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
1913 "rx_busy_poll_cnt",
1914#endif
1745}; 1915};
1746 1916
1747#define MYRI10GE_NET_STATS_LEN 21 1917#define MYRI10GE_NET_STATS_LEN 21
@@ -1842,6 +2012,12 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1842 data[i++] = (unsigned int)ss->tx.wake_queue; 2012 data[i++] = (unsigned int)ss->tx.wake_queue;
1843 data[i++] = (unsigned int)ss->tx.stop_queue; 2013 data[i++] = (unsigned int)ss->tx.stop_queue;
1844 data[i++] = (unsigned int)ss->tx.linearized; 2014 data[i++] = (unsigned int)ss->tx.linearized;
2015#ifdef CONFIG_NET_RX_BUSY_POLL
2016 data[i++] = ss->lock_napi_yield;
2017 data[i++] = ss->lock_poll_yield;
2018 data[i++] = ss->busy_poll_miss;
2019 data[i++] = ss->busy_poll_cnt;
2020#endif
1845 } 2021 }
1846} 2022}
1847 2023
@@ -2405,6 +2581,9 @@ static int myri10ge_open(struct net_device *dev)
2405 goto abort_with_rings; 2581 goto abort_with_rings;
2406 } 2582 }
2407 2583
2584 /* Initialize the slice spinlock and state used for polling */
2585 myri10ge_ss_init_lock(ss);
2586
2408 /* must happen prior to any irq */ 2587 /* must happen prior to any irq */
2409 napi_enable(&(ss)->napi); 2588 napi_enable(&(ss)->napi);
2410 } 2589 }
@@ -2481,9 +2660,19 @@ static int myri10ge_close(struct net_device *dev)
2481 2660
2482 del_timer_sync(&mgp->watchdog_timer); 2661 del_timer_sync(&mgp->watchdog_timer);
2483 mgp->running = MYRI10GE_ETH_STOPPING; 2662 mgp->running = MYRI10GE_ETH_STOPPING;
2663 local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
2484 for (i = 0; i < mgp->num_slices; i++) { 2664 for (i = 0; i < mgp->num_slices; i++) {
2485 napi_disable(&mgp->ss[i].napi); 2665 napi_disable(&mgp->ss[i].napi);
2666 /* Lock the slice to prevent the busy_poll handler from
2667 * accessing it. Later when we bring the NIC up, myri10ge_open
2668 * resets the slice including this lock.
2669 */
2670 while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
2671 pr_info("Slice %d locked\n", i);
2672 mdelay(1);
2673 }
2486 } 2674 }
2675 local_bh_enable();
2487 netif_carrier_off(dev); 2676 netif_carrier_off(dev);
2488 2677
2489 netif_tx_stop_all_queues(dev); 2678 netif_tx_stop_all_queues(dev);
@@ -3569,8 +3758,11 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3569 ss->fw_stats, ss->fw_stats_bus); 3758 ss->fw_stats, ss->fw_stats_bus);
3570 ss->fw_stats = NULL; 3759 ss->fw_stats = NULL;
3571 } 3760 }
3761 napi_hash_del(&ss->napi);
3572 netif_napi_del(&ss->napi); 3762 netif_napi_del(&ss->napi);
3573 } 3763 }
3764 /* Wait till napi structs are no longer used, and then free ss. */
3765 synchronize_rcu();
3574 kfree(mgp->ss); 3766 kfree(mgp->ss);
3575 mgp->ss = NULL; 3767 mgp->ss = NULL;
3576} 3768}
@@ -3591,9 +3783,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3591 for (i = 0; i < mgp->num_slices; i++) { 3783 for (i = 0; i < mgp->num_slices; i++) {
3592 ss = &mgp->ss[i]; 3784 ss = &mgp->ss[i];
3593 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3785 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3594 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3786 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
3595 &ss->rx_done.bus, 3787 &ss->rx_done.bus,
3596 GFP_KERNEL | __GFP_ZERO); 3788 GFP_KERNEL);
3597 if (ss->rx_done.entry == NULL) 3789 if (ss->rx_done.entry == NULL)
3598 goto abort; 3790 goto abort;
3599 bytes = sizeof(*ss->fw_stats); 3791 bytes = sizeof(*ss->fw_stats);
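
This hunk, like the pch_gbe and pasemi hunks later in this pull, swaps dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) for the dma_zalloc_coherent() wrapper. The wrapper is roughly the following (paraphrased from the era's <linux/dma-mapping.h>; consult the real header for the exact definition):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	/* same allocation, with zeroing folded into the gfp mask */
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}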
@@ -3606,6 +3798,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3606 ss->dev = mgp->dev; 3798 ss->dev = mgp->dev;
3607 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, 3799 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
3608 myri10ge_napi_weight); 3800 myri10ge_napi_weight);
3801 napi_hash_add(&ss->napi);
3609 } 3802 }
3610 return 0; 3803 return 0;
3611abort: 3804abort:
@@ -3625,13 +3818,12 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3625 struct pci_dev *pdev = mgp->pdev; 3818 struct pci_dev *pdev = mgp->pdev;
3626 char *old_fw; 3819 char *old_fw;
3627 bool old_allocated; 3820 bool old_allocated;
3628 int i, status, ncpus, msix_cap; 3821 int i, status, ncpus;
3629 3822
3630 mgp->num_slices = 1; 3823 mgp->num_slices = 1;
3631 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3632 ncpus = netif_get_num_default_rss_queues(); 3824 ncpus = netif_get_num_default_rss_queues();
3633 3825
3634 if (myri10ge_max_slices == 1 || msix_cap == 0 || 3826 if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
3635 (myri10ge_max_slices == -1 && ncpus < 2)) 3827 (myri10ge_max_slices == -1 && ncpus < 2))
3636 return; 3828 return;
3637 3829
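
The PCI core caches capability offsets at enumeration time, so the per-driver capability walk can be dropped in favor of the pdev->msix_cap field, as this hunk and the netxen hunk below both do. The two forms are equivalent (illustrative):

static bool example_has_msix(struct pci_dev *pdev)
{
	/* same result as pci_find_capability(pdev, PCI_CAP_ID_MSIX) != 0,
	 * without re-walking the capability list on every call */
	return pdev->msix_cap != 0;
}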
@@ -3749,6 +3941,9 @@ static const struct net_device_ops myri10ge_netdev_ops = {
3749 .ndo_change_mtu = myri10ge_change_mtu, 3941 .ndo_change_mtu = myri10ge_change_mtu,
3750 .ndo_set_rx_mode = myri10ge_set_multicast_list, 3942 .ndo_set_rx_mode = myri10ge_set_multicast_list,
3751 .ndo_set_mac_address = myri10ge_set_mac_address, 3943 .ndo_set_mac_address = myri10ge_set_mac_address,
3944#ifdef CONFIG_NET_RX_BUSY_POLL
3945 .ndo_busy_poll = myri10ge_busy_poll,
3946#endif
3752}; 3947};
3753 3948
3754static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3949static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index dc2c6f561e9a..e6f0a4366f90 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -390,7 +390,7 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
390 390
391 priv = netdev_priv(ndev); 391 priv = netdev_priv(ndev);
392 392
393 pdata = (struct netxeth_platform_data *)pdev->dev.platform_data; 393 pdata = dev_get_platdata(&pdev->dev);
394 priv->xc = request_xc(pdata->xcno, &pdev->dev); 394 priv->xc = request_xc(pdata->xcno, &pdev->dev);
395 if (!priv->xc) { 395 if (!priv->xc) {
396 dev_err(&pdev->dev, "unable to request xc engine\n"); 396 dev_err(&pdev->dev, "unable to request xc engine\n");
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index e88bdb1aa669..79645f74b3a8 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev)
922{ 922{
923 struct w90p910_ether *ether = netdev_priv(dev); 923 struct w90p910_ether *ether = netdev_priv(dev);
924 struct platform_device *pdev; 924 struct platform_device *pdev;
925 char addr[6]; 925 char addr[ETH_ALEN];
926 926
927 pdev = ether->pdev; 927 pdev = ether->pdev;
928 928
@@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev)
934 addr[5] = 0xa8; 934 addr[5] = 0xa8;
935 935
936 if (is_valid_ether_addr(addr)) 936 if (is_valid_ether_addr(addr))
937 memcpy(dev->dev_addr, &addr, 0x06); 937 memcpy(dev->dev_addr, &addr, ETH_ALEN);
938 else 938 else
939 dev_err(&pdev->dev, "invalid mac address\n"); 939 dev_err(&pdev->dev, "invalid mac address\n");
940} 940}
@@ -1014,7 +1014,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
1014 if (ether->rxirq < 0) { 1014 if (ether->rxirq < 0) {
1015 dev_err(&pdev->dev, "failed to get ether rx irq\n"); 1015 dev_err(&pdev->dev, "failed to get ether rx irq\n");
1016 error = -ENXIO; 1016 error = -ENXIO;
1017 goto failed_free_txirq; 1017 goto failed_free_io;
1018 } 1018 }
1019 1019
1020 platform_set_drvdata(pdev, dev); 1020 platform_set_drvdata(pdev, dev);
@@ -1023,7 +1023,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
1023 if (IS_ERR(ether->clk)) { 1023 if (IS_ERR(ether->clk)) {
1024 dev_err(&pdev->dev, "failed to get ether clock\n"); 1024 dev_err(&pdev->dev, "failed to get ether clock\n");
1025 error = PTR_ERR(ether->clk); 1025 error = PTR_ERR(ether->clk);
1026 goto failed_free_rxirq; 1026 goto failed_free_io;
1027 } 1027 }
1028 1028
1029 ether->rmiiclk = clk_get(&pdev->dev, "RMII"); 1029 ether->rmiiclk = clk_get(&pdev->dev, "RMII");
@@ -1049,10 +1049,6 @@ failed_put_rmiiclk:
1049 clk_put(ether->rmiiclk); 1049 clk_put(ether->rmiiclk);
1050failed_put_clk: 1050failed_put_clk:
1051 clk_put(ether->clk); 1051 clk_put(ether->clk);
1052failed_free_rxirq:
1053 free_irq(ether->rxirq, pdev);
1054failed_free_txirq:
1055 free_irq(ether->txirq, pdev);
1056failed_free_io: 1052failed_free_io:
1057 iounmap(ether->reg); 1053 iounmap(ether->reg);
1058failed_free_mem: 1054failed_free_mem:
@@ -1075,9 +1071,6 @@ static int w90p910_ether_remove(struct platform_device *pdev)
1075 iounmap(ether->reg); 1071 iounmap(ether->reg);
1076 release_mem_region(ether->res->start, resource_size(ether->res)); 1072 release_mem_region(ether->res->start, resource_size(ether->res));
1077 1073
1078 free_irq(ether->txirq, dev);
1079 free_irq(ether->rxirq, dev);
1080
1081 del_timer_sync(&ether->check_timer); 1074 del_timer_sync(&ether->check_timer);
1082 1075
1083 free_netdev(dev); 1076 free_netdev(dev);
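
The two dropped labels freed IRQs that probe never requested (this driver takes its IRQs in the open path) and, worse, passed pdev rather than the cookie used at request time, so both failure branches now unwind directly to failed_free_io. A sketch of the invariant the fix restores, with hypothetical resource names:

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *reg;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	reg = ioremap(res->start, resource_size(res));
	if (!reg)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);	/* looked up, not requested */
	if (irq < 0) {
		iounmap(reg);	/* undo only what probe itself acquired */
		return irq;
	}
	return 0;
}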
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 7779036690cc..6797b1075874 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -582,6 +582,19 @@ struct pch_gbe_hw_stats {
582}; 582};
583 583
584/** 584/**
585 * struct pch_gbe_privdata - PCI Device ID driver data
586 * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software
587 * @phy_disable_hibernate: Bool, disable PHY hibernation
588 * @platform_init: Platform initialization callback, called from
589 * probe, prior to PHY initialization.
590 */
591struct pch_gbe_privdata {
592 bool phy_tx_clk_delay;
593 bool phy_disable_hibernate;
594 int (*platform_init)(struct pci_dev *pdev);
595};
596
597/**
585 * struct pch_gbe_adapter - board specific private data structure 598 * struct pch_gbe_adapter - board specific private data structure
586 * @stats_lock: Spinlock structure for status 599 * @stats_lock: Spinlock structure for status
587 * @ethtool_lock: Spinlock structure for ethtool 600 * @ethtool_lock: Spinlock structure for ethtool
@@ -604,6 +617,7 @@ struct pch_gbe_hw_stats {
604 * @rx_buffer_len: Receive buffer length 617 * @rx_buffer_len: Receive buffer length
605 * @tx_queue_len: Transmit queue length 618 * @tx_queue_len: Transmit queue length
606 * @have_msi: PCI MSI mode flag 619 * @have_msi: PCI MSI mode flag
620 * @pch_gbe_privdata: PCI Device ID driver_data
607 */ 621 */
608 622
609struct pch_gbe_adapter { 623struct pch_gbe_adapter {
@@ -631,6 +645,7 @@ struct pch_gbe_adapter {
631 int hwts_tx_en; 645 int hwts_tx_en;
632 int hwts_rx_en; 646 int hwts_rx_en;
633 struct pci_dev *ptp_pdev; 647 struct pci_dev *ptp_pdev;
648 struct pch_gbe_privdata *pdata;
634}; 649};
635 650
636#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw) 651#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 1129db0cdf82..f0ceb89af931 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -118,6 +118,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
118 * filled by get_settings() on a down link, speed is -1: */ 118 * filled by get_settings() on a down link, speed is -1: */
119 if (speed == UINT_MAX) { 119 if (speed == UINT_MAX) {
120 speed = SPEED_1000; 120 speed = SPEED_1000;
121 ethtool_cmd_speed_set(ecmd, speed);
121 ecmd->duplex = DUPLEX_FULL; 122 ecmd->duplex = DUPLEX_FULL;
122 } 123 }
123 ret = mii_ethtool_sset(&adapter->mii, ecmd); 124 ret = mii_ethtool_sset(&adapter->mii, ecmd);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ab1039a95bf9..5a0f04c2c813 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/net_tstamp.h> 24#include <linux/net_tstamp.h>
25#include <linux/ptp_classify.h> 25#include <linux/ptp_classify.h>
26#include <linux/gpio.h>
26 27
27#define DRV_VERSION "1.01" 28#define DRV_VERSION "1.01"
28const char pch_driver_version[] = DRV_VERSION; 29const char pch_driver_version[] = DRV_VERSION;
@@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION;
111#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" 112#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
112#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" 113#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
113 114
115#define MINNOW_PHY_RESET_GPIO 13
116
114static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 117static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
115 118
116static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); 119static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
@@ -682,7 +685,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
682 } 685 }
683 adapter->hw.phy.addr = adapter->mii.phy_id; 686 adapter->hw.phy.addr = adapter->mii.phy_id;
684 netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id); 687 netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
685 if (addr == 32) 688 if (addr == PCH_GBE_PHY_REGS_LEN)
686 return -EAGAIN; 689 return -EAGAIN;
687 /* Selected the phy and isolate the rest */ 690 /* Selected the phy and isolate the rest */
688 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) { 691 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
@@ -1488,9 +1491,9 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1488 bufsz = adapter->rx_buffer_len; 1491 bufsz = adapter->rx_buffer_len;
1489 1492
1490 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1493 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1491 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, 1494 rx_ring->rx_buff_pool =
1492 &rx_ring->rx_buff_pool_logic, 1495 dma_zalloc_coherent(&pdev->dev, size,
1493 GFP_KERNEL | __GFP_ZERO); 1496 &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1494 if (!rx_ring->rx_buff_pool) 1497 if (!rx_ring->rx_buff_pool)
1495 return -ENOMEM; 1498 return -ENOMEM;
1496 1499
@@ -1804,9 +1807,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1804 1807
1805 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1808 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1806 1809
1807 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 1810 tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
1808 &tx_ring->dma, 1811 &tx_ring->dma, GFP_KERNEL);
1809 GFP_KERNEL | __GFP_ZERO);
1810 if (!tx_ring->desc) { 1812 if (!tx_ring->desc) {
1811 vfree(tx_ring->buffer_info); 1813 vfree(tx_ring->buffer_info);
1812 return -ENOMEM; 1814 return -ENOMEM;
@@ -1849,9 +1851,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1849 return -ENOMEM; 1851 return -ENOMEM;
1850 1852
1851 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1853 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1852 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1854 rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
1853 &rx_ring->dma, 1855 &rx_ring->dma, GFP_KERNEL);
1854 GFP_KERNEL | __GFP_ZERO);
1855 if (!rx_ring->desc) { 1856 if (!rx_ring->desc) {
1856 vfree(rx_ring->buffer_info); 1857 vfree(rx_ring->buffer_info);
1857 return -ENOMEM; 1858 return -ENOMEM;
@@ -2635,6 +2636,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2635 adapter->pdev = pdev; 2636 adapter->pdev = pdev;
2636 adapter->hw.back = adapter; 2637 adapter->hw.back = adapter;
2637 adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; 2638 adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
2639 adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
2640 if (adapter->pdata && adapter->pdata->platform_init)
2641 adapter->pdata->platform_init(pdev);
2638 2642
2639 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, 2643 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2640 PCI_DEVFN(12, 4)); 2644 PCI_DEVFN(12, 4));
@@ -2710,6 +2714,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2710 2714
2711 dev_dbg(&pdev->dev, "PCH Network Connection\n"); 2715 dev_dbg(&pdev->dev, "PCH Network Connection\n");
2712 2716
2717 /* Disable hibernation on certain platforms */
2718 if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
2719 pch_gbe_phy_disable_hibernate(&adapter->hw);
2720
2713 device_set_wakeup_enable(&pdev->dev, 1); 2721 device_set_wakeup_enable(&pdev->dev, 1);
2714 return 0; 2722 return 0;
2715 2723
@@ -2720,9 +2728,48 @@ err_free_netdev:
2720 return ret; 2728 return ret;
2721} 2729}
2722 2730
2731/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
2732 * ensure it is awake for probe and init. Request the line and reset the PHY.
2733 */
2734static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
2735{
2736 unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
2737 unsigned gpio = MINNOW_PHY_RESET_GPIO;
2738 int ret;
2739
2740 ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
2741 "minnow_phy_reset");
2742 if (ret) {
2743 dev_err(&pdev->dev,
2744 "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
2745 return ret;
2746 }
2747
2748 gpio_set_value(gpio, 0);
2749 usleep_range(1250, 1500);
2750 gpio_set_value(gpio, 1);
2751 usleep_range(1250, 1500);
2752
2753 return ret;
2754}
2755
2756static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
2757 .phy_tx_clk_delay = true,
2758 .phy_disable_hibernate = true,
2759 .platform_init = pch_gbe_minnow_platform_init,
2760};
2761
2723static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { 2762static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
2724 {.vendor = PCI_VENDOR_ID_INTEL, 2763 {.vendor = PCI_VENDOR_ID_INTEL,
2725 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, 2764 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2765 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
2766 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
2767 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2768 .class_mask = (0xFFFF00),
2769 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
2770 },
2771 {.vendor = PCI_VENDOR_ID_INTEL,
2772 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2726 .subvendor = PCI_ANY_ID, 2773 .subvendor = PCI_ANY_ID,
2727 .subdevice = PCI_ANY_ID, 2774 .subdevice = PCI_ANY_ID,
2728 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2775 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
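
The new table entry pins the MinnowBoard variant by subsystem vendor/device ID and hangs its quirks off .driver_data; ordering matters, since PCI ID matching is first-hit and the PCI_ANY_ID catch-all must stay last. The pch_gbe_probe hunk above retrieves it like this (condensed, illustrative):

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_gbe_privdata *pdata =
		(struct pch_gbe_privdata *)id->driver_data;

	/* board-specific hook, e.g. the MinnowBoard PHY reset */
	if (pdata && pdata->platform_init)
		return pdata->platform_init(pdev);
	return 0;
}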
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index da079073a6c6..8b7ff75fc8e0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -74,6 +74,15 @@
74#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ 74#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
75#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ 75#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
76 76
77/* AR8031 PHY Debug Registers */
78#define PHY_AR803X_ID 0x00001374
79#define PHY_AR8031_DBG_OFF 0x1D
80#define PHY_AR8031_DBG_DAT 0x1E
81#define PHY_AR8031_SERDES 0x05
82#define PHY_AR8031_HIBERNATE 0x0B
83#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */
84#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */
85
77/* Phy Id Register (word 2) */ 86/* Phy Id Register (word 2) */
78#define PHY_REVISION_MASK 0x000F 87#define PHY_REVISION_MASK 0x000F
79 88
@@ -249,6 +258,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
249} 258}
250 259
251/** 260/**
261 * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY
262 * @hw: Pointer to the HW structure
263 * Returns
264 * 0: Successful.
265 * -EINVAL: Invalid argument.
266 */
267static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw)
268{
269 /* The RGMII interface requires a ~2ns TX clock delay. This is typically
270 * done in layout with a longer trace or via PHY strapping, but can also
271 * be done via PHY configuration registers.
272 */
273 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
274 u16 mii_reg;
275 int ret = 0;
276
277 switch (hw->phy.id) {
278 case PHY_AR803X_ID:
279 netdev_dbg(adapter->netdev,
280 "Configuring AR803X PHY for 2ns TX clock delay\n");
281 pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg);
282 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
283 PHY_AR8031_SERDES);
284 if (ret)
285 break;
286
287 pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
288 mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY;
289 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
290 mii_reg);
291 break;
292 default:
293 netdev_err(adapter->netdev,
294 "Unknown PHY (%x), could not set TX clock delay\n",
295 hw->phy.id);
296 return -EINVAL;
297 }
298
299 if (ret)
300 netdev_err(adapter->netdev,
301 "Could not configure tx clock delay for PHY\n");
302 return ret;
303}
304
305/**
252 * pch_gbe_phy_init_setting - PHY initial setting 306 * pch_gbe_phy_init_setting - PHY initial setting
253 * @hw: Pointer to the HW structure 307 * @hw: Pointer to the HW structure
254 */ 308 */
@@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
277 pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); 331 pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
278 mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; 332 mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
279 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); 333 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
334
335 /* Setup a TX clock delay on certain platforms */
336 if (adapter->pdata && adapter->pdata->phy_tx_clk_delay)
337 pch_gbe_phy_tx_clk_delay(hw);
338}
339
340/**
341 * pch_gbe_phy_disable_hibernate - Disable the PHY low power state
342 * @hw: Pointer to the HW structure
343 * Returns
344 * 0: Successful.
345 * -EINVAL: Invalid argument.
346 */
347int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw)
348{
349 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
350 u16 mii_reg;
351 int ret = 0;
352
353 switch (hw->phy.id) {
354 case PHY_AR803X_ID:
355 netdev_dbg(adapter->netdev,
356 "Disabling hibernation for AR803X PHY\n");
357 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
358 PHY_AR8031_HIBERNATE);
359 if (ret)
360 break;
361
362 pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
363 mii_reg &= ~PHY_AR8031_PS_HIB_EN;
364 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
365 mii_reg);
366 break;
367 default:
368 netdev_err(adapter->netdev,
369 "Unknown PHY (%x), could not disable hibernation\n",
370 hw->phy.id);
371 return -EINVAL;
372 }
373
374 if (ret)
375 netdev_err(adapter->netdev,
376 "Could not disable PHY hibernation\n");
377 return ret;
280} 378}
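
Both new functions use the AR803X's indirect debug-register window: write the debug-register index to MII register 0x1D (PHY_AR8031_DBG_OFF), then read-modify-write the value through 0x1E (PHY_AR8031_DBG_DAT). Factored out, the sequence looks like this (illustrative helper; the set/clr parameters are not part of the patch):

static int ar803x_dbg_rmw(struct pch_gbe_hw *hw, u16 index,
			  u16 set, u16 clr)
{
	u16 val;
	int ret;

	/* select which debug register 0x1E will address */
	ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, index);
	if (ret)
		return ret;

	pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &val);
	val = (val & ~clr) | set;
	return pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, val);
}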
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 03264dc7b5ec..0cbe69206e04 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
33void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); 33void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
34void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw); 34void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
35void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw); 35void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
36int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw);
36 37
37#endif /* _PCH_GBE_PHY_H_ */ 38#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5f0b5da6149..c498181a9aa8 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
191 struct device_node *dn = pci_device_to_OF_node(pdev); 191 struct device_node *dn = pci_device_to_OF_node(pdev);
192 int len; 192 int len;
193 const u8 *maddr; 193 const u8 *maddr;
194 u8 addr[6]; 194 u8 addr[ETH_ALEN];
195 195
196 if (!dn) { 196 if (!dn) {
197 dev_dbg(&pdev->dev, 197 dev_dbg(&pdev->dev,
@@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
201 201
202 maddr = of_get_property(dn, "local-mac-address", &len); 202 maddr = of_get_property(dn, "local-mac-address", &len);
203 203
204 if (maddr && len == 6) { 204 if (maddr && len == ETH_ALEN) {
205 memcpy(mac->mac_addr, maddr, 6); 205 memcpy(mac->mac_addr, maddr, ETH_ALEN);
206 return 0; 206 return 0;
207 } 207 }
208 208
@@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
219 return -ENOENT; 219 return -ENOENT;
220 } 220 }
221 221
222 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], 222 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
223 &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { 223 &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
224 != ETH_ALEN) {
224 dev_warn(&pdev->dev, 225 dev_warn(&pdev->dev,
225 "can't parse mac address, not configuring\n"); 226 "can't parse mac address, not configuring\n");
226 return -EINVAL; 227 return -EINVAL;
227 } 228 }
228 229
229 memcpy(mac->mac_addr, addr, 6); 230 memcpy(mac->mac_addr, addr, ETH_ALEN);
230 231
231 return 0; 232 return 0;
232} 233}
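
One subtlety survives the cleanup: sscanf() returns the number of successful conversions, which only happens to equal ETH_ALEN because a MAC address has six octets, so the comparison is really "all six fields parsed" rather than a length check. The kernel also ships a dedicated parser that would express this directly (illustrative alternative, not what the patch does):

#include <linux/kernel.h>	/* mac_pton() */

static int example_parse_mac(const char *maddr, u8 *out)
{
	/* expects the canonical "xx:xx:xx:xx:xx:xx" form */
	if (!mac_pton(maddr, out))
		return -EINVAL;
	return 0;
}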
@@ -439,10 +440,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
439 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) 440 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
440 goto out_ring_desc; 441 goto out_ring_desc;
441 442
442 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, 443 ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
443 RX_RING_SIZE * sizeof(u64), 444 RX_RING_SIZE * sizeof(u64),
444 &ring->buf_dma, 445 &ring->buf_dma, GFP_KERNEL);
445 GFP_KERNEL | __GFP_ZERO);
446 if (!ring->buffers) 446 if (!ring->buffers)
447 goto out_ring_desc; 447 goto out_ring_desc;
448 448
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index e2f4efa8ad46..f2749d46c125 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -83,7 +83,7 @@ struct pasemi_mac {
83#define MAC_TYPE_GMAC 1 83#define MAC_TYPE_GMAC 1
84#define MAC_TYPE_XAUI 2 84#define MAC_TYPE_XAUI 2
85 85
86 u8 mac_addr[6]; 86 u8 mac_addr[ETH_ALEN];
87 87
88 struct net_lro_mgr lro_mgr; 88 struct net_lro_mgr lro_mgr;
89 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; 89 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0e1797295a48..f59e6be4a66e 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -45,6 +45,17 @@ config QLCNIC_SRIOV
45 This allows for virtual function acceleration in virtualized 45 This allows for virtual function acceleration in virtualized
46 environments. 46 environments.
47 47
48config QLCNIC_DCB
49 bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
50 depends on QLCNIC && DCB
51 default y
52 ---help---
53 This configuration parameter enables DCB support in QLE83XX
54 and QLE82XX Converged Ethernet devices. This allows DCB get
55 operations to be supported through the rtnetlink interface.
56 Only CEE mode of DCB is supported. PG and PFC values apply
57 to Tx only.
58
48config QLGE 59config QLGE
49 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 60 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
50 depends on PCI 61 depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 9fbb1cdbfa47..8375cbde9969 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
536{ 536{
537 struct netxen_adapter *adapter = netdev_priv(netdev); 537 struct netxen_adapter *adapter = netdev_priv(netdev);
538 struct netdev_hw_addr *ha; 538 struct netdev_hw_addr *ha;
539 u8 null_addr[6]; 539 u8 null_addr[ETH_ALEN];
540 int i; 540 int i;
541 541
542 memset(null_addr, 0, 6); 542 memset(null_addr, 0, ETH_ALEN);
543 543
544 if (netdev->flags & IFF_PROMISC) { 544 if (netdev->flags & IFF_PROMISC) {
545 545
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ec4cf7fd4123..cbd75f97ffb3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -459,16 +459,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
459static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) 459static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
460{ 460{
461 u32 control; 461 u32 control;
462 int pos;
463 462
464 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 463 if (pdev->msix_cap) {
465 if (pos) { 464 pci_read_config_dword(pdev, pdev->msix_cap, &control);
466 pci_read_config_dword(pdev, pos, &control);
467 if (enable) 465 if (enable)
468 control |= PCI_MSIX_FLAGS_ENABLE; 466 control |= PCI_MSIX_FLAGS_ENABLE;
469 else 467 else
470 control = 0; 468 control = 0;
471 pci_write_config_dword(pdev, pos, control); 469 pci_write_config_dword(pdev, pdev->msix_cap, control);
472 } 470 }
473} 471}
474 472
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 4b1fb3faa3b7..a848d2979722 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -11,3 +11,5 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
11 qlcnic_minidump.o qlcnic_sriov_common.o 11 qlcnic_minidump.o qlcnic_sriov_common.o
12 12
13qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o 13qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
14
15qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 221645e9f182..88349b8fa39a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -20,7 +20,6 @@
20#include <linux/tcp.h> 20#include <linux/tcp.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23
24#include <linux/ethtool.h> 23#include <linux/ethtool.h>
25#include <linux/mii.h> 24#include <linux/mii.h>
26#include <linux/timer.h> 25#include <linux/timer.h>
@@ -35,11 +34,12 @@
35#include "qlcnic_hdr.h" 34#include "qlcnic_hdr.h"
36#include "qlcnic_hw.h" 35#include "qlcnic_hw.h"
37#include "qlcnic_83xx_hw.h" 36#include "qlcnic_83xx_hw.h"
37#include "qlcnic_dcb.h"
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 2 40#define _QLCNIC_LINUX_MINOR 3
41#define _QLCNIC_LINUX_SUBVERSION 44 41#define _QLCNIC_LINUX_SUBVERSION 50
42#define QLCNIC_LINUX_VERSIONID "5.2.44" 42#define QLCNIC_LINUX_VERSIONID "5.3.50"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,6 +98,9 @@
98#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ 98#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
99 + MGMT_CMD_DESC_RESV) 99 + MGMT_CMD_DESC_RESV)
100#define QLCNIC_MAX_TX_TIMEOUTS 2 100#define QLCNIC_MAX_TX_TIMEOUTS 2
101#define QLCNIC_MAX_TX_RINGS 8
102#define QLCNIC_MAX_SDS_RINGS 8
103
101/* 104/*
102 * Following are the states of the Phantom. Phantom will set them and 105 * Following are the states of the Phantom. Phantom will set them and
103 * Host will read to check if the fields are correct. 106 * Host will read to check if the fields are correct.
@@ -389,7 +392,7 @@ struct qlcnic_dump_template_hdr {
389 392
390struct qlcnic_fw_dump { 393struct qlcnic_fw_dump {
391 u8 clr; /* flag to indicate if dump is cleared */ 394 u8 clr; /* flag to indicate if dump is cleared */
392 u8 enable; /* enable/disable dump */ 395 bool enable; /* enable/disable dump */
393 u32 size; /* total size of the dump */ 396 u32 size; /* total size of the dump */
394 void *data; /* dump data area */ 397 void *data; /* dump data area */
395 struct qlcnic_dump_template_hdr *tmpl_hdr; 398 struct qlcnic_dump_template_hdr *tmpl_hdr;
@@ -460,14 +463,16 @@ struct qlcnic_hardware_context {
460 struct qlcnic_fdt fdt; 463 struct qlcnic_fdt fdt;
461 struct qlc_83xx_reset reset; 464 struct qlc_83xx_reset reset;
462 struct qlc_83xx_idc idc; 465 struct qlc_83xx_idc idc;
463 struct qlc_83xx_fw_info fw_info; 466 struct qlc_83xx_fw_info *fw_info;
464 struct qlcnic_intrpt_config *intr_tbl; 467 struct qlcnic_intrpt_config *intr_tbl;
465 struct qlcnic_sriov *sriov; 468 struct qlcnic_sriov *sriov;
466 u32 *reg_tbl; 469 u32 *reg_tbl;
467 u32 *ext_reg_tbl; 470 u32 *ext_reg_tbl;
468 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; 471 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
469 u32 mbox_reg[4]; 472 u32 mbox_reg[4];
470 spinlock_t mbx_lock; 473 struct qlcnic_mailbox *mailbox;
474 u8 extend_lb_time;
475 u8 phys_port_id[ETH_ALEN];
471}; 476};
472 477
473struct qlcnic_adapter_stats { 478struct qlcnic_adapter_stats {
@@ -515,6 +520,7 @@ struct qlcnic_host_sds_ring {
515 u32 num_desc; 520 u32 num_desc;
516 void __iomem *crb_sts_consumer; 521 void __iomem *crb_sts_consumer;
517 522
523 struct qlcnic_host_tx_ring *tx_ring;
518 struct status_desc *desc_head; 524 struct status_desc *desc_head;
519 struct qlcnic_adapter *adapter; 525 struct qlcnic_adapter *adapter;
520 struct napi_struct napi; 526 struct napi_struct napi;
@@ -532,9 +538,17 @@ struct qlcnic_host_tx_ring {
532 void __iomem *crb_intr_mask; 538 void __iomem *crb_intr_mask;
533 char name[IFNAMSIZ + 12]; 539 char name[IFNAMSIZ + 12];
534 u16 ctx_id; 540 u16 ctx_id;
541
542 u32 state;
535 u32 producer; 543 u32 producer;
536 u32 sw_consumer; 544 u32 sw_consumer;
537 u32 num_desc; 545 u32 num_desc;
546
547 u64 xmit_on;
548 u64 xmit_off;
549 u64 xmit_called;
550 u64 xmit_finished;
551
538 void __iomem *crb_cmd_producer; 552 void __iomem *crb_cmd_producer;
539 struct cmd_desc_type0 *desc_head; 553 struct cmd_desc_type0 *desc_head;
540 struct qlcnic_adapter *adapter; 554 struct qlcnic_adapter *adapter;
@@ -559,7 +573,6 @@ struct qlcnic_recv_context {
559 u32 state; 573 u32 state;
560 u16 context_id; 574 u16 context_id;
561 u16 virt_port; 575 u16 virt_port;
562
563}; 576};
564 577
565/* HW context creation */ 578/* HW context creation */
@@ -604,6 +617,7 @@ struct qlcnic_recv_context {
604#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) 617#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
605#define QLCNIC_CAP0_VALIDOFF (1 << 11) 618#define QLCNIC_CAP0_VALIDOFF (1 << 11)
606#define QLCNIC_CAP0_LRO_MSS (1 << 21) 619#define QLCNIC_CAP0_LRO_MSS (1 << 21)
620#define QLCNIC_CAP0_TX_MULTI (1 << 22)
607 621
608/* 622/*
609 * Context state 623 * Context state
@@ -631,7 +645,7 @@ struct qlcnic_hostrq_rds_ring {
631 645
632struct qlcnic_hostrq_rx_ctx { 646struct qlcnic_hostrq_rx_ctx {
633 __le64 host_rsp_dma_addr; /* Response dma'd here */ 647 __le64 host_rsp_dma_addr; /* Response dma'd here */
634 __le32 capabilities[4]; /* Flag bit vector */ 648 __le32 capabilities[4]; /* Flag bit vector */
635 __le32 host_int_crb_mode; /* Interrupt crb usage */ 649 __le32 host_int_crb_mode; /* Interrupt crb usage */
636 __le32 host_rds_crb_mode; /* RDS crb usage */ 650 __le32 host_rds_crb_mode; /* RDS crb usage */
637 /* These ring offsets are relative to data[0] below */ 651 /* These ring offsets are relative to data[0] below */
@@ -802,6 +816,7 @@ struct qlcnic_mac_list_s {
802 816
803#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f 817#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
804#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D 818#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
819#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
805 820
806#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ 821#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
807#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ 822#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -814,6 +829,7 @@ struct qlcnic_mac_list_s {
814#define QLCNIC_FW_CAPABILITY_BDG BIT_8 829#define QLCNIC_FW_CAPABILITY_BDG BIT_8
815#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 830#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
816#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 831#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
832#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
817#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 833#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
818#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31 834#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
819 835
@@ -821,6 +837,7 @@ struct qlcnic_mac_list_s {
821#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 837#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
822#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 838#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
823#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 839#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
840#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
824 841
825/* module types */ 842/* module types */
826#define LINKEVENT_MODULE_NOT_PRESENT 1 843#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -913,6 +930,8 @@ struct qlcnic_ipaddr {
913#define QLCNIC_FW_LRO_MSS_CAP 0x8000 930#define QLCNIC_FW_LRO_MSS_CAP 0x8000
914#define QLCNIC_TX_INTR_SHARED 0x10000 931#define QLCNIC_TX_INTR_SHARED 0x10000
915#define QLCNIC_APP_CHANGED_FLAGS 0x20000 932#define QLCNIC_APP_CHANGED_FLAGS 0x20000
933#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
934
916#define QLCNIC_IS_MSI_FAMILY(adapter) \ 935#define QLCNIC_IS_MSI_FAMILY(adapter) \
917 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 936 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
918#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 937#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -922,11 +941,11 @@ struct qlcnic_ipaddr {
922#define QLCNIC_BEACON_DISABLE 0xD 941#define QLCNIC_BEACON_DISABLE 0xD
923 942
924#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 943#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
944#define QLCNIC_DEF_NUM_TX_RINGS 4
925#define QLCNIC_MSIX_TBL_SPACE 8192 945#define QLCNIC_MSIX_TBL_SPACE 8192
926#define QLCNIC_PCI_REG_MSIX_TBL 0x44 946#define QLCNIC_PCI_REG_MSIX_TBL 0x44
927#define QLCNIC_MSIX_TBL_PGSIZE 4096 947#define QLCNIC_MSIX_TBL_PGSIZE 4096
928 948
929#define QLCNIC_NETDEV_WEIGHT 128
930#define QLCNIC_ADAPTER_UP_MAGIC 777 949#define QLCNIC_ADAPTER_UP_MAGIC 777
931 950
932#define __QLCNIC_FW_ATTACHED 0 951#define __QLCNIC_FW_ATTACHED 0
@@ -937,10 +956,13 @@ struct qlcnic_ipaddr {
937#define __QLCNIC_DIAG_RES_ALLOC 6 956#define __QLCNIC_DIAG_RES_ALLOC 6
938#define __QLCNIC_LED_ENABLE 7 957#define __QLCNIC_LED_ENABLE 7
939#define __QLCNIC_ELB_INPROGRESS 8 958#define __QLCNIC_ELB_INPROGRESS 8
959#define __QLCNIC_MULTI_TX_UNIQUE 9
940#define __QLCNIC_SRIOV_ENABLE 10 960#define __QLCNIC_SRIOV_ENABLE 10
941#define __QLCNIC_SRIOV_CAPABLE 11 961#define __QLCNIC_SRIOV_CAPABLE 11
942#define __QLCNIC_MBX_POLL_ENABLE 12 962#define __QLCNIC_MBX_POLL_ENABLE 12
943#define __QLCNIC_DIAG_MODE 13 963#define __QLCNIC_DIAG_MODE 13
964#define __QLCNIC_DCB_STATE 14
965#define __QLCNIC_DCB_IN_AEN 15
944 966
945#define QLCNIC_INTERRUPT_TEST 1 967#define QLCNIC_INTERRUPT_TEST 1
946#define QLCNIC_LOOPBACK_TEST 2 968#define QLCNIC_LOOPBACK_TEST 2
@@ -950,12 +972,6 @@ struct qlcnic_ipaddr {
950#define QLCNIC_READD_AGE 20 972#define QLCNIC_READD_AGE 20
951#define QLCNIC_LB_MAX_FILTERS 64 973#define QLCNIC_LB_MAX_FILTERS 64
952#define QLCNIC_LB_BUCKET_SIZE 32 974#define QLCNIC_LB_BUCKET_SIZE 32
953
954/* QLCNIC Driver Error Code */
955#define QLCNIC_FW_NOT_RESPOND 51
956#define QLCNIC_TEST_IN_PROGRESS 52
957#define QLCNIC_UNDEFINED_ERROR 53
958#define QLCNIC_LB_CABLE_NOT_CONN 54
959#define QLCNIC_ILB_MAX_RCV_LOOP 10 975#define QLCNIC_ILB_MAX_RCV_LOOP 10
960 976
961struct qlcnic_filter { 977struct qlcnic_filter {
@@ -972,6 +988,21 @@ struct qlcnic_filter_hash {
972 u16 fbucket_size; 988 u16 fbucket_size;
973}; 989};
974 990
991/* Mailbox specific data structures */
992struct qlcnic_mailbox {
993 struct workqueue_struct *work_q;
994 struct qlcnic_adapter *adapter;
995 struct qlcnic_mbx_ops *ops;
996 struct work_struct work;
997 struct completion completion;
998 struct list_head cmd_q;
999 unsigned long status;
1000 spinlock_t queue_lock; /* Mailbox queue lock */
1001 spinlock_t aen_lock; /* Mailbox response/AEN lock */
1002 atomic_t rsp_status;
1003 u32 num_cmds;
1004};
1005
975struct qlcnic_adapter { 1006struct qlcnic_adapter {
976 struct qlcnic_hardware_context *ahw; 1007 struct qlcnic_hardware_context *ahw;
977 struct qlcnic_recv_context *recv_ctx; 1008 struct qlcnic_recv_context *recv_ctx;
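
struct qlcnic_mailbox moves 83xx firmware commands onto a dedicated workqueue: a caller queues its qlcnic_cmd_args on cmd_q under queue_lock, kicks work on work_q, and sleeps on the command's completion while the worker exchanges the message with firmware. A rough caller-side sketch implied by these fields (hypothetical; the real enqueue and response paths live behind struct qlcnic_mbx_ops, declared later in this header, and the timeout is assumed):

static int example_mbx_send(struct qlcnic_mailbox *mbx,
			    struct qlcnic_cmd_args *cmd)
{
	unsigned long timeout = msecs_to_jiffies(5000);	/* assumed */

	init_completion(&cmd->completion);

	spin_lock_bh(&mbx->queue_lock);
	list_add_tail(&cmd->list, &mbx->cmd_q);
	mbx->num_cmds++;
	spin_unlock_bh(&mbx->queue_lock);

	queue_work(mbx->work_q, &mbx->work);

	if (!wait_for_completion_timeout(&cmd->completion, timeout))
		return -ETIMEDOUT;

	return atomic_read(&cmd->rsp_status);
}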
@@ -1035,6 +1066,7 @@ struct qlcnic_adapter {
1035 struct delayed_work fw_work; 1066 struct delayed_work fw_work;
1036 struct delayed_work idc_aen_work; 1067 struct delayed_work idc_aen_work;
1037 struct delayed_work mbx_poll_work; 1068 struct delayed_work mbx_poll_work;
1069 struct qlcnic_dcb *dcb;
1038 1070
1039 struct qlcnic_filter_hash fhash; 1071 struct qlcnic_filter_hash fhash;
1040 struct qlcnic_filter_hash rx_fhash; 1072 struct qlcnic_filter_hash rx_fhash;
@@ -1152,6 +1184,7 @@ struct qlcnic_pci_info {
1152}; 1184};
1153 1185
1154struct qlcnic_npar_info { 1186struct qlcnic_npar_info {
1187 bool eswitch_status;
1155 u16 pvid; 1188 u16 pvid;
1156 u16 min_bw; 1189 u16 min_bw;
1157 u16 max_bw; 1190 u16 max_bw;
@@ -1371,7 +1404,6 @@ struct qlcnic_esw_statistics {
1371 struct __qlcnic_esw_statistics tx; 1404 struct __qlcnic_esw_statistics tx;
1372}; 1405};
1373 1406
1374#define QLCNIC_DUMP_MASK_DEF 0x1f
1375#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed 1407#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
1376#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed 1408#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
1377#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed 1409#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1385,9 +1417,20 @@ struct _cdrp_cmd {
1385}; 1417};
1386 1418
1387struct qlcnic_cmd_args { 1419struct qlcnic_cmd_args {
1388 struct _cdrp_cmd req; 1420 struct completion completion;
1389 struct _cdrp_cmd rsp; 1421 struct list_head list;
1390 int op_type; 1422 struct _cdrp_cmd req;
1423 struct _cdrp_cmd rsp;
1424 atomic_t rsp_status;
1425 int pay_size;
1426 u32 rsp_opcode;
1427 u32 total_cmds;
1428 u32 op_type;
1429 u32 type;
1430 u32 cmd_op;
1431 u32 *hdr; /* Back channel message header */
1432 u32 *pay; /* Back channel message payload */
1433 u8 func_num;
1391}; 1434};
1392 1435
1393int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); 1436int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1435,6 +1478,12 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1435void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); 1478void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
1436void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); 1479void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
1437int qlcnic_dump_fw(struct qlcnic_adapter *); 1480int qlcnic_dump_fw(struct qlcnic_adapter *);
1481int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
1482bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
1483pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
1484 pci_channel_state_t);
1485pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
1486void qlcnic_82xx_io_resume(struct pci_dev *);
1438 1487
1439/* Functions from qlcnic_init.c */ 1488/* Functions from qlcnic_init.c */
1440void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int); 1489void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1462,7 +1511,8 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
1462 1511
1463void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); 1512void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
1464void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); 1513void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1465void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); 1514void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
1515 struct qlcnic_host_tx_ring *);
1466 1516
1467int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); 1517int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1468void qlcnic_watchdog_task(struct work_struct *work); 1518void qlcnic_watchdog_task(struct work_struct *work);
@@ -1474,6 +1524,7 @@ void __qlcnic_set_multi(struct net_device *, u16);
1474int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16); 1524int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
1475int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); 1525int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
1476void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); 1526void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
1527int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
1477 1528
1478int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); 1529int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1479int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32); 1530int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
@@ -1495,8 +1546,9 @@ int qlcnic_reset_context(struct qlcnic_adapter *);
1495void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); 1546void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1496int qlcnic_diag_alloc_res(struct net_device *netdev, int test); 1547int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1497netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1548netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1498int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); 1549int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
1499int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); 1550int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
1551int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
1500void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); 1552void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
1501void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); 1553void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
1502int qlcnic_enable_msix(struct qlcnic_adapter *, u32); 1554int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1523,6 +1575,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
1523void qlcnic_advert_link_change(struct qlcnic_adapter *, int); 1575void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
1524void qlcnic_free_tx_rings(struct qlcnic_adapter *); 1576void qlcnic_free_tx_rings(struct qlcnic_adapter *);
1525int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *); 1577int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
1578void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
1526 1579
1527void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); 1580void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
1528void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); 1581void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
@@ -1585,6 +1638,26 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1585 tx_ring->producer; 1638 tx_ring->producer;
1586} 1639}
1587 1640
1641static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
1642 struct net_device *netdev)
1643{
1644 int err, tx_q;
1645
1646 tx_q = adapter->max_drv_tx_rings;
1647
1648 netdev->num_tx_queues = tx_q;
1649 netdev->real_num_tx_queues = tx_q;
1650
1651 err = netif_set_real_num_tx_queues(netdev, tx_q);
1652 if (err)
1653 dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
1654 tx_q);
1655 else
1656 dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
1657
1658 return err;
1659}
1660
1588struct qlcnic_nic_template { 1661struct qlcnic_nic_template {
1589 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1662 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1590 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1663 int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -1600,6 +1673,20 @@ struct qlcnic_nic_template {
1600 int (*resume)(struct qlcnic_adapter *); 1673 int (*resume)(struct qlcnic_adapter *);
1601}; 1674};
1602 1675
1676struct qlcnic_mbx_ops {
1677 int (*enqueue_cmd) (struct qlcnic_adapter *,
1678 struct qlcnic_cmd_args *, unsigned long *);
1679 void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
1680 void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
1681 void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
 1682	void (*notify_fw) (struct qlcnic_adapter *, u8);
1683};
1684
1685int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
1686void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
1687void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
1688void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
1689
1603/* Adapter hardware abstraction */ 1690/* Adapter hardware abstraction */
1604struct qlcnic_hardware_ops { 1691struct qlcnic_hardware_ops {
1605 void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); 1692 void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -1607,8 +1694,8 @@ struct qlcnic_hardware_ops {
1607 int (*read_reg) (struct qlcnic_adapter *, ulong, int *); 1694 int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
1608 int (*write_reg) (struct qlcnic_adapter *, ulong, u32); 1695 int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
1609 void (*get_ocm_win) (struct qlcnic_hardware_context *); 1696 void (*get_ocm_win) (struct qlcnic_hardware_context *);
 1610	int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
 1611	int (*setup_intr) (struct qlcnic_adapter *, u8);
 1697	int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
 1698	int (*setup_intr) (struct qlcnic_adapter *, u8, int);
1612 int (*alloc_mbx_args)(struct qlcnic_cmd_args *, 1699 int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
1613 struct qlcnic_adapter *, u32); 1700 struct qlcnic_adapter *, u32);
1614 int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); 1701 int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1641,6 +1728,11 @@ struct qlcnic_hardware_ops {
1641 int (*get_board_info) (struct qlcnic_adapter *); 1728 int (*get_board_info) (struct qlcnic_adapter *);
1642 void (*set_mac_filter_count) (struct qlcnic_adapter *); 1729 void (*set_mac_filter_count) (struct qlcnic_adapter *);
1643 void (*free_mac_list) (struct qlcnic_adapter *); 1730 void (*free_mac_list) (struct qlcnic_adapter *);
1731 int (*read_phys_port_id) (struct qlcnic_adapter *);
1732 pci_ers_result_t (*io_error_detected) (struct pci_dev *,
1733 pci_channel_state_t);
1734 pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
1735 void (*io_resume) (struct pci_dev *);
1644}; 1736};
1645 1737
1646extern struct qlcnic_nic_template qlcnic_vf_ops; 1738extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1669,14 +1761,15 @@ static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
1669} 1761}
1670 1762
1671static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, 1763static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
 1672					 u8 *mac)
 1764					 u8 *mac, u8 function)
1673{ 1765{
 1674	return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
 1766	return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
1675} 1767}
1676 1768
 1677static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
 1769static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
1770 u8 num_intr, int txq)
1678{ 1771{
 1679	return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
 1772	return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
1680} 1773}
1681 1774
1682static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx, 1775static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -1867,6 +1960,12 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
1867 adapter->ahw->hw_ops->set_mac_filter_count(adapter); 1960 adapter->ahw->hw_ops->set_mac_filter_count(adapter);
1868} 1961}
1869 1962
1963static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
1964{
1965 if (adapter->ahw->hw_ops->read_phys_port_id)
1966 adapter->ahw->hw_ops->read_phys_port_id(adapter);
1967}
1968
1870static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, 1969static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
1871 u32 key) 1970 u32 key)
1872{ 1971{
@@ -1898,16 +1997,45 @@ static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
1898 adapter->nic_ops->config_ipaddr(adapter, ip, cmd); 1997 adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
1899} 1998}
1900 1999
2000static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
2001{
2002 return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
2003}
2004
2005static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
2006{
2007 test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
2008 adapter->max_drv_tx_rings = 1;
2009}
2010
 2011/* When operating in multi Tx mode, the driver needs to write 0x1
 2012 * to the src register, instead of 0x0, to disable receiving interrupts.
 2013 */
1901static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) 2014static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
1902{ 2015{
 1903	writel(0, sds_ring->crb_intr_mask);
 2016	struct qlcnic_adapter *adapter = sds_ring->adapter;
2017
2018 if (qlcnic_check_multi_tx(adapter) &&
2019 !adapter->ahw->diag_test &&
2020 (adapter->flags & QLCNIC_MSIX_ENABLED))
2021 writel(0x1, sds_ring->crb_intr_mask);
2022 else
2023 writel(0, sds_ring->crb_intr_mask);
1904} 2024}
1905 2025
 2026/* When operating in multi Tx mode, the driver needs to write 0x0
 2027 * to the src register, instead of 0x1, to enable receiving interrupts.
 2028 */
1906static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) 2029static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
1907{ 2030{
1908 struct qlcnic_adapter *adapter = sds_ring->adapter; 2031 struct qlcnic_adapter *adapter = sds_ring->adapter;
1909 2032
 1910	writel(0x1, sds_ring->crb_intr_mask);
 2033	if (qlcnic_check_multi_tx(adapter) &&
2034 !adapter->ahw->diag_test &&
2035 (adapter->flags & QLCNIC_MSIX_ENABLED))
2036 writel(0, sds_ring->crb_intr_mask);
2037 else
2038 writel(0x1, sds_ring->crb_intr_mask);
1911 2039
1912 if (!QLCNIC_IS_MSI_FAMILY(adapter)) 2040 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1913 writel(0xfbff, adapter->tgt_mask_reg); 2041 writel(0xfbff, adapter->tgt_mask_reg);
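The inverted mask polarity in these two helpers is easy to misread, so here is a small stand-alone model of the decision both of them make. The 0x1/0x0 pairs mirror the writel() arguments above; everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Value written to sds_ring->crb_intr_mask: in multi-Tx MSI-X mode
 * (outside diagnostics) the polarity is inverted, i.e. 0x1 masks the
 * interrupt off and 0x0 unmasks it, the opposite of legacy mode. */
static unsigned int crb_intr_mask_val(bool multi_tx, bool diag_test,
				      bool msix, bool enable)
{
	bool inverted = multi_tx && !diag_test && msix;

	if (enable)
		return inverted ? 0x0 : 0x1;
	return inverted ? 0x1 : 0x0;
}

int main(void)
{
	printf("legacy enable: 0x%x\n",
	       crb_intr_mask_val(false, false, false, true));
	printf("multi-Tx MSI-X enable: 0x%x\n",
	       crb_intr_mask_val(true, false, true, true));
	return 0;
}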
@@ -1939,9 +2067,11 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1939 __func__, ##_args); \ 2067 __func__, ##_args); \
1940 } while (0) 2068 } while (0)
1941 2069
 1942#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
 2070#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
2071#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
1943#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 2072#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
 1944#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
 2073#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
2074#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
1945 2075
1946static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) 2076static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1947{ 2077{
@@ -1949,12 +2079,22 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1949 return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false; 2079 return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
1950} 2080}
1951 2081
2082static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
2083{
2084 unsigned short device = adapter->pdev->device;
2085
2086 return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
2087 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
2088}
2089
1952static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) 2090static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
1953{ 2091{
1954 unsigned short device = adapter->pdev->device; 2092 unsigned short device = adapter->pdev->device;
1955 bool status; 2093 bool status;
1956 2094
1957 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || 2095 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
2096 (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
2097 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
1958 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; 2098 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
1959 2099
1960 return status; 2100 return status;
@@ -1968,7 +2108,105 @@ static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
1968static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) 2108static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
1969{ 2109{
1970 unsigned short device = adapter->pdev->device; 2110 unsigned short device = adapter->pdev->device;
2111 bool status;
2112
2113 status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
2114 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
2115
2116 return status;
2117}
2118
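With the two new IDs the family checks deliberately overlap: QLE844X parts satisfy both qlcnic_84xx_check() and qlcnic_83xx_check(), since the 84xx devices reuse the 83xx code paths. A compilable summary of the mapping (ID values copied from the defines above, function names hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define ID_QLE824X    0x8020
#define ID_QLE834X    0x8030
#define ID_VF_QLE834X 0x8430
#define ID_QLE844X    0x8040
#define ID_VF_QLE844X 0x8440

static bool is_82xx(unsigned short id) { return id == ID_QLE824X; }
static bool is_84xx(unsigned short id)
{
	return id == ID_QLE844X || id == ID_VF_QLE844X;
}
/* The 83xx check intentionally includes the 84xx IDs. */
static bool is_83xx(unsigned short id)
{
	return id == ID_QLE834X || id == ID_VF_QLE834X || is_84xx(id);
}

int main(void)
{
	unsigned short id = ID_QLE844X;
	printf("0x%04x: 82xx=%d 83xx=%d 84xx=%d\n",
	       id, is_82xx(id), is_83xx(id), is_84xx(id));
	return 0;
}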
2119static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
2120{
2121 struct qlcnic_dcb *dcb = adapter->dcb;
2122
2123 if (dcb && dcb->ops->get_hw_capability)
2124 return dcb->ops->get_hw_capability(adapter);
2125
2126 return 0;
2127}
2128
2129static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
2130{
2131 struct qlcnic_dcb *dcb = adapter->dcb;
2132
2133 if (dcb && dcb->ops->free)
2134 dcb->ops->free(adapter);
2135}
2136
2137static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
2138{
2139 struct qlcnic_dcb *dcb = adapter->dcb;
2140
2141 if (dcb && dcb->ops->attach)
2142 return dcb->ops->attach(adapter);
2143
2144 return 0;
2145}
2146
2147static inline int
2148qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
2149{
2150 struct qlcnic_dcb *dcb = adapter->dcb;
2151
2152 if (dcb && dcb->ops->query_hw_capability)
2153 return dcb->ops->query_hw_capability(adapter, buf);
2154
2155 return 0;
2156}
2157
2158static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
2159{
2160 struct qlcnic_dcb *dcb = adapter->dcb;
2161
2162 if (dcb && dcb->ops->get_info)
2163 dcb->ops->get_info(adapter);
2164}
2165
2166static inline int
2167qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
2168{
2169 struct qlcnic_dcb *dcb = adapter->dcb;
2170
2171 if (dcb && dcb->ops->query_cee_param)
2172 return dcb->ops->query_cee_param(adapter, buf, type);
2173
2174 return 0;
2175}
2176
2177static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
2178{
2179 struct qlcnic_dcb *dcb = adapter->dcb;
2180
2181 if (dcb && dcb->ops->get_cee_cfg)
2182 return dcb->ops->get_cee_cfg(adapter);
2183
2184 return 0;
2185}
2186
2187static inline void
2188qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
2189{
2190 struct qlcnic_dcb *dcb = adapter->dcb;
2191
2192 if (dcb && dcb->ops->register_aen)
2193 dcb->ops->register_aen(adapter, flag);
2194}
2195
2196static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
2197 void *msg)
2198{
2199 struct qlcnic_dcb *dcb = adapter->dcb;
2200
2201 if (dcb && dcb->ops->handle_aen)
2202 dcb->ops->handle_aen(adapter, msg);
2203}
2204
2205static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
2206{
2207 struct qlcnic_dcb *dcb = adapter->dcb;
1971 2208
 1972	return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
 2209	if (dcb && dcb->ops->init_dcbnl_ops)
2210 dcb->ops->init_dcbnl_ops(adapter);
1973} 2211}
1974#endif /* __QLCNIC_H_ */ 2212#endif /* __QLCNIC_H_ */
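All of the qlcnic_dcb_* wrappers added above follow one pattern: adapter->dcb may be NULL (DCB support absent) and individual ops may be unimplemented, so every call site is double-guarded and falls back to a harmless default. A minimal stand-alone sketch of the pattern, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

struct dcb_ops { int (*get_hw_capability)(void *adapter); };
struct dcb { const struct dcb_ops *ops; };

/* Double guard: the feature object may be missing, and an op within an
 * existing object may be unimplemented; both cases return the default. */
static int dcb_get_hw_capability(struct dcb *dcb, void *adapter)
{
	if (dcb && dcb->ops->get_hw_capability)
		return dcb->ops->get_hw_capability(adapter);
	return 0;
}

int main(void)
{
	printf("%d\n", dcb_get_hw_capability(NULL, NULL)); /* safe no-op */
	return 0;
}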
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 9d4bb7f83904..a1818dae47b6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -11,6 +11,7 @@
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/aer.h>
14 15
15#define QLCNIC_MAX_TX_QUEUES 1 16#define QLCNIC_MAX_TX_QUEUES 1
16#define RSS_HASHTYPE_IP_TCP 0x3 17#define RSS_HASHTYPE_IP_TCP 0x3
@@ -67,6 +68,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
67 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, 68 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
68 {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, 69 {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
69 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, 70 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
71 {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
72 {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
70}; 73};
71 74
72const u32 qlcnic_83xx_ext_reg_tbl[] = { 75const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -149,7 +152,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
149 .get_mac_address = qlcnic_83xx_get_mac_address, 152 .get_mac_address = qlcnic_83xx_get_mac_address,
150 .setup_intr = qlcnic_83xx_setup_intr, 153 .setup_intr = qlcnic_83xx_setup_intr,
151 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, 154 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
 152	.mbx_cmd = qlcnic_83xx_mbx_op,
 155	.mbx_cmd = qlcnic_83xx_issue_cmd,
153 .get_func_no = qlcnic_83xx_get_func_no, 156 .get_func_no = qlcnic_83xx_get_func_no,
154 .api_lock = qlcnic_83xx_cam_lock, 157 .api_lock = qlcnic_83xx_cam_lock,
155 .api_unlock = qlcnic_83xx_cam_unlock, 158 .api_unlock = qlcnic_83xx_cam_unlock,
@@ -175,6 +178,10 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
175 .get_board_info = qlcnic_83xx_get_port_info, 178 .get_board_info = qlcnic_83xx_get_port_info,
176 .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count, 179 .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
177 .free_mac_list = qlcnic_82xx_free_mac_list, 180 .free_mac_list = qlcnic_82xx_free_mac_list,
181 .io_error_detected = qlcnic_83xx_io_error_detected,
182 .io_slot_reset = qlcnic_83xx_io_slot_reset,
183 .io_resume = qlcnic_83xx_io_resume,
184
178}; 185};
179 186
180static struct qlcnic_nic_template qlcnic_83xx_ops = { 187static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -261,7 +268,7 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
261 } 268 }
262} 269}
263 270
 264int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
 271int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
265{ 272{
266 int err, i, num_msix; 273 int err, i, num_msix;
267 struct qlcnic_hardware_context *ahw = adapter->ahw; 274 struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -362,6 +369,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
362 struct qlcnic_cmd_args *cmd) 369 struct qlcnic_cmd_args *cmd)
363{ 370{
364 int i; 371 int i;
372
373 if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
374 return;
375
365 for (i = 0; i < cmd->rsp.num; i++) 376 for (i = 0; i < cmd->rsp.num; i++)
366 cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i)); 377 cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
367} 378}
@@ -398,24 +409,33 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
398 return IRQ_HANDLED; 409 return IRQ_HANDLED;
399} 410}
400 411
412static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
413{
414 atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
415 complete(&mbx->completion);
416}
417
401static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) 418static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
402{ 419{
 403	u32 resp, event;
 420	u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
421 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
404 unsigned long flags; 422 unsigned long flags;
405 423
 406	spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 424	spin_lock_irqsave(&mbx->aen_lock, flags);
407
408 resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); 425 resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
409 if (!(resp & QLCNIC_SET_OWNER)) 426 if (!(resp & QLCNIC_SET_OWNER))
410 goto out; 427 goto out;
411 428
412 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); 429 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
 413	if (event & QLCNIC_MBX_ASYNC_EVENT)
 430	if (event & QLCNIC_MBX_ASYNC_EVENT) {
414 __qlcnic_83xx_process_aen(adapter); 431 __qlcnic_83xx_process_aen(adapter);
 415
 432	} else {
433 if (atomic_read(&mbx->rsp_status) != rsp_status)
434 qlcnic_83xx_notify_mbx_response(mbx);
435 }
416out: 436out:
417 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); 437 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
 418	spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 438	spin_unlock_irqrestore(&mbx->aen_lock, flags);
419} 439}
420 440
421irqreturn_t qlcnic_83xx_intr(int irq, void *data) 441irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@ -515,7 +535,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
515 } 535 }
516 536
517 /* Enable mailbox interrupt */ 537 /* Enable mailbox interrupt */
 518	qlcnic_83xx_enable_mbx_intrpt(adapter);
 538	qlcnic_83xx_enable_mbx_interrupt(adapter);
519 539
520 return err; 540 return err;
521} 541}
@@ -628,7 +648,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
628 ahw->max_uc_count = count; 648 ahw->max_uc_count = count;
629} 649}
630 650
 631void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
 651void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
632{ 652{
633 u32 val; 653 u32 val;
634 654
@@ -682,11 +702,14 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
682static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, 702static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
683 u32 data[]); 703 u32 data[]);
684 704
 685static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
 705void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
686 struct qlcnic_cmd_args *cmd) 706 struct qlcnic_cmd_args *cmd)
687{ 707{
688 int i; 708 int i;
689 709
710 if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
711 return;
712
690 dev_info(&adapter->pdev->dev, 713 dev_info(&adapter->pdev->dev,
691 "Host MBX regs(%d)\n", cmd->req.num); 714 "Host MBX regs(%d)\n", cmd->req.num);
692 for (i = 0; i < cmd->req.num; i++) { 715 for (i = 0; i < cmd->req.num; i++) {
@@ -705,120 +728,73 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
705 pr_info("\n"); 728 pr_info("\n");
706} 729}
707 730
 708/* Mailbox response for mac rcode */
 709u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
 710{
 711	u32 fw_data;
 712	u8 mac_cmd_rcode;
 713
 714	fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
 715	mac_cmd_rcode = (u8)fw_data;
 716	if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
 717	    mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
 718	    mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
 719		return QLCNIC_RCODE_SUCCESS;
 720	return 1;
 721}
 722
 723u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
 724{
 725	u32 data;
 726	struct qlcnic_hardware_context *ahw = adapter->ahw;
 727	/* wait for mailbox completion */
 728	do {
 729		data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
 730		if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
 731			data = QLCNIC_RCODE_TIMEOUT;
 732			break;
 733		}
 734		mdelay(1);
 735	} while (!data);
 736	return data;
 737}
 731static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
 732						struct qlcnic_cmd_args *cmd)
 733{
 734	struct qlcnic_hardware_context *ahw = adapter->ahw;
 735	int opcode = LSW(cmd->req.arg[0]);
 736	unsigned long max_loops;
 737
 738	max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
 739
 740	for (; max_loops; max_loops--) {
 741		if (atomic_read(&cmd->rsp_status) ==
 742		    QLC_83XX_MBX_RESPONSE_ARRIVED)
 743			return;
 744
 745		udelay(1);
 746	}
 747
 748	dev_err(&adapter->pdev->dev,
 749		"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 750		__func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
 751	flush_workqueue(ahw->mailbox->work_q);
 752	return;
 753}
738 754
 739int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
 740		       struct qlcnic_cmd_args *cmd)
 741{
 742	int i;
 743	u16 opcode;
 744	u8 mbx_err_code;
 745	unsigned long flags;
 746	struct qlcnic_hardware_context *ahw = adapter->ahw;
 747	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
 748
 749	opcode = LSW(cmd->req.arg[0]);
 750	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
 751		dev_info(&adapter->pdev->dev,
 752			 "Mailbox cmd attempted, 0x%x\n", opcode);
 753		dev_info(&adapter->pdev->dev, "Mailbox detached\n");
 754		return 0;
 755	}
 756
 757	spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 758	mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 759
 760	if (mbx_val) {
 761		QLCDB(adapter, DRV,
 762		      "Mailbox cmd attempted, 0x%x\n", opcode);
 763		QLCDB(adapter, DRV,
 764		      "Mailbox not available, 0x%x, collect FW dump\n",
 765		      mbx_val);
 766		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
 767		spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 768		return cmd->rsp.arg[0];
 769	}
 770
 771	/* Fill in mailbox registers */
 772	mbx_cmd = cmd->req.arg[0];
 773	writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 774	for (i = 1; i < cmd->req.num; i++)
 775		writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
 776
 777	/* Signal FW about the impending command */
 778	QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 779poll:
 780	rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 781	if (rsp != QLCNIC_RCODE_TIMEOUT) {
 782		/* Get the FW response data */
 783		fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 784		if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
 785			__qlcnic_83xx_process_aen(adapter);
 786			goto poll;
 787		}
 788		mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 789		rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
 790		opcode = QLCNIC_MBX_RSP(fw_data);
 791		qlcnic_83xx_get_mbx_data(adapter, cmd);
 792
 793		switch (mbx_err_code) {
 794		case QLCNIC_MBX_RSP_OK:
 795		case QLCNIC_MBX_PORT_RSP_OK:
 796			rsp = QLCNIC_RCODE_SUCCESS;
 797			break;
 798		default:
 799			if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
 800				rsp = qlcnic_83xx_mac_rcode(adapter);
 801				if (!rsp)
 802					goto out;
 803			}
 804			dev_err(&adapter->pdev->dev,
 805				"MBX command 0x%x failed with err:0x%x\n",
 806				opcode, mbx_err_code);
 807			rsp = mbx_err_code;
 808			qlcnic_dump_mbx(adapter, cmd);
 809			break;
 810		}
 811		goto out;
 812	}
 813
 814	dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
 815		QLCNIC_MBX_RSP(mbx_cmd));
 816	rsp = QLCNIC_RCODE_TIMEOUT;
 817out:
 818	/* clear fw mbx control register */
 819	QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
 820	spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 821	return rsp;
 822}
 755int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
 756			  struct qlcnic_cmd_args *cmd)
 757{
 758	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 759	struct qlcnic_hardware_context *ahw = adapter->ahw;
 760	int cmd_type, err, opcode;
 761	unsigned long timeout;
 762
 763	opcode = LSW(cmd->req.arg[0]);
 764	cmd_type = cmd->type;
 765	err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
 766	if (err) {
 767		dev_err(&adapter->pdev->dev,
 768			"%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 769			__func__, opcode, cmd->type, ahw->pci_func,
 770			ahw->op_mode);
 771		return err;
 772	}
 773
 774	switch (cmd_type) {
 775	case QLC_83XX_MBX_CMD_WAIT:
 776		if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
 777			dev_err(&adapter->pdev->dev,
 778				"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 779				__func__, opcode, cmd_type, ahw->pci_func,
 780				ahw->op_mode);
 781			flush_workqueue(mbx->work_q);
 782		}
 783		break;
 784	case QLC_83XX_MBX_CMD_NO_WAIT:
 785		return 0;
 786	case QLC_83XX_MBX_CMD_BUSY_WAIT:
 787		qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
 788		break;
 789	default:
 790		dev_err(&adapter->pdev->dev,
 791			"%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 792			__func__, opcode, cmd_type, ahw->pci_func,
 793			ahw->op_mode);
 794		qlcnic_83xx_detach_mailbox_work(adapter);
 795	}
 796
 797	return cmd->rsp_opcode;
 798}
823 799
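After this rewrite a caller picks one of three completion disciplines via cmd->type: QLC_83XX_MBX_CMD_WAIT sleeps on a per-command completion, QLC_83XX_MBX_CMD_NO_WAIT returns as soon as the command is queued, and QLC_83XX_MBX_CMD_BUSY_WAIT spins in a bounded udelay() loop. A user-space model of the dispatch, with the queueing and wakeup machinery stubbed out:

#include <stdio.h>

enum cmd_type { CMD_WAIT, CMD_NO_WAIT, CMD_BUSY_WAIT };

/* Stand-ins for wait_for_completion_timeout() and the udelay() loop. */
static int wait_done(void)  { return 1; }	/* 0 would mean timeout */
static void busy_poll(void) { }

static int issue_cmd(enum cmd_type type, int rsp_opcode)
{
	switch (type) {
	case CMD_WAIT:			/* QLC_83XX_MBX_CMD_WAIT */
		if (!wait_done())
			puts("mailbox command timed out");
		break;
	case CMD_NO_WAIT:		/* QLC_83XX_MBX_CMD_NO_WAIT */
		return 0;		/* fire and forget, worker cleans up */
	case CMD_BUSY_WAIT:		/* QLC_83XX_MBX_CMD_BUSY_WAIT */
		busy_poll();
		break;
	}
	return rsp_opcode;
}

int main(void)
{
	printf("%d\n", issue_cmd(CMD_NO_WAIT, -1));
	return 0;
}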
824int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, 800int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -828,6 +804,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
828 u32 temp; 804 u32 temp;
829 const struct qlcnic_mailbox_metadata *mbx_tbl; 805 const struct qlcnic_mailbox_metadata *mbx_tbl;
830 806
807 memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
831 mbx_tbl = qlcnic_83xx_mbx_tbl; 808 mbx_tbl = qlcnic_83xx_mbx_tbl;
832 size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); 809 size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
833 for (i = 0; i < size; i++) { 810 for (i = 0; i < size; i++) {
@@ -850,6 +827,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
850 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); 827 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
851 temp = adapter->ahw->fw_hal_version << 29; 828 temp = adapter->ahw->fw_hal_version << 29;
852 mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); 829 mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
830 mbx->cmd_op = type;
853 return 0; 831 return 0;
854 } 832 }
855 } 833 }
@@ -888,9 +866,9 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
888 866
889void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) 867void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
890{ 868{
869 struct qlcnic_hardware_context *ahw = adapter->ahw;
891 u32 event[QLC_83XX_MBX_AEN_CNT]; 870 u32 event[QLC_83XX_MBX_AEN_CNT];
892 int i; 871 int i;
893 struct qlcnic_hardware_context *ahw = adapter->ahw;
894 872
895 for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) 873 for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
896 event[i] = readl(QLCNIC_MBX_FW(ahw, i)); 874 event[i] = readl(QLCNIC_MBX_FW(ahw, i));
@@ -910,6 +888,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
910 &adapter->idc_aen_work, 0); 888 &adapter->idc_aen_work, 0);
911 break; 889 break;
912 case QLCNIC_MBX_TIME_EXTEND_EVENT: 890 case QLCNIC_MBX_TIME_EXTEND_EVENT:
891 ahw->extend_lb_time = event[1] >> 8 & 0xf;
913 break; 892 break;
914 case QLCNIC_MBX_BC_EVENT: 893 case QLCNIC_MBX_BC_EVENT:
915 qlcnic_sriov_handle_bc_event(adapter, event[1]); 894 qlcnic_sriov_handle_bc_event(adapter, event[1]);
@@ -922,6 +901,9 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
922 dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n", 901 dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
923 QLCNIC_MBX_RSP(event[0])); 902 QLCNIC_MBX_RSP(event[0]));
924 break; 903 break;
904 case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
905 qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
906 break;
925 default: 907 default:
926 dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n", 908 dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
927 QLCNIC_MBX_RSP(event[0])); 909 QLCNIC_MBX_RSP(event[0]));
@@ -933,20 +915,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
933 915
934static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) 916static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
935{ 917{
918 u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
936 struct qlcnic_hardware_context *ahw = adapter->ahw; 919 struct qlcnic_hardware_context *ahw = adapter->ahw;
 937	u32 resp, event;
 920	struct qlcnic_mailbox *mbx = ahw->mailbox;
938 unsigned long flags; 921 unsigned long flags;
939 922
 940	spin_lock_irqsave(&ahw->mbx_lock, flags);
 923	spin_lock_irqsave(&mbx->aen_lock, flags);
941
942 resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); 924 resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
943 if (resp & QLCNIC_SET_OWNER) { 925 if (resp & QLCNIC_SET_OWNER) {
944 event = readl(QLCNIC_MBX_FW(ahw, 0)); 926 event = readl(QLCNIC_MBX_FW(ahw, 0));
 945		if (event & QLCNIC_MBX_ASYNC_EVENT)
 927		if (event & QLCNIC_MBX_ASYNC_EVENT) {
946 __qlcnic_83xx_process_aen(adapter); 928 __qlcnic_83xx_process_aen(adapter);
929 } else {
930 if (atomic_read(&mbx->rsp_status) != rsp_status)
931 qlcnic_83xx_notify_mbx_response(mbx);
932 }
947 } 933 }
 948
 934	spin_unlock_irqrestore(&mbx->aen_lock, flags);
949 spin_unlock_irqrestore(&ahw->mbx_lock, flags);
950} 935}
951 936
952static void qlcnic_83xx_mbx_poll_work(struct work_struct *work) 937static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@ -969,6 +954,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
969 return; 954 return;
970 955
971 INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work); 956 INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
957 queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
972} 958}
973 959
974void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter) 960void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@ -1355,8 +1341,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
1355 1341
1356 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1342 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1357 /* disable and free mailbox interrupt */ 1343 /* disable and free mailbox interrupt */
 1358		if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 1344		if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1345 qlcnic_83xx_enable_mbx_poll(adapter);
1359 qlcnic_83xx_free_mbx_intr(adapter); 1346 qlcnic_83xx_free_mbx_intr(adapter);
1347 }
1360 adapter->ahw->loopback_state = 0; 1348 adapter->ahw->loopback_state = 0;
1361 adapter->ahw->hw_ops->setup_link_event(adapter, 1); 1349 adapter->ahw->hw_ops->setup_link_event(adapter, 1);
1362 } 1350 }
@@ -1377,6 +1365,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1377 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1365 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1378 sds_ring = &adapter->recv_ctx->sds_rings[ring]; 1366 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1379 qlcnic_83xx_disable_intr(adapter, sds_ring); 1367 qlcnic_83xx_disable_intr(adapter, sds_ring);
1368 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1369 qlcnic_83xx_enable_mbx_poll(adapter);
1380 } 1370 }
1381 } 1371 }
1382 1372
@@ -1386,6 +1376,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1386 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1376 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1387 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { 1377 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1388 err = qlcnic_83xx_setup_mbx_intr(adapter); 1378 err = qlcnic_83xx_setup_mbx_intr(adapter);
1379 qlcnic_83xx_disable_mbx_poll(adapter);
1389 if (err) { 1380 if (err) {
1390 dev_err(&adapter->pdev->dev, 1381 dev_err(&adapter->pdev->dev,
1391 "%s: failed to setup mbx interrupt\n", 1382 "%s: failed to setup mbx interrupt\n",
@@ -1402,6 +1393,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1402 1393
1403 if (netif_running(netdev)) 1394 if (netif_running(netdev))
1404 __qlcnic_up(adapter, netdev); 1395 __qlcnic_up(adapter, netdev);
1396
1397 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
1398 !(adapter->flags & QLCNIC_MSIX_ENABLED))
1399 qlcnic_83xx_disable_mbx_poll(adapter);
1405out: 1400out:
1406 netif_device_attach(netdev); 1401 netif_device_attach(netdev);
1407} 1402}
@@ -1619,26 +1614,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
1619 1614
1620int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) 1615int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
1621{ 1616{
 1622	int err;
 1617	struct qlcnic_cmd_args *cmd = NULL;
1623 u32 temp = 0; 1618 u32 temp = 0;
 1624	struct qlcnic_cmd_args cmd;
 1619	int err;
1625 1620
1626 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) 1621 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
1627 return -EIO; 1622 return -EIO;
1628 1623
 1629	err = qlcnic_alloc_mbx_args(&cmd, adapter,
 1624	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
1625 if (!cmd)
1626 return -ENOMEM;
1627
1628 err = qlcnic_alloc_mbx_args(cmd, adapter,
1630 QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); 1629 QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
1631 if (err) 1630 if (err)
 1632		return err;
 1631		goto out;
1633 1632
1633 cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
1634 qlcnic_83xx_set_interface_id_promisc(adapter, &temp); 1634 qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
 1635	cmd.req.arg[1] = (mode ? 1 : 0) | temp;
 1636	err = qlcnic_issue_cmd(adapter, &cmd);
 1637	if (err)
 1638		dev_info(&adapter->pdev->dev,
 1639			 "Promiscous mode config failed\n");
 1640
 1641	qlcnic_free_mbx_args(&cmd);
 1635	cmd->req.arg[1] = (mode ? 1 : 0) | temp;
 1636	err = qlcnic_issue_cmd(adapter, cmd);
 1637	if (!err)
 1638		return err;
 1639
 1640	qlcnic_free_mbx_args(cmd);
1641
1642out:
1643 kfree(cmd);
1642 return err; 1644 return err;
1643} 1645}
1644 1646
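Note the asymmetric cleanup above: for a QLC_83XX_MBX_CMD_NO_WAIT command the mailbox consumer frees both the arguments and the heap-allocated cmd (see qlcnic_83xx_notify_cmd_completion() later in this patch), so the function returns without freeing on success and unwinds only on failure. A compact model of that ownership hand-off; the stub stands in for a worker that frees the command:

#include <stdlib.h>

struct cmd { int payload; };

/* On success the consumer (mailbox worker) owns and frees the command;
 * on failure ownership never left the caller. */
static int issue_no_wait(struct cmd *c) { (void)c; return 0; }

int main(void)
{
	struct cmd *c = calloc(1, sizeof(*c));
	if (!c)
		return -1;
	if (issue_no_wait(c) == 0)
		return 0;	/* consumer frees c */
	free(c);		/* failure: still ours */
	return 1;
}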
@@ -1651,7 +1653,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1651 if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { 1653 if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1652 netdev_warn(netdev, 1654 netdev_warn(netdev,
1653 "Loopback test not supported in non privileged mode\n"); 1655 "Loopback test not supported in non privileged mode\n");
 1654		return ret;
 1656		return -ENOTSUPP;
1655 } 1657 }
1656 1658
1657 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { 1659 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
@@ -1679,19 +1681,17 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1679 /* Poll for link up event before running traffic */ 1681 /* Poll for link up event before running traffic */
1680 do { 1682 do {
1681 msleep(QLC_83XX_LB_MSLEEP_COUNT); 1683 msleep(QLC_83XX_LB_MSLEEP_COUNT);
1682 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1683 qlcnic_83xx_process_aen(adapter);
1684 1684
1685 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { 1685 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
1686 netdev_info(netdev, 1686 netdev_info(netdev,
1687 "Device is resetting, free LB test resources\n"); 1687 "Device is resetting, free LB test resources\n");
 1688			ret = -EIO;
 1688			ret = -EBUSY;
1689 goto free_diag_res; 1689 goto free_diag_res;
1690 } 1690 }
1691 if (loop++ > QLC_83XX_LB_WAIT_COUNT) { 1691 if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
1692 netdev_info(netdev, 1692 netdev_info(netdev,
1693 "Firmware didn't sent link up event to loopback request\n"); 1693 "Firmware didn't sent link up event to loopback request\n");
 1694			ret = -QLCNIC_FW_NOT_RESPOND;
 1694			ret = -ETIMEDOUT;
1695 qlcnic_83xx_clear_lb_mode(adapter, mode); 1695 qlcnic_83xx_clear_lb_mode(adapter, mode);
1696 goto free_diag_res; 1696 goto free_diag_res;
1697 } 1697 }
@@ -1700,7 +1700,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1700 /* Make sure carrier is off and queue is stopped during loopback */ 1700 /* Make sure carrier is off and queue is stopped during loopback */
1701 if (netif_running(netdev)) { 1701 if (netif_running(netdev)) {
1702 netif_carrier_off(netdev); 1702 netif_carrier_off(netdev);
 1703		netif_stop_queue(netdev);
 1703		netif_tx_stop_all_queues(netdev);
1704 } 1704 }
1705 1705
1706 ret = qlcnic_do_lb_test(adapter, mode); 1706 ret = qlcnic_do_lb_test(adapter, mode);
@@ -1716,18 +1716,42 @@ fail_diag_alloc:
1716 return ret; 1716 return ret;
1717} 1717}
1718 1718
1719static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
1720 u32 *max_wait_count)
1721{
1722 struct qlcnic_hardware_context *ahw = adapter->ahw;
1723 int temp;
1724
 1725	netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
1726 ahw->extend_lb_time);
1727 temp = ahw->extend_lb_time * 1000;
1728 *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
1729 ahw->extend_lb_time = 0;
1730}
1731
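The arithmetic above converts the firmware's extension (in seconds) into additional poll iterations: extra = extend_lb_time * 1000 / QLC_83XX_LB_MSLEEP_COUNT, since each loop iteration sleeps QLC_83XX_LB_MSLEEP_COUNT milliseconds. For example, if the poll interval were 500 ms (illustrative only; the constant is defined elsewhere in the driver), a 10-second extension would add 10 * 1000 / 500 = 20 iterations to max_wait_count.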
1719int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) 1732int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1720{ 1733{
1721 struct qlcnic_hardware_context *ahw = adapter->ahw; 1734 struct qlcnic_hardware_context *ahw = adapter->ahw;
1722 struct net_device *netdev = adapter->netdev; 1735 struct net_device *netdev = adapter->netdev;
1736 u32 config, max_wait_count;
1723 int status = 0, loop = 0; 1737 int status = 0, loop = 0;
1724 u32 config;
1725 1738
1739 ahw->extend_lb_time = 0;
1740 max_wait_count = QLC_83XX_LB_WAIT_COUNT;
1726 status = qlcnic_83xx_get_port_config(adapter); 1741 status = qlcnic_83xx_get_port_config(adapter);
1727 if (status) 1742 if (status)
1728 return status; 1743 return status;
1729 1744
1730 config = ahw->port_config; 1745 config = ahw->port_config;
1746
1747 /* Check if port is already in loopback mode */
1748 if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
1749 (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
1750 netdev_err(netdev,
1751 "Port already in Loopback mode.\n");
1752 return -EINPROGRESS;
1753 }
1754
1731 set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1755 set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1732 1756
1733 if (mode == QLCNIC_ILB_MODE) 1757 if (mode == QLCNIC_ILB_MODE)
@@ -1748,21 +1772,24 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1748 /* Wait for Link and IDC Completion AEN */ 1772 /* Wait for Link and IDC Completion AEN */
1749 do { 1773 do {
1750 msleep(QLC_83XX_LB_MSLEEP_COUNT); 1774 msleep(QLC_83XX_LB_MSLEEP_COUNT);
1751 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1752 qlcnic_83xx_process_aen(adapter);
1753 1775
1754 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { 1776 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
1755 netdev_info(netdev, 1777 netdev_info(netdev,
1756 "Device is resetting, free LB test resources\n"); 1778 "Device is resetting, free LB test resources\n");
1757 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1779 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 1758			return -EIO;
 1780			return -EBUSY;
1759 } 1781 }
 1760		if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
 1761			netdev_err(netdev,
 1762				   "Did not receive IDC completion AEN\n");
 1782
 1783		if (ahw->extend_lb_time)
 1784			qlcnic_extend_lb_idc_cmpltn_wait(adapter,
1785 &max_wait_count);
1786
1787 if (loop++ > max_wait_count) {
1788 netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
1789 __func__);
1763 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1790 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1764 qlcnic_83xx_clear_lb_mode(adapter, mode); 1791 qlcnic_83xx_clear_lb_mode(adapter, mode);
 1765			return -EIO;
 1792			return -ETIMEDOUT;
1766 } 1793 }
1767 } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); 1794 } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
1768 1795
@@ -1774,10 +1801,12 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1774int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) 1801int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1775{ 1802{
1776 struct qlcnic_hardware_context *ahw = adapter->ahw; 1803 struct qlcnic_hardware_context *ahw = adapter->ahw;
1804 u32 config = ahw->port_config, max_wait_count;
1777 struct net_device *netdev = adapter->netdev; 1805 struct net_device *netdev = adapter->netdev;
1778 int status = 0, loop = 0; 1806 int status = 0, loop = 0;
1779 u32 config = ahw->port_config;
1780 1807
1808 ahw->extend_lb_time = 0;
1809 max_wait_count = QLC_83XX_LB_WAIT_COUNT;
1781 set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1810 set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
1782 if (mode == QLCNIC_ILB_MODE) 1811 if (mode == QLCNIC_ILB_MODE)
1783 ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS; 1812 ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
@@ -1797,21 +1826,23 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1797 /* Wait for Link and IDC Completion AEN */ 1826 /* Wait for Link and IDC Completion AEN */
1798 do { 1827 do {
1799 msleep(QLC_83XX_LB_MSLEEP_COUNT); 1828 msleep(QLC_83XX_LB_MSLEEP_COUNT);
1800 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1801 qlcnic_83xx_process_aen(adapter);
1802 1829
1803 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { 1830 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
1804 netdev_info(netdev, 1831 netdev_info(netdev,
1805 "Device is resetting, free LB test resources\n"); 1832 "Device is resetting, free LB test resources\n");
1806 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1833 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 1807			return -EIO;
 1834			return -EBUSY;
1808 } 1835 }
1809 1836
 1810		if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
 1811			netdev_err(netdev,
 1812				   "Did not receive IDC completion AEN\n");
 1837		if (ahw->extend_lb_time)
 1838			qlcnic_extend_lb_idc_cmpltn_wait(adapter,
 1839							 &max_wait_count);
1840
1841 if (loop++ > max_wait_count) {
1842 netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
1843 __func__);
1813 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); 1844 clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 1814			return -EIO;
 1845			return -ETIMEDOUT;
1815 } 1846 }
1816 } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); 1847 } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
1817 1848
@@ -1950,25 +1981,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
1950int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, 1981int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
1951 u16 vlan_id, u8 op) 1982 u16 vlan_id, u8 op)
1952{ 1983{
 1953	int err;
 1984	struct qlcnic_cmd_args *cmd = NULL;
1954 u32 *buf, temp = 0;
1955 struct qlcnic_cmd_args cmd;
1956 struct qlcnic_macvlan_mbx mv; 1985 struct qlcnic_macvlan_mbx mv;
1986 u32 *buf, temp = 0;
1987 int err;
1957 1988
1958 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) 1989 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
1959 return -EIO; 1990 return -EIO;
1960 1991
 1961	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
 1992	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
1993 if (!cmd)
1994 return -ENOMEM;
1995
1996 err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
1962 if (err) 1997 if (err)
 1963		return err;
 1998		goto out;
1999
2000 cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
1964 2001
1965 if (vlan_id) 2002 if (vlan_id)
1966 op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 2003 op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
1967 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; 2004 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
1968 2005
 1969	cmd.req.arg[1] = op | (1 << 8);
 2006	cmd->req.arg[1] = op | (1 << 8);
1970 qlcnic_83xx_set_interface_id_macaddr(adapter, &temp); 2007 qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
 1971	cmd.req.arg[1] |= temp;
 2008	cmd->req.arg[1] |= temp;
1972 mv.vlan = vlan_id; 2009 mv.vlan = vlan_id;
1973 mv.mac_addr0 = addr[0]; 2010 mv.mac_addr0 = addr[0];
1974 mv.mac_addr1 = addr[1]; 2011 mv.mac_addr1 = addr[1];
@@ -1976,14 +2013,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
1976 mv.mac_addr3 = addr[3]; 2013 mv.mac_addr3 = addr[3];
1977 mv.mac_addr4 = addr[4]; 2014 mv.mac_addr4 = addr[4];
1978 mv.mac_addr5 = addr[5]; 2015 mv.mac_addr5 = addr[5];
 1979	buf = &cmd.req.arg[2];
 2016	buf = &cmd->req.arg[2];
1980 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); 2017 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
 1981	err = qlcnic_issue_cmd(adapter, &cmd);
 1982	if (err)
 1983		dev_err(&adapter->pdev->dev,
 1984			"MAC-VLAN %s to CAM failed, err=%d.\n",
 1985			((op == 1) ? "add " : "delete "), err);
 1986	qlcnic_free_mbx_args(&cmd);
 2018	err = qlcnic_issue_cmd(adapter, cmd);
 2019	if (!err)
 2020		return err;
 2021
 2022	qlcnic_free_mbx_args(cmd);
 2023out:
2024 kfree(cmd);
1987 return err; 2025 return err;
1988} 2026}
1989 2027
@@ -2008,12 +2046,14 @@ void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
2008 cmd->req.arg[1] = type; 2046 cmd->req.arg[1] = type;
2009} 2047}
2010 2048
 2011int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
 2049int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
2050 u8 function)
2012{ 2051{
2013 int err, i; 2052 int err, i;
2014 struct qlcnic_cmd_args cmd; 2053 struct qlcnic_cmd_args cmd;
2015 u32 mac_low, mac_high; 2054 u32 mac_low, mac_high;
2016 2055
2056 function = 0;
2017 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); 2057 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
2018 if (err) 2058 if (err)
2019 return err; 2059 return err;
@@ -2099,10 +2139,12 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
2099irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) 2139irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
2100{ 2140{
2101 struct qlcnic_adapter *adapter = data; 2141 struct qlcnic_adapter *adapter = data;
 2102	unsigned long flags;
 2142	struct qlcnic_mailbox *mbx;
2103 u32 mask, resp, event; 2143 u32 mask, resp, event;
2144 unsigned long flags;
2104 2145
 2105	spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 2146	mbx = adapter->ahw->mailbox;
2147 spin_lock_irqsave(&mbx->aen_lock, flags);
2106 resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); 2148 resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
2107 if (!(resp & QLCNIC_SET_OWNER)) 2149 if (!(resp & QLCNIC_SET_OWNER))
2108 goto out; 2150 goto out;
@@ -2110,11 +2152,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
2110 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); 2152 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
2111 if (event & QLCNIC_MBX_ASYNC_EVENT) 2153 if (event & QLCNIC_MBX_ASYNC_EVENT)
2112 __qlcnic_83xx_process_aen(adapter); 2154 __qlcnic_83xx_process_aen(adapter);
2155 else
2156 qlcnic_83xx_notify_mbx_response(mbx);
2157
2113out: 2158out:
2114 mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); 2159 mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
2115 writel(0, adapter->ahw->pci_base0 + mask); 2160 writel(0, adapter->ahw->pci_base0 + mask);
 2116	spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 2161	spin_unlock_irqrestore(&mbx->aen_lock, flags);
2117
2118 return IRQ_HANDLED; 2162 return IRQ_HANDLED;
2119} 2163}
2120 2164
@@ -2287,7 +2331,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
2287 pci_info->tx_max_bw, pci_info->mac); 2331 pci_info->tx_max_bw, pci_info->mac);
2288 } 2332 }
2289 if (ahw->op_mode == QLCNIC_MGMT_FUNC) 2333 if (ahw->op_mode == QLCNIC_MGMT_FUNC)
 2290		dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
 2334		dev_info(dev, "Max functions = %d, active functions = %d\n",
2291 ahw->max_pci_func, ahw->act_pci_func); 2335 ahw->max_pci_func, ahw->act_pci_func);
2292 2336
2293 } else { 2337 } else {
@@ -3477,3 +3521,360 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
3477 idc->delay); 3521 idc->delay);
3478 return err; 3522 return err;
3479} 3523}
3524
3525void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
3526{
3527 INIT_COMPLETION(mbx->completion);
3528 set_bit(QLC_83XX_MBX_READY, &mbx->status);
3529}
3530
3531void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
3532{
3533 destroy_workqueue(mbx->work_q);
3534 kfree(mbx);
3535}
3536
3537static inline void
3538qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
3539 struct qlcnic_cmd_args *cmd)
3540{
3541 atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
3542
3543 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
3544 qlcnic_free_mbx_args(cmd);
3545 kfree(cmd);
3546 return;
3547 }
3548 complete(&cmd->completion);
3549}
3550
3551static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
3552{
3553 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
3554 struct list_head *head = &mbx->cmd_q;
3555 struct qlcnic_cmd_args *cmd = NULL;
3556
3557 spin_lock(&mbx->queue_lock);
3558
3559 while (!list_empty(head)) {
3560 cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
3561 dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
3562 __func__, cmd->cmd_op);
3563 list_del(&cmd->list);
3564 mbx->num_cmds--;
3565 qlcnic_83xx_notify_cmd_completion(adapter, cmd);
3566 }
3567
3568 spin_unlock(&mbx->queue_lock);
3569}
3570
3571static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
3572{
3573 struct qlcnic_hardware_context *ahw = adapter->ahw;
3574 struct qlcnic_mailbox *mbx = ahw->mailbox;
3575 u32 host_mbx_ctrl;
3576
3577 if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
3578 return -EBUSY;
3579
3580 host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
3581 if (host_mbx_ctrl) {
3582 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
3583 ahw->idc.collect_dump = 1;
3584 return -EIO;
3585 }
3586
3587 return 0;
3588}
3589
3590static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
3591 u8 issue_cmd)
3592{
3593 if (issue_cmd)
3594 QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
3595 else
3596 QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
3597}
3598
3599static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
3600 struct qlcnic_cmd_args *cmd)
3601{
3602 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
3603
3604 spin_lock(&mbx->queue_lock);
3605
3606 list_del(&cmd->list);
3607 mbx->num_cmds--;
3608
3609 spin_unlock(&mbx->queue_lock);
3610
3611 qlcnic_83xx_notify_cmd_completion(adapter, cmd);
3612}
3613
3614static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
3615 struct qlcnic_cmd_args *cmd)
3616{
3617 u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
3618 struct qlcnic_hardware_context *ahw = adapter->ahw;
3619 int i, j;
3620
3621 if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
3622 mbx_cmd = cmd->req.arg[0];
3623 writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
3624 for (i = 1; i < cmd->req.num; i++)
3625 writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
3626 } else {
3627 fw_hal_version = ahw->fw_hal_version;
3628 hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
3629 total_size = cmd->pay_size + hdr_size;
3630 tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
3631 mbx_cmd = tmp | fw_hal_version << 29;
3632 writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
3633
3634 /* Back channel specific operations bits */
3635 mbx_cmd = 0x1 | 1 << 4;
3636
3637 if (qlcnic_sriov_pf_check(adapter))
3638 mbx_cmd |= cmd->func_num << 5;
3639
3640 writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
3641
3642 for (i = 2, j = 0; j < hdr_size; i++, j++)
3643 writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
3644 for (j = 0; j < cmd->pay_size; j++, i++)
3645 writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
3646 }
3647}
3648
3649void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
3650{
3651 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
3652
3653 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
3654 complete(&mbx->completion);
3655 cancel_work_sync(&mbx->work);
3656 flush_workqueue(mbx->work_q);
3657 qlcnic_83xx_flush_mbx_queue(adapter);
3658}
3659
3660static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
3661 struct qlcnic_cmd_args *cmd,
3662 unsigned long *timeout)
3663{
3664 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
3665
3666 if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
3667 atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
3668 init_completion(&cmd->completion);
3669 cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
3670
3671 spin_lock(&mbx->queue_lock);
3672
3673 list_add_tail(&cmd->list, &mbx->cmd_q);
3674 mbx->num_cmds++;
3675 cmd->total_cmds = mbx->num_cmds;
3676 *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
3677 queue_work(mbx->work_q, &mbx->work);
3678
3679 spin_unlock(&mbx->queue_lock);
3680
3681 return 0;
3682 }
3683
3684 return -EBUSY;
3685}
3686
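Note the timeout arithmetic in the enqueue path: a command's wait budget scales with queue depth, *timeout = total_cmds * QLC_83XX_MBX_TIMEOUT. With QLC_83XX_MBX_TIMEOUT defined as 5 * HZ in the header hunk below, a command queued behind two others gets 3 * 5 = 15 seconds before the WAIT path in qlcnic_83xx_issue_cmd() reports a timeout.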
3687static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
3688 struct qlcnic_cmd_args *cmd)
3689{
3690 u8 mac_cmd_rcode;
3691 u32 fw_data;
3692
3693 if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
3694 fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
3695 mac_cmd_rcode = (u8)fw_data;
3696 if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
3697 mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
3698 mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
3699 cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
3700 return QLCNIC_RCODE_SUCCESS;
3701 }
3702 }
3703
3704 return -EINVAL;
3705}
3706
3707static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
3708 struct qlcnic_cmd_args *cmd)
3709{
3710 struct qlcnic_hardware_context *ahw = adapter->ahw;
3711 struct device *dev = &adapter->pdev->dev;
3712 u8 mbx_err_code;
3713 u32 fw_data;
3714
3715 fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
3716 mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
3717 qlcnic_83xx_get_mbx_data(adapter, cmd);
3718
3719 switch (mbx_err_code) {
3720 case QLCNIC_MBX_RSP_OK:
3721 case QLCNIC_MBX_PORT_RSP_OK:
3722 cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
3723 break;
3724 default:
3725 if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
3726 break;
3727
3728 dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
3729 __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
3730 ahw->op_mode, mbx_err_code);
3731 cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
3732 qlcnic_dump_mbx(adapter, cmd);
3733 }
3734
3735 return;
3736}
3737
3738static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3739{
3740 struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
3741 work);
3742 struct qlcnic_adapter *adapter = mbx->adapter;
3743 struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
3744 struct device *dev = &adapter->pdev->dev;
3745 atomic_t *rsp_status = &mbx->rsp_status;
3746 struct list_head *head = &mbx->cmd_q;
3747 struct qlcnic_hardware_context *ahw;
3748 struct qlcnic_cmd_args *cmd = NULL;
3749
3750 ahw = adapter->ahw;
3751
3752 while (true) {
3753 if (qlcnic_83xx_check_mbx_status(adapter)) {
3754 qlcnic_83xx_flush_mbx_queue(adapter);
3755 return;
3756 }
3757
3758 atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
3759
3760 spin_lock(&mbx->queue_lock);
3761
3762 if (list_empty(head)) {
3763 spin_unlock(&mbx->queue_lock);
3764 return;
3765 }
3766 cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
3767
3768 spin_unlock(&mbx->queue_lock);
3769
3770 mbx_ops->encode_cmd(adapter, cmd);
 3771		mbx_ops->notify_fw(adapter, QLC_83XX_MBX_REQUEST);
3772
3773 if (wait_for_completion_timeout(&mbx->completion,
3774 QLC_83XX_MBX_TIMEOUT)) {
3775 mbx_ops->decode_resp(adapter, cmd);
 3776			mbx_ops->notify_fw(adapter, QLC_83XX_MBX_COMPLETION);
3777 } else {
3778 dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
3779 __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
3780 ahw->op_mode);
3781 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
3782 qlcnic_dump_mbx(adapter, cmd);
3783 qlcnic_83xx_idc_request_reset(adapter,
3784 QLCNIC_FORCE_FW_DUMP_KEY);
3785 cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
3786 }
3787 mbx_ops->dequeue_cmd(adapter, cmd);
3788 }
3789}
3790
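The worker drains the queue one command at a time: encode into the mailbox registers, signal firmware, sleep on mbx->completion (woken by the AEN/response demux shown earlier), then decode and dequeue. Because the queue is serviced by a single-threaded workqueue, the shared mailbox registers are never written concurrently. A stand-alone model of the loop's shape, with all hardware I/O stubbed out:

#include <stdbool.h>
#include <stdio.h>

struct mcmd { int op; struct mcmd *next; };

static struct mcmd *queue_head;				/* mbx->cmd_q */

static void encode(struct mcmd *c) { printf("encode op %d\n", c->op); }
static bool wait_response(void)    { return true; }	/* completion wait */
static void decode(struct mcmd *c) { printf("decode op %d\n", c->op); }

/* Single consumer: commands are serialized through one worker. */
static void mailbox_worker(void)
{
	while (queue_head) {
		struct mcmd *c = queue_head;

		encode(c);
		if (wait_response())
			decode(c);
		else
			puts("timeout: mark mailbox dead, dump regs");
		queue_head = c->next;			/* dequeue */
	}
}

int main(void)
{
	struct mcmd b = { 2, NULL }, a = { 1, &b };
	queue_head = &a;
	mailbox_worker();
	return 0;
}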
3791static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
3792 .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
3793 .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
3794 .decode_resp = qlcnic_83xx_decode_mbx_rsp,
3795 .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
 3796	.notify_fw = qlcnic_83xx_signal_mbx_cmd,
3797};
3798
3799int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
3800{
3801 struct qlcnic_hardware_context *ahw = adapter->ahw;
3802 struct qlcnic_mailbox *mbx;
3803
3804 ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
3805 if (!ahw->mailbox)
3806 return -ENOMEM;
3807
3808 mbx = ahw->mailbox;
3809 mbx->ops = &qlcnic_83xx_mbx_ops;
3810 mbx->adapter = adapter;
3811
3812 spin_lock_init(&mbx->queue_lock);
3813 spin_lock_init(&mbx->aen_lock);
3814 INIT_LIST_HEAD(&mbx->cmd_q);
3815 init_completion(&mbx->completion);
3816
3817 mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
3818 if (mbx->work_q == NULL) {
3819 kfree(mbx);
3820 return -ENOMEM;
3821 }
3822
3823 INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
3824 set_bit(QLC_83XX_MBX_READY, &mbx->status);
3825 return 0;
3826}
3827
3828pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
3829 pci_channel_state_t state)
3830{
3831 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3832
3833 if (state == pci_channel_io_perm_failure)
3834 return PCI_ERS_RESULT_DISCONNECT;
3835
3836 if (state == pci_channel_io_normal)
3837 return PCI_ERS_RESULT_RECOVERED;
3838
3839 set_bit(__QLCNIC_AER, &adapter->state);
3840 set_bit(__QLCNIC_RESETTING, &adapter->state);
3841
3842 qlcnic_83xx_aer_stop_poll_work(adapter);
3843
3844 pci_save_state(pdev);
3845 pci_disable_device(pdev);
3846
3847 return PCI_ERS_RESULT_NEED_RESET;
3848}
3849
3850pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
3851{
3852 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3853 int err = 0;
3854
3855 pdev->error_state = pci_channel_io_normal;
3856 err = pci_enable_device(pdev);
3857 if (err)
3858 goto disconnect;
3859
3860 pci_set_power_state(pdev, PCI_D0);
3861 pci_set_master(pdev);
3862 pci_restore_state(pdev);
3863
3864 err = qlcnic_83xx_aer_reset(adapter);
3865 if (err == 0)
3866 return PCI_ERS_RESULT_RECOVERED;
3867disconnect:
3868 clear_bit(__QLCNIC_AER, &adapter->state);
3869 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3870 return PCI_ERS_RESULT_DISCONNECT;
3871}
3872
3873void qlcnic_83xx_io_resume(struct pci_dev *pdev)
3874{
3875 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3876
3877 pci_cleanup_aer_uncorrect_error_status(pdev);
3878 if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3879 qlcnic_83xx_aer_start_poll_work(adapter);
3880}
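These three callbacks implement the standard PCI AER recovery sequence: error_detected() runs first (here answering DISCONNECT for a permanent failure, RECOVERED for io_normal, and NEED_RESET otherwise), slot_reset() re-enables the device after the link reset, and resume() restarts normal operation. A sketch of how they would be wired into a struct pci_error_handlers table (the table name is illustrative; the actual registration lives outside this hunk):

    static const struct pci_error_handlers qlcnic_83xx_err_handler = {
    	.error_detected = qlcnic_83xx_io_error_detected,
    	.slot_reset = qlcnic_83xx_io_slot_reset,
    	.resume = qlcnic_83xx_io_resume,
    };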
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 272f56a2e14b..533e150503af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -84,11 +84,20 @@
84/* Firmware image definitions */ 84/* Firmware image definitions */
85#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000 85#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
86#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin" 86#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
87#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
87#define QLC_83XX_BOOT_FROM_FLASH 0 88#define QLC_83XX_BOOT_FROM_FLASH 0
88#define QLC_83XX_BOOT_FROM_FILE 0x12345678 89#define QLC_83XX_BOOT_FROM_FILE 0x12345678
89 90
91#define QLC_FW_FILE_NAME_LEN 20
90#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 92#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
91 93
94#define QLC_83XX_MBX_POST_BC_OP 0x1
95#define QLC_83XX_MBX_COMPLETION 0x0
96#define QLC_83XX_MBX_REQUEST 0x1
97
98#define QLC_83XX_MBX_TIMEOUT (5 * HZ)
99#define QLC_83XX_MBX_CMD_LOOP 5000000
100
92/* status descriptor mailbox data 101/* status descriptor mailbox data
93 * @phy_addr_{low|high}: physical address of buffer 102 * @phy_addr_{low|high}: physical address of buffer
94 * @sds_ring_size: buffer size 103 * @sds_ring_size: buffer size
@@ -265,11 +274,7 @@ struct qlcnic_macvlan_mbx {
265 274
266struct qlc_83xx_fw_info { 275struct qlc_83xx_fw_info {
267 const struct firmware *fw; 276 const struct firmware *fw;
268 u16 major_fw_version; 277 char fw_file_name[QLC_FW_FILE_NAME_LEN];
269 u8 minor_fw_version;
270 u8 sub_fw_version;
271 u8 fw_build_num;
272 u8 load_from_file;
273}; 278};
274 279
275struct qlc_83xx_reset { 280struct qlc_83xx_reset {
@@ -288,6 +293,7 @@ struct qlc_83xx_reset {
288 293
289#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1 294#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
290#define QLC_83XX_IDC_GRACEFULL_RESET 0x2 295#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
296#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4
291#define QLC_83XX_IDC_TIMESTAMP 0 297#define QLC_83XX_IDC_TIMESTAMP 0
292#define QLC_83XX_IDC_DURATION 1 298#define QLC_83XX_IDC_DURATION 1
293#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30 299#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
@@ -397,6 +403,7 @@ enum qlcnic_83xx_states {
397#define QLC_83XX_MAX_MC_COUNT 38 403#define QLC_83XX_MAX_MC_COUNT 38
398#define QLC_83XX_MAX_UC_COUNT 4096 404#define QLC_83XX_MAX_UC_COUNT 4096
399 405
406#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
400#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) 407#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
401#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) 408#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
402#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40) 409#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
@@ -404,6 +411,7 @@ enum qlcnic_83xx_states {
404#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400) 411#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
405#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000) 412#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
406#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) 413#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
414#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
407#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF 415#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
408#define QLC_83XX_DEFAULT_MODE 0x0 416#define QLC_83XX_DEFAULT_MODE 0x0
409#define QLC_83XX_SRIOV_MODE 0x1 417#define QLC_83XX_SRIOV_MODE 0x1
@@ -449,6 +457,20 @@ enum qlcnic_83xx_states {
449#define QLC_83xx_FLASH_MAX_WAIT_USEC 100 457#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
450#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000 458#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
451 459
460enum qlc_83xx_mbx_cmd_type {
461 QLC_83XX_MBX_CMD_WAIT = 0,
462 QLC_83XX_MBX_CMD_NO_WAIT,
463 QLC_83XX_MBX_CMD_BUSY_WAIT,
464};
465
466enum qlc_83xx_mbx_response_states {
467 QLC_83XX_MBX_RESPONSE_WAIT = 0,
468 QLC_83XX_MBX_RESPONSE_ARRIVED,
469};
470
471#define QLC_83XX_MBX_RESPONSE_FAILED 0x2
472#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
473
452/* Additional registers in 83xx */ 474/* Additional registers in 83xx */
453enum qlc_83xx_ext_regs { 475enum qlc_83xx_ext_regs {
454 QLCNIC_GLOBAL_RESET = 0, 476 QLCNIC_GLOBAL_RESET = 0,
@@ -498,8 +520,8 @@ enum qlc_83xx_ext_regs {
498 520
499/* 83xx functions */ 521/* 83xx functions */
500int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *); 522int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
501int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *); 523int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
502int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8); 524int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
503void qlcnic_83xx_get_func_no(struct qlcnic_adapter *); 525void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
504int qlcnic_83xx_cam_lock(struct qlcnic_adapter *); 526int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
505void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *); 527void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -540,7 +562,7 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
540void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *); 562void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
541int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool); 563int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
542int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8); 564int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
543int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *); 565int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
544void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8, 566void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
545 struct qlcnic_cmd_args *); 567 struct qlcnic_cmd_args *);
546int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *, 568int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
@@ -551,7 +573,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
551void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *); 573void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
552irqreturn_t qlcnic_83xx_handle_aen(int, void *); 574irqreturn_t qlcnic_83xx_handle_aen(int, void *);
553int qlcnic_83xx_get_port_info(struct qlcnic_adapter *); 575int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
554void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *); 576void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
555void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *); 577void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
556irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *); 578irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
557irqreturn_t qlcnic_83xx_intr(int, void *); 579irqreturn_t qlcnic_83xx_intr(int, void *);
@@ -604,6 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
604int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *, 626int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
605 struct qlcnic_info *, u8); 627 struct qlcnic_info *, u8);
606int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); 628int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
629int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
607 630
608void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); 631void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
609void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); 632void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
@@ -623,8 +646,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
623int qlcnic_83xx_flash_test(struct qlcnic_adapter *); 646int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
624int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); 647int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
625int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); 648int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
626u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
627u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
628void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); 649void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
629void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); 650void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
630void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *); 651void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
@@ -634,4 +655,11 @@ int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
634int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *); 655int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
635int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *); 656int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
636int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *); 657int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
658void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
659int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
660void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
661pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
662 pci_channel_state_t);
663pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
664void qlcnic_83xx_io_resume(struct pci_dev *);
637#endif 665#endif
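The response states added to this header split cleanly into success and three failure classes. A hypothetical helper (not part of this patch) mapping them to errno values, which is how callers typically consume such states:

    static int example_mbx_rsp_to_errno(int rsp_status)
    {
    	switch (rsp_status) {
    	case QLC_83XX_MBX_RESPONSE_ARRIVED:
    		return 0;
    	case QLC_83XX_MBX_RESPONSE_WAIT:
    		return -ETIMEDOUT; /* no response within QLC_83XX_MBX_TIMEOUT */
    	case QLC_83XX_MBX_RESPONSE_FAILED:
    		return -EIO;
    	case QLC_83XX_MBX_RESPONSE_UNKNOWN:
    	default:
    		return -EINVAL;
    	}
    }

Note that QLC_83XX_MBX_RESPONSE_FAILED (0x2) and QLC_83XX_MBX_RESPONSE_UNKNOWN (0x3) continue the value space of the enum, so a single status word can carry all four states.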
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 345d987aede4..f09e787af0b2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
399 struct net_device *netdev = adapter->netdev; 399 struct net_device *netdev = adapter->netdev;
400 400
401 netif_device_detach(netdev); 401 netif_device_detach(netdev);
402 qlcnic_83xx_detach_mailbox_work(adapter);
402 403
403 /* Disable mailbox interrupt */ 404 /* Disable mailbox interrupt */
404 qlcnic_83xx_disable_mbx_intr(adapter); 405 qlcnic_83xx_disable_mbx_intr(adapter);
@@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
610{ 611{
611 int err; 612 int err;
612 613
614 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
615 qlcnic_83xx_enable_mbx_interrupt(adapter);
616
613 /* register for NIC IDC AEN Events */ 617 /* register for NIC IDC AEN Events */
614 qlcnic_83xx_register_nic_idc_func(adapter, 1); 618 qlcnic_83xx_register_nic_idc_func(adapter, 1);
615 619
@@ -617,7 +621,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
617 if (err) 621 if (err)
618 return err; 622 return err;
619 623
620 qlcnic_83xx_enable_mbx_intrpt(adapter); 624 qlcnic_83xx_enable_mbx_interrupt(adapter);
621 625
622 if (qlcnic_83xx_configure_opmode(adapter)) { 626 if (qlcnic_83xx_configure_opmode(adapter)) {
623 qlcnic_83xx_idc_enter_failed_state(adapter, 1); 627 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -631,6 +635,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
631 635
632 if (adapter->portnum == 0) 636 if (adapter->portnum == 0)
633 qlcnic_set_drv_version(adapter); 637 qlcnic_set_drv_version(adapter);
638
639 qlcnic_dcb_get_info(adapter);
634 qlcnic_83xx_idc_attach_driver(adapter); 640 qlcnic_83xx_idc_attach_driver(adapter);
635 641
636 return 0; 642 return 0;
@@ -641,7 +647,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
641 struct qlcnic_hardware_context *ahw = adapter->ahw; 647 struct qlcnic_hardware_context *ahw = adapter->ahw;
642 648
643 qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); 649 qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
644 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
645 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); 650 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
646 set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); 651 set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
647 652
@@ -792,7 +797,6 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
792 ret = qlcnic_83xx_idc_restart_hw(adapter, 1); 797 ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
793 } else { 798 } else {
794 ret = qlcnic_83xx_idc_check_timeout(adapter, timeout); 799 ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
795 return ret;
796 } 800 }
797 801
798 return ret; 802 return ret;
@@ -811,9 +815,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
811 **/ 815 **/
812static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) 816static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
813{ 817{
814 u32 val;
815 struct qlcnic_hardware_context *ahw = adapter->ahw; 818 struct qlcnic_hardware_context *ahw = adapter->ahw;
819 struct qlcnic_mailbox *mbx = ahw->mailbox;
816 int ret = 0; 820 int ret = 0;
821 u32 val;
817 822
818 /* Perform NIC configuration based ready state entry actions */ 823 /* Perform NIC configuration based ready state entry actions */
819 if (ahw->idc.state_entry(adapter)) 824 if (ahw->idc.state_entry(adapter))
@@ -825,7 +830,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
825 dev_err(&adapter->pdev->dev, 830 dev_err(&adapter->pdev->dev,
826 "Error: device temperature %d above limits\n", 831 "Error: device temperature %d above limits\n",
827 adapter->ahw->temp); 832 adapter->ahw->temp);
828 clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); 833 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
829 set_bit(__QLCNIC_RESETTING, &adapter->state); 834 set_bit(__QLCNIC_RESETTING, &adapter->state);
830 qlcnic_83xx_idc_detach_driver(adapter); 835 qlcnic_83xx_idc_detach_driver(adapter);
831 qlcnic_83xx_idc_enter_failed_state(adapter, 1); 836 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -838,7 +843,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
838 if (ret) { 843 if (ret) {
839 adapter->flags |= QLCNIC_FW_HANG; 844 adapter->flags |= QLCNIC_FW_HANG;
840 if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { 845 if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
841 clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); 846 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
842 set_bit(__QLCNIC_RESETTING, &adapter->state); 847 set_bit(__QLCNIC_RESETTING, &adapter->state);
843 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); 848 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
844 } 849 }
@@ -846,6 +851,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
846 } 851 }
847 852
848 if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) { 853 if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
854 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
855
849 /* Move to need reset state and prepare for reset */ 856 /* Move to need reset state and prepare for reset */
850 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); 857 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
851 return ret; 858 return ret;
@@ -883,12 +890,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
883 **/ 890 **/
884static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) 891static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
885{ 892{
893 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
886 int ret = 0; 894 int ret = 0;
887 895
888 if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { 896 if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
889 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); 897 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
890 set_bit(__QLCNIC_RESETTING, &adapter->state); 898 set_bit(__QLCNIC_RESETTING, &adapter->state);
891 clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); 899 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
892 if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) 900 if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
893 qlcnic_83xx_disable_vnic_mode(adapter, 1); 901 qlcnic_83xx_disable_vnic_mode(adapter, 1);
894 902
@@ -1080,7 +1088,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
1080 adapter->ahw->idc.name = (char **)qlc_83xx_idc_states; 1088 adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
1081 1089
1082 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1090 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1083 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
1084 set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); 1091 set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1085 1092
1086 /* Check if reset recovery is disabled */ 1093 /* Check if reset recovery is disabled */
@@ -1191,6 +1198,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
1191{ 1198{
1192 u32 val; 1199 u32 val;
1193 1200
1201 if (qlcnic_sriov_vf_check(adapter))
1202 return;
1203
1194 if (qlcnic_83xx_lock_driver(adapter)) { 1204 if (qlcnic_83xx_lock_driver(adapter)) {
1195 dev_err(&adapter->pdev->dev, 1205 dev_err(&adapter->pdev->dev,
1196 "%s:failed, please retry\n", __func__); 1206 "%s:failed, please retry\n", __func__);
@@ -1257,31 +1267,33 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
1257 1267
1258static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) 1268static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1259{ 1269{
1270 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
1271 const struct firmware *fw = fw_info->fw;
1260 u32 dest, *p_cache; 1272 u32 dest, *p_cache;
1261 u64 addr; 1273 int i, ret = -EIO;
1262 u8 data[16]; 1274 u8 data[16];
1263 size_t size; 1275 size_t size;
1264 int i, ret = -EIO; 1276 u64 addr;
1265 1277
1266 dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); 1278 dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
1267 size = (adapter->ahw->fw_info.fw->size & ~0xF); 1279 size = (fw->size & ~0xF);
1268 p_cache = (u32 *)adapter->ahw->fw_info.fw->data; 1280 p_cache = (u32 *)fw->data;
1269 addr = (u64)dest; 1281 addr = (u64)dest;
1270 1282
1271 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1283 ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
1272 (u32 *)p_cache, size / 16); 1284 (u32 *)p_cache, size / 16);
1273 if (ret) { 1285 if (ret) {
1274 dev_err(&adapter->pdev->dev, "MS memory write failed\n"); 1286 dev_err(&adapter->pdev->dev, "MS memory write failed\n");
1275 release_firmware(adapter->ahw->fw_info.fw); 1287 release_firmware(fw);
1276 adapter->ahw->fw_info.fw = NULL; 1288 fw_info->fw = NULL;
1277 return -EIO; 1289 return -EIO;
1278 } 1290 }
1279 1291
1280 /* alignment check */ 1292 /* alignment check */
1281 if (adapter->ahw->fw_info.fw->size & 0xF) { 1293 if (fw->size & 0xF) {
1282 addr = dest + size; 1294 addr = dest + size;
1283 for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++) 1295 for (i = 0; i < (fw->size & 0xF); i++)
1284 data[i] = adapter->ahw->fw_info.fw->data[size + i]; 1296 data[i] = fw->data[size + i];
1285 for (; i < 16; i++) 1297 for (; i < 16; i++)
1286 data[i] = 0; 1298 data[i] = 0;
1287 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1299 ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
@@ -1289,13 +1301,13 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1289 if (ret) { 1301 if (ret) {
1290 dev_err(&adapter->pdev->dev, 1302 dev_err(&adapter->pdev->dev,
1291 "MS memory write failed\n"); 1303 "MS memory write failed\n");
1292 release_firmware(adapter->ahw->fw_info.fw); 1304 release_firmware(fw);
1293 adapter->ahw->fw_info.fw = NULL; 1305 fw_info->fw = NULL;
1294 return -EIO; 1306 return -EIO;
1295 } 1307 }
1296 } 1308 }
1297 release_firmware(adapter->ahw->fw_info.fw); 1309 release_firmware(fw);
1298 adapter->ahw->fw_info.fw = NULL; 1310 fw_info->fw = NULL;
1299 1311
1300 return 0; 1312 return 0;
1301} 1313}
@@ -1941,10 +1953,11 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
1941 1953
1942static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) 1954static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
1943{ 1955{
1956 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
1944 int err = -EIO; 1957 int err = -EIO;
1945 1958
1946 if (request_firmware(&adapter->ahw->fw_info.fw, 1959 if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
1947 QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) { 1960 &(adapter->pdev->dev))) {
1948 dev_err(&adapter->pdev->dev, 1961 dev_err(&adapter->pdev->dev,
1949 "No file FW image, loading flash FW image.\n"); 1962 "No file FW image, loading flash FW image.\n");
1950 QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, 1963 QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
@@ -1990,36 +2003,6 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
1990 return 0; 2003 return 0;
1991} 2004}
1992 2005
1993/**
1994* qlcnic_83xx_config_default_opmode
1995*
1996* @adapter: adapter structure
1997*
1998* Configure default driver operating mode
1999*
2000* Returns: Error code or Success(0)
2001* */
2002int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
2003{
2004 u32 op_mode;
2005 struct qlcnic_hardware_context *ahw = adapter->ahw;
2006
2007 qlcnic_get_func_no(adapter);
2008 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
2009
2010 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
2011 op_mode = QLC_83XX_DEFAULT_OPMODE;
2012
2013 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
2014 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2015 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2016 } else {
2017 return -EIO;
2018 }
2019
2020 return 0;
2021}
2022
2023int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) 2006int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2024{ 2007{
2025 int err; 2008 int err;
@@ -2039,26 +2022,26 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2039 ahw->max_mac_filters = nic_info.max_mac_filters; 2022 ahw->max_mac_filters = nic_info.max_mac_filters;
2040 ahw->max_mtu = nic_info.max_mtu; 2023 ahw->max_mtu = nic_info.max_mtu;
2041 2024
2042 /* VNIC mode is detected by BIT_23 in capabilities. This bit is also 2025 /* eSwitch capability indicates vNIC mode.
2043 * set in case device is SRIOV capable. VNIC and SRIOV are mutually 2026 * vNIC and SRIOV are mutually exclusive operational modes.
2044 * exclusive. So in case of sriov capable device load driver in 2027 * If SR-IOV capability is detected, SR-IOV physical function
2045 * default mode 2028 * is initialized in default mode.
2029 * SR-IOV virtual function initialization follows a
2030 * different code path and opmode.
2031 * SR-IOV mode takes precedence over vNIC mode.
2046 */ 2032 */
2047 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) { 2033 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
2048 ahw->nic_mode = QLC_83XX_DEFAULT_MODE; 2034 return QLC_83XX_DEFAULT_OPMODE;
2049 return ahw->nic_mode;
2050 }
2051 2035
2052 if (ahw->capabilities & BIT_23) 2036 if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
2053 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; 2037 return QLC_83XX_VIRTUAL_NIC_MODE;
2054 else
2055 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
2056 2038
2057 return ahw->nic_mode; 2039 return QLC_83XX_DEFAULT_OPMODE;
2058} 2040}
2059 2041
2060int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) 2042int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2061{ 2043{
2044 struct qlcnic_hardware_context *ahw = adapter->ahw;
2062 int ret; 2045 int ret;
2063 2046
2064 ret = qlcnic_83xx_get_nic_configuration(adapter); 2047 ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2066,11 +2049,16 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2066 return -EIO; 2049 return -EIO;
2067 2050
2068 if (ret == QLC_83XX_VIRTUAL_NIC_MODE) { 2051 if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
2052 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
2069 if (qlcnic_83xx_config_vnic_opmode(adapter)) 2053 if (qlcnic_83xx_config_vnic_opmode(adapter))
2070 return -EIO; 2054 return -EIO;
2071 } else if (ret == QLC_83XX_DEFAULT_MODE) { 2055
2072 if (qlcnic_83xx_config_default_opmode(adapter)) 2056 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
2073 return -EIO; 2057 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
2058 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2059 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2060 } else {
2061 return -EIO;
2074 } 2062 }
2075 2063
2076 return 0; 2064 return 0;
@@ -2139,20 +2127,82 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
2139 } 2127 }
2140} 2128}
2141 2129
2130static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
2131{
2132 struct qlcnic_hardware_context *ahw = adapter->ahw;
2133 struct pci_dev *pdev = adapter->pdev;
2134 struct qlc_83xx_fw_info *fw_info;
2135 int err = 0;
2136
2137 ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
2138 if (!ahw->fw_info) {
2139 err = -ENOMEM;
2140 } else {
2141 fw_info = ahw->fw_info;
2142 switch (pdev->device) {
2143 case PCI_DEVICE_ID_QLOGIC_QLE834X:
2144 strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
2145 QLC_FW_FILE_NAME_LEN);
2146 break;
2147 case PCI_DEVICE_ID_QLOGIC_QLE844X:
2148 strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
2149 QLC_FW_FILE_NAME_LEN);
2150 break;
2151 default:
2152 dev_err(&pdev->dev, "%s: Invalid device id\n",
2153 __func__);
2154 err = -EINVAL;
2155 break;
2156 }
2157 }
2158
2159 return err;
2160}
2161
2162
2142int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) 2163int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2143{ 2164{
2144 struct qlcnic_hardware_context *ahw = adapter->ahw; 2165 struct qlcnic_hardware_context *ahw = adapter->ahw;
2166 int err = 0;
2145 2167
2146 if (qlcnic_sriov_vf_check(adapter)) 2168 ahw->msix_supported = !!qlcnic_use_msi_x;
2147 return qlcnic_sriov_vf_init(adapter, pci_using_dac); 2169 err = qlcnic_83xx_init_mailbox_work(adapter);
2170 if (err)
2171 goto exit;
2148 2172
2149 if (qlcnic_83xx_check_hw_status(adapter)) 2173 if (qlcnic_sriov_vf_check(adapter)) {
2150 return -EIO; 2174 err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
2175 if (err)
2176 goto detach_mbx;
2177 else
2178 return err;
2179 }
2180
2181 err = qlcnic_83xx_check_hw_status(adapter);
2182 if (err)
2183 goto detach_mbx;
2184
2185 if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
2186 qlcnic_83xx_read_flash_mfg_id(adapter);
2187
2188 err = qlcnic_83xx_get_fw_info(adapter);
2189 if (err)
2190 goto detach_mbx;
2151 2191
2152 /* Initialize 83xx mailbox spinlock */ 2192 err = qlcnic_83xx_idc_init(adapter)
2153 spin_lock_init(&ahw->mbx_lock); 2193 if (err)
2194 goto clear_fw_info;
2195
2196 err = qlcnic_setup_intr(adapter, 0, 0);
2197 if (err) {
2198 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
2199 goto disable_intr;
2200 }
2201
2202 err = qlcnic_83xx_setup_mbx_intr(adapter);
2203 if (err)
2204 goto disable_mbx_intr;
2154 2205
2155 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
2156 qlcnic_83xx_clear_function_resources(adapter); 2206 qlcnic_83xx_clear_function_resources(adapter);
2157 2207
2158 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); 2208 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
@@ -2160,22 +2210,90 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2160 /* register for NIC IDC AEN Events */ 2210 /* register for NIC IDC AEN Events */
2161 qlcnic_83xx_register_nic_idc_func(adapter, 1); 2211 qlcnic_83xx_register_nic_idc_func(adapter, 1);
2162 2212
2163 if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
2164 qlcnic_83xx_read_flash_mfg_id(adapter);
2165
2166 if (qlcnic_83xx_idc_init(adapter))
2167 return -EIO;
2168
2169 /* Configure default, SR-IOV or Virtual NIC mode of operation */ 2213 /* Configure default, SR-IOV or Virtual NIC mode of operation */
2170 if (qlcnic_83xx_configure_opmode(adapter)) 2214 err = qlcnic_83xx_configure_opmode(adapter);
2171 return -EIO; 2215 if (err)
2216 goto disable_mbx_intr;
2172 2217
2173 /* Perform operating mode specific initialization */ 2218 /* Perform operating mode specific initialization */
2174 if (adapter->nic_ops->init_driver(adapter)) 2219 err = adapter->nic_ops->init_driver(adapter);
2175 return -EIO; 2220 if (err)
2221 goto disable_mbx_intr;
2222
2223 if (adapter->dcb && qlcnic_dcb_attach(adapter))
2224 qlcnic_clear_dcb_ops(adapter);
2176 2225
2177 /* Periodically monitor device status */ 2226 /* Periodically monitor device status */
2178 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); 2227 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
2228 return 0;
2229
2230disable_mbx_intr:
2231 qlcnic_83xx_free_mbx_intr(adapter);
2232
2233disable_intr:
2234 qlcnic_teardown_intr(adapter);
2235
2236clear_fw_info:
2237 kfree(ahw->fw_info);
2238
2239detach_mbx:
2240 qlcnic_83xx_detach_mailbox_work(adapter);
2241 qlcnic_83xx_free_mailbox(ahw->mailbox);
2242exit:
2243 return err;
2244}
2245
2246void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
2247{
2248 struct qlcnic_hardware_context *ahw = adapter->ahw;
2249 struct qlc_83xx_idc *idc = &ahw->idc;
2250
2251 clear_bit(QLC_83XX_MBX_READY, &idc->status);
2252 cancel_delayed_work_sync(&adapter->fw_work);
2253
2254 if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
2255 qlcnic_83xx_disable_vnic_mode(adapter, 1);
2256
2257 qlcnic_83xx_idc_detach_driver(adapter);
2258 qlcnic_83xx_register_nic_idc_func(adapter, 0);
2259
2260 cancel_delayed_work_sync(&adapter->idc_aen_work);
2261}
2262
2263int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
2264{
2265 struct qlcnic_hardware_context *ahw = adapter->ahw;
2266 struct qlc_83xx_idc *idc = &ahw->idc;
2267 int ret = 0;
2268 u32 owner;
2269
2270 /* Mark the previous IDC state as NEED_RESET so
2271 * that state_entry() will perform the reattachment
2272 * and bring up the device
2273 */
2274 idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
2275 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
2276 if (ahw->pci_func == owner) {
2277 ret = qlcnic_83xx_restart_hw(adapter);
2278 if (ret < 0)
2279 return ret;
2280 qlcnic_83xx_idc_clear_registers(adapter, 0);
2281 }
2282
2283 ret = idc->state_entry(adapter);
2284 return ret;
2285}
2286
2287void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
2288{
2289 struct qlcnic_hardware_context *ahw = adapter->ahw;
2290 struct qlc_83xx_idc *idc = &ahw->idc;
2291 u32 owner;
2292
2293 idc->prev_state = QLC_83XX_IDC_DEV_READY;
2294 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
2295 if (ahw->pci_func == owner)
2296 qlcnic_83xx_idc_enter_ready_state(adapter, 0);
2179 2297
2180 return adapter->ahw->idc.err_code; 2298 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
2181} 2299}
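The reworked qlcnic_83xx_init() above replaces flat early returns with the kernel's usual unwind ladder: each acquired resource gets a cleanup label, and a failure jumps to the label that releases everything obtained so far, in reverse order. Distilled to its shape (hypothetical helpers, for illustration only):

    static int example_init(void)
    {
    	int err;

    	err = acquire_mailbox();	/* cf. qlcnic_83xx_init_mailbox_work() */
    	if (err)
    		return err;

    	err = acquire_fw_info();	/* cf. qlcnic_83xx_get_fw_info() */
    	if (err)
    		goto detach_mbx;

    	err = acquire_interrupts();	/* cf. qlcnic_setup_intr() */
    	if (err)
    		goto free_fw_info;

    	return 0;

    free_fw_info:
    	release_fw_info();
    detach_mbx:
    	release_mailbox();
    	return err;
    }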
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 599d1fda52f2..0248a4c2f5dd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -208,7 +208,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
208 return -EIO; 208 return -EIO;
209 } 209 }
210 210
211 if (ahw->capabilities & BIT_23) 211 if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
212 adapter->flags |= QLCNIC_ESWITCH_ENABLED; 212 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
213 else 213 else
214 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; 214 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
@@ -239,3 +239,41 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
239 239
240 return 0; 240 return 0;
241} 241}
242
243static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
244 int func, int *port_id)
245{
246 struct qlcnic_info nic_info;
247 int err = 0;
248
249 memset(&nic_info, 0, sizeof(struct qlcnic_info));
250
251 err = qlcnic_get_nic_info(adapter, &nic_info, func);
252 if (err)
253 return err;
254
255 if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
256 *port_id = nic_info.phys_port;
257 else
258 err = -EIO;
259
260 return err;
261}
262
263int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
264{
265 int id, err = 0;
266
267 err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
268 if (err)
269 return err;
270
271 if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
272 if (!qlcnic_enable_eswitch(adapter, id, 1))
273 adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
274 else
275 err = -EIO;
276 }
277
278 return err;
279}
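qlcnic_83xx_enable_port_eswitch() is deliberately idempotent: the QLCNIC_SWITCH_ENABLE flag check turns a repeat call for an already-enabled port into a no-op. A hypothetical PF-side call site (peer_func is illustrative):

    	err = qlcnic_83xx_enable_port_eswitch(adapter, peer_func);
    	if (err)
    		dev_warn(&adapter->pdev->dev,
    			 "eSwitch enable failed for function %d\n", peer_func);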
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d09389b33474..86850dd633a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -38,6 +38,9 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, 38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
39 {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, 39 {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
40 {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, 40 {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
41 {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
42 {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
43 {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
41}; 44};
42 45
43static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) 46static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -171,6 +174,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
171 break; 174 break;
172 } 175 }
173 dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); 176 dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
177 qlcnic_dump_mbx(adapter, cmd);
174 } else if (rsp == QLCNIC_CDRP_RSP_OK) 178 } else if (rsp == QLCNIC_CDRP_RSP_OK)
175 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; 179 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
176 180
@@ -243,40 +247,38 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
243 247
244int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) 248int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
245{ 249{
246 void *addr; 250 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
247 struct qlcnic_hostrq_rx_ctx *prq; 251 struct qlcnic_hardware_context *ahw = adapter->ahw;
248 struct qlcnic_cardrsp_rx_ctx *prsp; 252 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
249 struct qlcnic_hostrq_rds_ring *prq_rds; 253 struct net_device *netdev = adapter->netdev;
250 struct qlcnic_hostrq_sds_ring *prq_sds; 254 u32 temp_intr_crb_mode, temp_rds_crb_mode;
251 struct qlcnic_cardrsp_rds_ring *prsp_rds; 255 struct qlcnic_cardrsp_rds_ring *prsp_rds;
252 struct qlcnic_cardrsp_sds_ring *prsp_sds; 256 struct qlcnic_cardrsp_sds_ring *prsp_sds;
257 struct qlcnic_hostrq_rds_ring *prq_rds;
258 struct qlcnic_hostrq_sds_ring *prq_sds;
253 struct qlcnic_host_rds_ring *rds_ring; 259 struct qlcnic_host_rds_ring *rds_ring;
254 struct qlcnic_host_sds_ring *sds_ring; 260 struct qlcnic_host_sds_ring *sds_ring;
255 struct qlcnic_cmd_args cmd; 261 struct qlcnic_cardrsp_rx_ctx *prsp;
256 262 struct qlcnic_hostrq_rx_ctx *prq;
257 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
258 u64 phys_addr;
259
260 u8 i, nrds_rings, nsds_rings; 263 u8 i, nrds_rings, nsds_rings;
261 u16 temp_u16; 264 struct qlcnic_cmd_args cmd;
262 size_t rq_size, rsp_size; 265 size_t rq_size, rsp_size;
263 u32 cap, reg, val, reg2; 266 u32 cap, reg, val, reg2;
267 u64 phys_addr;
268 u16 temp_u16;
269 void *addr;
264 int err; 270 int err;
265 271
266 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
267
268 nrds_rings = adapter->max_rds_rings; 272 nrds_rings = adapter->max_rds_rings;
269 nsds_rings = adapter->max_sds_rings; 273 nsds_rings = adapter->max_sds_rings;
270 274
271 rq_size = 275 rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
272 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, 276 nsds_rings);
273 nsds_rings); 277 rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
274 rsp_size = 278 nsds_rings);
275 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
276 nsds_rings);
277 279
278 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 280 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
279 &hostrq_phys_addr, GFP_KERNEL); 281 &hostrq_phys_addr, GFP_KERNEL);
280 if (addr == NULL) 282 if (addr == NULL)
281 return -ENOMEM; 283 return -ENOMEM;
282 prq = addr; 284 prq = addr;
@@ -295,15 +297,20 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
295 | QLCNIC_CAP0_VALIDOFF); 297 | QLCNIC_CAP0_VALIDOFF);
296 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); 298 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
297 299
298 temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); 300 if (qlcnic_check_multi_tx(adapter) &&
299 prq->valid_field_offset = cpu_to_le16(temp_u16); 301 !adapter->ahw->diag_test) {
300 prq->txrx_sds_binding = nsds_rings - 1; 302 cap |= QLCNIC_CAP0_TX_MULTI;
303 } else {
304 temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
305 prq->valid_field_offset = cpu_to_le16(temp_u16);
306 prq->txrx_sds_binding = nsds_rings - 1;
307 temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
308 prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
309 temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
310 prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
311 }
301 312
302 prq->capabilities[0] = cpu_to_le32(cap); 313 prq->capabilities[0] = cpu_to_le32(cap);
303 prq->host_int_crb_mode =
304 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
305 prq->host_rds_crb_mode =
306 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
307 314
308 prq->num_rds_rings = cpu_to_le16(nrds_rings); 315 prq->num_rds_rings = cpu_to_le16(nrds_rings);
309 prq->num_sds_rings = cpu_to_le16(nsds_rings); 316 prq->num_sds_rings = cpu_to_le16(nsds_rings);
@@ -317,10 +324,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
317 le32_to_cpu(prq->rds_ring_offset)); 324 le32_to_cpu(prq->rds_ring_offset));
318 325
319 for (i = 0; i < nrds_rings; i++) { 326 for (i = 0; i < nrds_rings; i++) {
320
321 rds_ring = &recv_ctx->rds_rings[i]; 327 rds_ring = &recv_ctx->rds_rings[i];
322 rds_ring->producer = 0; 328 rds_ring->producer = 0;
323
324 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); 329 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
325 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); 330 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
326 prq_rds[i].ring_kind = cpu_to_le32(i); 331 prq_rds[i].ring_kind = cpu_to_le32(i);
@@ -331,14 +336,16 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
331 le32_to_cpu(prq->sds_ring_offset)); 336 le32_to_cpu(prq->sds_ring_offset));
332 337
333 for (i = 0; i < nsds_rings; i++) { 338 for (i = 0; i < nsds_rings; i++) {
334
335 sds_ring = &recv_ctx->sds_rings[i]; 339 sds_ring = &recv_ctx->sds_rings[i];
336 sds_ring->consumer = 0; 340 sds_ring->consumer = 0;
337 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); 341 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
338
339 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); 342 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
340 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); 343 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
341 prq_sds[i].msi_index = cpu_to_le16(i); 344 if (qlcnic_check_multi_tx(adapter) &&
345 !adapter->ahw->diag_test)
346 prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
347 else
348 prq_sds[i].msi_index = cpu_to_le16(i);
342 } 349 }
343 350
344 phys_addr = hostrq_phys_addr; 351 phys_addr = hostrq_phys_addr;
@@ -361,9 +368,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
361 368
362 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { 369 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
363 rds_ring = &recv_ctx->rds_rings[i]; 370 rds_ring = &recv_ctx->rds_rings[i];
364
365 reg = le32_to_cpu(prsp_rds[i].host_producer_crb); 371 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
366 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg; 372 rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
367 } 373 }
368 374
369 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) 375 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -371,24 +377,30 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
371 377
372 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { 378 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
373 sds_ring = &recv_ctx->sds_rings[i]; 379 sds_ring = &recv_ctx->sds_rings[i];
374
375 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); 380 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
376 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); 381 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
382 reg2 = ahw->intr_tbl[i].src;
383 else
384 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
377 385
378 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg; 386 sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
379 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2; 387 sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
380 } 388 }
381 389
382 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); 390 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
383 recv_ctx->context_id = le16_to_cpu(prsp->context_id); 391 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
384 recv_ctx->virt_port = prsp->virt_port; 392 recv_ctx->virt_port = prsp->virt_port;
385 393
394 netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
395 recv_ctx->context_id, recv_ctx->state);
386 qlcnic_free_mbx_args(&cmd); 396 qlcnic_free_mbx_args(&cmd);
397
387out_free_rsp: 398out_free_rsp:
388 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, 399 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
389 cardrsp_phys_addr); 400 cardrsp_phys_addr);
390out_free_rq: 401out_free_rq:
391 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); 402 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
403
392 return err; 404 return err;
393} 405}
394 406
@@ -416,16 +428,19 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
416 struct qlcnic_host_tx_ring *tx_ring, 428 struct qlcnic_host_tx_ring *tx_ring,
417 int ring) 429 int ring)
418{ 430{
431 struct qlcnic_hardware_context *ahw = adapter->ahw;
432 struct net_device *netdev = adapter->netdev;
419 struct qlcnic_hostrq_tx_ctx *prq; 433 struct qlcnic_hostrq_tx_ctx *prq;
420 struct qlcnic_hostrq_cds_ring *prq_cds; 434 struct qlcnic_hostrq_cds_ring *prq_cds;
421 struct qlcnic_cardrsp_tx_ctx *prsp; 435 struct qlcnic_cardrsp_tx_ctx *prsp;
422 void *rq_addr, *rsp_addr;
423 size_t rq_size, rsp_size;
424 u32 temp;
425 struct qlcnic_cmd_args cmd; 436 struct qlcnic_cmd_args cmd;
426 int err; 437 u32 temp, intr_mask, temp_int_crb_mode;
427 u64 phys_addr; 438 dma_addr_t rq_phys_addr, rsp_phys_addr;
428 dma_addr_t rq_phys_addr, rsp_phys_addr; 439 int temp_nsds_rings, index, err;
440 void *rq_addr, *rsp_addr;
441 size_t rq_size, rsp_size;
442 u64 phys_addr;
443 u16 msix_id;
429 444
430 /* reset host resources */ 445 /* reset host resources */
431 tx_ring->producer = 0; 446 tx_ring->producer = 0;
@@ -433,32 +448,42 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
433 *(tx_ring->hw_consumer) = 0; 448 *(tx_ring->hw_consumer) = 0;
434 449
435 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 450 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
436 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 451 rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
437 &rq_phys_addr, GFP_KERNEL | __GFP_ZERO); 452 &rq_phys_addr, GFP_KERNEL);
438 if (!rq_addr) 453 if (!rq_addr)
439 return -ENOMEM; 454 return -ENOMEM;
440 455
441 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 456 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
442 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 457 rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
443 &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO); 458 &rsp_phys_addr, GFP_KERNEL);
444 if (!rsp_addr) { 459 if (!rsp_addr) {
445 err = -ENOMEM; 460 err = -ENOMEM;
446 goto out_free_rq; 461 goto out_free_rq;
447 } 462 }
448 463
449 prq = rq_addr; 464 prq = rq_addr;
450
451 prsp = rsp_addr; 465 prsp = rsp_addr;
452 466
453 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); 467 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
454 468
455 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | 469 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
456 QLCNIC_CAP0_LSO); 470 QLCNIC_CAP0_LSO);
471 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
472 temp |= QLCNIC_CAP0_TX_MULTI;
473
457 prq->capabilities[0] = cpu_to_le32(temp); 474 prq->capabilities[0] = cpu_to_le32(temp);
458 475
459 prq->host_int_crb_mode = 476 if (qlcnic_check_multi_tx(adapter) &&
460 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); 477 !adapter->ahw->diag_test) {
461 prq->msi_index = 0; 478 temp_nsds_rings = adapter->max_sds_rings;
479 index = temp_nsds_rings + ring;
480 msix_id = ahw->intr_tbl[index].id;
481 prq->msi_index = cpu_to_le16(msix_id);
482 } else {
483 temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
484 prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
485 prq->msi_index = 0;
486 }
462 487
463 prq->interrupt_ctl = 0; 488 prq->interrupt_ctl = 0;
464 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); 489 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
@@ -480,15 +505,25 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
480 err = qlcnic_issue_cmd(adapter, &cmd); 505 err = qlcnic_issue_cmd(adapter, &cmd);
481 506
482 if (err == QLCNIC_RCODE_SUCCESS) { 507 if (err == QLCNIC_RCODE_SUCCESS) {
508 tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
483 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 509 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
484 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; 510 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
485 tx_ring->ctx_id = le16_to_cpu(prsp->context_id); 511 tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
512 if (qlcnic_check_multi_tx(adapter) &&
513 !adapter->ahw->diag_test &&
514 (adapter->flags & QLCNIC_MSIX_ENABLED)) {
515 index = adapter->max_sds_rings + ring;
516 intr_mask = ahw->intr_tbl[index].src;
517 tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
518 }
519
520 netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
521 tx_ring->ctx_id, tx_ring->state);
486 } else { 522 } else {
487 dev_err(&adapter->pdev->dev, 523 netdev_err(netdev, "Failed to create tx ctx in firmware %d\n",
488 "Failed to create tx ctx in firmware %d\n", err); 524 err);
489 err = -EIO; 525 err = -EIO;
490 } 526 }
491
492 qlcnic_free_mbx_args(&cmd); 527 qlcnic_free_mbx_args(&cmd);
493 528
494out_free_rsp: 529out_free_rsp:
@@ -618,6 +653,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
618 } 653 }
619 } 654 }
620 655
656 if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
657 qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
658 err = qlcnic_82xx_mq_intrpt(dev, 1);
659 if (err)
660 return err;
661 }
662
621 err = qlcnic_fw_cmd_create_rx_ctx(dev); 663 err = qlcnic_fw_cmd_create_rx_ctx(dev);
622 if (err) 664 if (err)
623 goto err_out; 665 goto err_out;
@@ -639,13 +681,19 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
639 } 681 }
640 682
641 set_bit(__QLCNIC_FW_ATTACHED, &dev->state); 683 set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
684
642 return 0; 685 return 0;
643 686
644err_out: 687err_out:
688 if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
689 qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
690 qlcnic_82xx_config_intrpt(dev, 0);
691
645 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { 692 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
646 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 693 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
647 qlcnic_83xx_config_intrpt(dev, 0); 694 qlcnic_83xx_config_intrpt(dev, 0);
648 } 695 }
696
649 return err; 697 return err;
650} 698}
651 699
@@ -659,6 +707,12 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
659 qlcnic_fw_cmd_del_tx_ctx(adapter, 707 qlcnic_fw_cmd_del_tx_ctx(adapter,
660 &adapter->tx_ring[ring]); 708 &adapter->tx_ring[ring]);
661 709
710 if (qlcnic_82xx_check(adapter) &&
711 (adapter->flags & QLCNIC_MSIX_ENABLED) &&
712 qlcnic_check_multi_tx(adapter) &&
713 !adapter->ahw->diag_test)
714 qlcnic_82xx_config_intrpt(adapter, 0);
715
662 if (qlcnic_83xx_check(adapter) && 716 if (qlcnic_83xx_check(adapter) &&
663 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 717 (adapter->flags & QLCNIC_MSIX_ENABLED)) {
664 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 718 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
@@ -723,8 +777,54 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
723 } 777 }
724} 778}
725 779
780int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
781{
782 struct qlcnic_hardware_context *ahw = adapter->ahw;
783 struct net_device *netdev = adapter->netdev;
784 struct qlcnic_cmd_args cmd;
785 u32 type, val;
786 int i, err = 0;
787
788 for (i = 0; i < ahw->num_msix; i++) {
789 qlcnic_alloc_mbx_args(&cmd, adapter,
790 QLCNIC_CMD_MQ_TX_CONFIG_INTR);
791 type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
792 val = type | (ahw->intr_tbl[i].type << 4);
793 if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
794 val |= (ahw->intr_tbl[i].id << 16);
795 cmd.req.arg[1] = val;
796 err = qlcnic_issue_cmd(adapter, &cmd);
797 if (err) {
798 netdev_err(netdev, "Failed to %s interrupts %d\n",
799 op_type == QLCNIC_INTRPT_ADD ? "Add" :
800 "Delete", err);
801 qlcnic_free_mbx_args(&cmd);
802 return err;
803 }
804 val = cmd.rsp.arg[1];
805 if (LSB(val)) {
806 netdev_info(netdev,
807 "failed to configure interrupt for %d\n",
808 ahw->intr_tbl[i].id);
809 continue;
810 }
811 if (op_type) {
812 ahw->intr_tbl[i].id = MSW(val);
813 ahw->intr_tbl[i].enabled = 1;
814 ahw->intr_tbl[i].src = cmd.rsp.arg[2];
815 } else {
816 ahw->intr_tbl[i].id = i;
817 ahw->intr_tbl[i].enabled = 0;
818 ahw->intr_tbl[i].src = 0;
819 }
820 qlcnic_free_mbx_args(&cmd);
821 }
822
823 return err;
824}
726 825
727int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) 826int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
827 u8 function)
728{ 828{
729 int err, i; 829 int err, i;
730 struct qlcnic_cmd_args cmd; 830 struct qlcnic_cmd_args cmd;
@@ -734,7 +834,7 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
734 if (err) 834 if (err)
735 return err; 835 return err;
736 836
737 cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8; 837 cmd.req.arg[1] = function | BIT_8;
738 err = qlcnic_issue_cmd(adapter, &cmd); 838 err = qlcnic_issue_cmd(adapter, &cmd);
739 839
740 if (err == QLCNIC_RCODE_SUCCESS) { 840 if (err == QLCNIC_RCODE_SUCCESS) {
@@ -765,8 +865,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
765 struct qlcnic_cmd_args cmd; 865 struct qlcnic_cmd_args cmd;
766 size_t nic_size = sizeof(struct qlcnic_info_le); 866 size_t nic_size = sizeof(struct qlcnic_info_le);
767 867
768 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 868 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
769 &nic_dma_t, GFP_KERNEL | __GFP_ZERO); 869 &nic_dma_t, GFP_KERNEL);
770 if (!nic_info_addr) 870 if (!nic_info_addr)
771 return -ENOMEM; 871 return -ENOMEM;
772 872
@@ -819,8 +919,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
819 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 919 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
820 return err; 920 return err;
821 921
822 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 922 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
823 &nic_dma_t, GFP_KERNEL | __GFP_ZERO); 923 &nic_dma_t, GFP_KERNEL);
824 if (!nic_info_addr) 924 if (!nic_info_addr)
825 return -ENOMEM; 925 return -ENOMEM;
826 926
@@ -872,9 +972,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
872 size_t npar_size = sizeof(struct qlcnic_pci_info_le); 972 size_t npar_size = sizeof(struct qlcnic_pci_info_le);
873 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; 973 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
874 974
875 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, 975 pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
876 &pci_info_dma_t, 976 &pci_info_dma_t, GFP_KERNEL);
877 GFP_KERNEL | __GFP_ZERO);
878 if (!pci_info_addr) 977 if (!pci_info_addr)
879 return -ENOMEM; 978 return -ENOMEM;
880 979
@@ -974,8 +1073,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
974 return -EIO; 1073 return -EIO;
975 } 1074 }
976 1075
977 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 1076 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
978 &stats_dma_t, GFP_KERNEL | __GFP_ZERO); 1077 &stats_dma_t, GFP_KERNEL);
979 if (!stats_addr) 1078 if (!stats_addr)
980 return -ENOMEM; 1079 return -ENOMEM;
981 1080
@@ -1030,8 +1129,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
1030 if (mac_stats == NULL) 1129 if (mac_stats == NULL)
1031 return -ENOMEM; 1130 return -ENOMEM;
1032 1131
1033 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 1132 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
1034 &stats_dma_t, GFP_KERNEL | __GFP_ZERO); 1133 &stats_dma_t, GFP_KERNEL);
1035 if (!stats_addr) 1134 if (!stats_addr)
1036 return -ENOMEM; 1135 return -ENOMEM;
1037 1136
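The dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) to dma_zalloc_coherent(..., GFP_KERNEL) conversions in this file are behavior-preserving; the wrapper is essentially equivalent to (a sketch of the pattern, not the kernel's verbatim definition):

    static inline void *example_dma_zalloc_coherent(struct device *dev,
    						size_t size,
    						dma_addr_t *dma_handle,
    						gfp_t flag)
    {
    	return dma_alloc_coherent(dev, size, dma_handle,
    				  flag | __GFP_ZERO);
    }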
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 000000000000..d62d5ce432ec
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1179 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/types.h>
9#include "qlcnic.h"
10
11#define QLC_DCB_NUM_PARAM 3
12#define QLC_DCB_LOCAL_IDX 0
13#define QLC_DCB_OPER_IDX 1
14#define QLC_DCB_PEER_IDX 2
15
16#define QLC_DCB_GET_MAP(V) (1 << V)
17
18#define QLC_DCB_AEN_BIT 0x2
19#define QLC_DCB_FW_VER 0x2
20#define QLC_DCB_MAX_TC 0x8
21#define QLC_DCB_MAX_APP 0x8
22#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
23#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
24
25#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
26#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
27#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
28#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
29#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
30#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
31#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
32#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
33#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
34#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
35#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
36#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
37#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
38
39#define QLC_DCB_LOCAL_PARAM_FWID 0x3
40#define QLC_DCB_OPER_PARAM_FWID 0x1
41#define QLC_DCB_PEER_PARAM_FWID 0x2
42
43#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
44#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
45#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
46#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
47
48#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
49#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
50#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
51#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
52#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
53#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
54
55static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
56
57static void qlcnic_dcb_aen_work(struct work_struct *);
58static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
59
60static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
61static void __qlcnic_dcb_free(struct qlcnic_adapter *);
62static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
63static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
64static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
65
66static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
67static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
68static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
69static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
70
71static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
72static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
73static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
74static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
75static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
76
77struct qlcnic_dcb_capability {
78 bool tsa_capability;
79 bool ets_capability;
80 u8 max_num_tc;
81 u8 max_ets_tc;
82 u8 max_pfc_tc;
83 u8 dcb_capability;
84};
85
86struct qlcnic_dcb_param {
87 u32 hdr_prio_pfc_map[2];
88 u32 prio_pg_map[2];
89 u32 pg_bw_map[2];
90 u32 pg_tsa_map[2];
91 u32 app[QLC_DCB_MAX_APP];
92};
93
94struct qlcnic_dcb_mbx_params {
95 /* 1st local, 2nd operational, 3rd remote */
96 struct qlcnic_dcb_param type[3];
97 u32 prio_tc_map;
98};
99
100struct qlcnic_82xx_dcb_param_mbx_le {
101 __le32 hdr_prio_pfc_map[2];
102 __le32 prio_pg_map[2];
103 __le32 pg_bw_map[2];
104 __le32 pg_tsa_map[2];
105 __le32 app[QLC_DCB_MAX_APP];
106};
107
108enum qlcnic_dcb_selector {
109 QLC_SELECTOR_DEF = 0x0,
110 QLC_SELECTOR_ETHER,
111 QLC_SELECTOR_TCP,
112 QLC_SELECTOR_UDP,
113};
114
115enum qlcnic_dcb_prio_type {
116 QLC_PRIO_NONE = 0,
117 QLC_PRIO_GROUP,
118 QLC_PRIO_LINK,
119};
120
121enum qlcnic_dcb_pfc_type {
122 QLC_PFC_DISABLED = 0,
123 QLC_PFC_FULL,
124 QLC_PFC_TX,
125 QLC_PFC_RX
126};
127
128struct qlcnic_dcb_prio_cfg {
129 bool valid;
130 enum qlcnic_dcb_pfc_type pfc_type;
131};
132
133struct qlcnic_dcb_pg_cfg {
134 bool valid;
135 u8 total_bw_percent; /* of link/port BW */
136 u8 prio_count;
137 u8 tsa_type;
138};
139
140struct qlcnic_dcb_tc_cfg {
141 bool valid;
142 struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
143 enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
144 u8 link_percent; /* % of link bandwidth */
145 u8 bwg_percent; /* % of BWG's bandwidth */
146 u8 up_tc_map;
147 u8 pgid;
148};
149
150struct qlcnic_dcb_app {
151 bool valid;
152 enum qlcnic_dcb_selector selector;
153 u16 protocol;
154 u8 priority;
155};
156
157struct qlcnic_dcb_cee {
158 struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
159 struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
160 struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
161 bool tc_param_valid;
162 bool pfc_mode_enable;
163};
164
165struct qlcnic_dcb_cfg {
166 /* 0 - local, 1 - operational, 2 - remote */
167 struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
168 struct qlcnic_dcb_capability capability;
169 u32 version;
170};
171
172static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
173 .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
174 .free = __qlcnic_dcb_free,
175 .attach = __qlcnic_dcb_attach,
176 .query_hw_capability = __qlcnic_dcb_query_hw_capability,
177 .get_info = __qlcnic_dcb_get_info,
178
179 .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
180 .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
181 .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
182 .register_aen = qlcnic_83xx_dcb_register_aen,
183 .handle_aen = qlcnic_83xx_dcb_handle_aen,
184};
185
186static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
187 .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
188 .free = __qlcnic_dcb_free,
189 .attach = __qlcnic_dcb_attach,
190 .query_hw_capability = __qlcnic_dcb_query_hw_capability,
191 .get_info = __qlcnic_dcb_get_info,
192
193 .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
194 .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
195 .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
196 .handle_aen = qlcnic_82xx_dcb_handle_aen,
197};
198
199static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
200{
201 if (qlcnic_82xx_check(adapter))
202 return QLC_82XX_DCB_GET_NUMAPP(val);
203 else
204 return QLC_83XX_DCB_GET_NUMAPP(val);
205}
206
207static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
208 u32 val)
209{
210 if (qlcnic_82xx_check(adapter))
211 return QLC_82XX_DCB_PFC_VALID(val);
212 else
213 return QLC_83XX_DCB_PFC_VALID(val);
214}
215
216static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
217 u32 val)
218{
219 if (qlcnic_82xx_check(adapter))
220 return QLC_82XX_DCB_TSA_VALID(val);
221 else
222 return QLC_83XX_DCB_TSA_VALID(val);
223}
224
225static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
226 u32 val)
227{
228 if (qlcnic_82xx_check(adapter))
229 return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
230 else
231 return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
232}
233
234static int qlcnic_dcb_prio_count(u8 up_tc_map)
235{
236 int j;
237
238 for (j = 0; j < QLC_DCB_MAX_TC; j++)
239 if (up_tc_map & QLC_DCB_GET_MAP(j))
240 break;
241
242 return j;
243}
244
245static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
246{
247 if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
248 adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
249}
250
251static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
252{
253 if (qlcnic_82xx_check(adapter))
254 adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
255 else if (qlcnic_83xx_check(adapter))
256 adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
257}
258
259int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
260{
261 struct qlcnic_dcb *dcb;
262
263 dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
264 if (!dcb)
265 return -ENOMEM;
266
267 adapter->dcb = dcb;
268 dcb->adapter = adapter;
269 qlcnic_set_dcb_ops(adapter);
270
271 return 0;
272}
273
274static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
275{
276 struct qlcnic_dcb *dcb = adapter->dcb;
277
278 if (!dcb)
279 return;
280
281 qlcnic_dcb_register_aen(adapter, 0);
282
283 while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
284 usleep_range(10000, 11000);
285
286 cancel_delayed_work_sync(&dcb->aen_work);
287
288 if (dcb->wq) {
289 destroy_workqueue(dcb->wq);
290 dcb->wq = NULL;
291 }
292
293 kfree(dcb->cfg);
294 dcb->cfg = NULL;
295 kfree(dcb->param);
296 dcb->param = NULL;
297 kfree(dcb);
298 adapter->dcb = NULL;
299}
300
301static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
302{
303 qlcnic_dcb_get_hw_capability(adapter);
304 qlcnic_dcb_get_cee_cfg(adapter);
305 qlcnic_dcb_register_aen(adapter, 1);
306}
307
308static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
309{
310 struct qlcnic_dcb *dcb = adapter->dcb;
311 int err = 0;
312
313 INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
314
315 dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
316 if (!dcb->wq) {
317 dev_err(&adapter->pdev->dev,
318 "DCB workqueue allocation failed. DCB will be disabled\n");
319 return -1;
320 }
321
322 dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
323 if (!dcb->cfg) {
324 err = -ENOMEM;
325 goto out_free_wq;
326 }
327
328 dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
329 if (!dcb->param) {
330 err = -ENOMEM;
331 goto out_free_cfg;
332 }
333
334 qlcnic_dcb_get_info(adapter);
335
336 return 0;
337out_free_cfg:
338 kfree(dcb->cfg);
339 dcb->cfg = NULL;
340
341out_free_wq:
342 destroy_workqueue(dcb->wq);
343 dcb->wq = NULL;
344
345 return err;
346}
347
348static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
349 char *buf)
350{
351 struct qlcnic_cmd_args cmd;
352 u32 mbx_out;
353 int err;
354
355 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
356 if (err)
357 return err;
358
359 err = qlcnic_issue_cmd(adapter, &cmd);
360 if (err) {
361 dev_err(&adapter->pdev->dev,
362 "Failed to query DCBX capability, err %d\n", err);
363 } else {
364 mbx_out = cmd.rsp.arg[1];
365 if (buf)
366 memcpy(buf, &mbx_out, sizeof(u32));
367 }
368
369 qlcnic_free_mbx_args(&cmd);
370
371 return err;
372}
373
374static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
375{
376 struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
377 u32 mbx_out;
378 int err;
379
380 memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
381
382 err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
383 if (err)
384 return err;
385
386 mbx_out = *val;
387 if (QLC_DCB_TSA_SUPPORT(mbx_out))
388 cap->tsa_capability = true;
389
390 if (QLC_DCB_ETS_SUPPORT(mbx_out))
391 cap->ets_capability = true;
392
393 cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
394 cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
395 cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
396
397 if (cap->max_num_tc > QLC_DCB_MAX_TC ||
398 cap->max_ets_tc > cap->max_num_tc ||
399 cap->max_pfc_tc > cap->max_num_tc) {
400 dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
401 return -EINVAL;
402 }
403
404 return err;
405}
406
407static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
408{
409 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
410 struct qlcnic_dcb_capability *cap;
411 u32 mbx_out;
412 int err;
413
414 err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
415 if (err)
416 return err;
417
418 cap = &cfg->capability;
419 cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
420
421 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
422 set_bit(__QLCNIC_DCB_STATE, &adapter->state);
423
424 return err;
425}
426
427static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
428 char *buf, u8 type)
429{
430 u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
431 struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
432 struct device *dev = &adapter->pdev->dev;
433 dma_addr_t cardrsp_phys_addr;
434 struct qlcnic_dcb_param rsp;
435 struct qlcnic_cmd_args cmd;
436 u64 phys_addr;
437 void *addr;
438 int err, i;
439
440 switch (type) {
441 case QLC_DCB_LOCAL_PARAM_FWID:
442 case QLC_DCB_OPER_PARAM_FWID:
443 case QLC_DCB_PEER_PARAM_FWID:
444 break;
445 default:
446 dev_err(dev, "Invalid parameter type %d\n", type);
447 return -EINVAL;
448 }
449
450 addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
451 GFP_KERNEL);
452 if (addr == NULL)
453 return -ENOMEM;
454
455 prsp_le = addr;
456
457 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
458 if (err)
459 goto out_free_rsp;
460
461 phys_addr = cardrsp_phys_addr;
462 cmd.req.arg[1] = size | (type << 16);
463 cmd.req.arg[2] = MSD(phys_addr);
464 cmd.req.arg[3] = LSD(phys_addr);
465
466 err = qlcnic_issue_cmd(adapter, &cmd);
467 if (err) {
468 dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
469 goto out;
470 }
471
472 memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
473 rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
474 rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
475 rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
476 rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
477 rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
478 rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
479 rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
480 rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
481
482 for (i = 0; i < QLC_DCB_MAX_APP; i++)
483 rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
484
485 if (buf)
486 memcpy(buf, &rsp, size);
487out:
488 qlcnic_free_mbx_args(&cmd);
489
490out_free_rsp:
491 dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
492
493 return err;
494}
495
496static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
497{
498 struct qlcnic_dcb_mbx_params *mbx;
499 int err;
500
501 mbx = adapter->dcb->param;
502 if (!mbx)
503 return 0;
504
505 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
506 QLC_DCB_LOCAL_PARAM_FWID);
507 if (err)
508 return err;
509
510 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
511 QLC_DCB_OPER_PARAM_FWID);
512 if (err)
513 return err;
514
515 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
516 QLC_DCB_PEER_PARAM_FWID);
517 if (err)
518 return err;
519
520 mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
521
522 qlcnic_dcb_data_cee_param_map(adapter);
523
524 return err;
525}
526
527static void qlcnic_dcb_aen_work(struct work_struct *work)
528{
529 struct qlcnic_adapter *adapter;
530 struct qlcnic_dcb *dcb;
531
532 dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
533 adapter = dcb->adapter;
534
535 qlcnic_dcb_get_cee_cfg(adapter);
536 clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
537}
538
539static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
540 void *data)
541{
542 struct qlcnic_dcb *dcb = adapter->dcb;
543
544 if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
545 return;
546
547 queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
548}
549
550static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
551{
552 struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
553 u32 mbx_out;
554 int err;
555
556 err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
557 if (err)
558 return err;
559
560 if (mbx_out & BIT_2)
561 cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
562 if (mbx_out & BIT_3)
563 cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
564 if (cap->dcb_capability)
565 cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
566
567 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
568 set_bit(__QLCNIC_DCB_STATE, &adapter->state);
569
570 return err;
571}
572
573static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
574 char *buf, u8 idx)
575{
576 struct qlcnic_dcb_mbx_params mbx_out;
577 int err, i, j, k, max_app, size;
578 struct qlcnic_dcb_param *each;
579 struct qlcnic_cmd_args cmd;
580 u32 val;
581 char *p;
582
583 size = 0;
584 memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
585 memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
586
587 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
588 if (err)
589 return err;
590
591 cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
592 err = qlcnic_issue_cmd(adapter, &cmd);
593 if (err) {
594 dev_err(&adapter->pdev->dev,
595 "Failed to query DCBX param, err %d\n", err);
596 goto out;
597 }
598
599 mbx_out.prio_tc_map = cmd.rsp.arg[1];
600 p = memcpy(buf, &mbx_out, sizeof(u32));
601 k = 2;
602 p += sizeof(u32);
603
604 for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
605 each = &mbx_out.type[j];
606
607 each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
608 each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
609 each->prio_pg_map[0] = cmd.rsp.arg[k++];
610 each->prio_pg_map[1] = cmd.rsp.arg[k++];
611 each->pg_bw_map[0] = cmd.rsp.arg[k++];
612 each->pg_bw_map[1] = cmd.rsp.arg[k++];
613 each->pg_tsa_map[0] = cmd.rsp.arg[k++];
614 each->pg_tsa_map[1] = cmd.rsp.arg[k++];
615 val = each->hdr_prio_pfc_map[0];
616
617 max_app = qlcnic_dcb_get_num_app(adapter, val);
618 for (i = 0; i < max_app; i++)
619 each->app[i] = cmd.rsp.arg[i + k];
620
621 size = 16 * sizeof(u32);
622 memcpy(p, &each->hdr_prio_pfc_map[0], size);
623 p += size;
624 if (j == 0)
625 k = 18;
626 else
627 k = 34;
628 }
629out:
630 qlcnic_free_mbx_args(&cmd);
631
632 return err;
633}
634
635static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
636{
637 struct qlcnic_dcb *dcb = adapter->dcb;
638 int err;
639
640 err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
641 if (err)
642 return err;
643
644 qlcnic_dcb_data_cee_param_map(adapter);
645
646 return err;
647}
648
649static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
650 bool flag)
651{
652 u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
653 struct qlcnic_cmd_args cmd;
654 int err;
655
656 err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
657 if (err)
658 return err;
659
660 cmd.req.arg[1] = QLC_DCB_AEN_BIT;
661
662 err = qlcnic_issue_cmd(adapter, &cmd);
663 if (err)
664 dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
665 (flag ? "register" : "unregister"), err);
666
667 qlcnic_free_mbx_args(&cmd);
668
669 return err;
670}
671
672static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
673 void *data)
674{
675 struct qlcnic_dcb *dcb = adapter->dcb;
676 u32 *val = data;
677
678 if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
679 return;
680
681 if (*val & BIT_8)
682 set_bit(__QLCNIC_DCB_STATE, &adapter->state);
683 else
684 clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
685
686 queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
687}
688
689static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
690 struct qlcnic_dcb_param *each,
691 struct qlcnic_dcb_cee *type)
692{
693 struct qlcnic_dcb_tc_cfg *tc_cfg;
694 u8 i, tc, pgid;
695
696 for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
697 tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
698 tc_cfg = &type->tc_cfg[tc];
699 tc_cfg->valid = true;
700 tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
701
702 if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
703 type->pfc_mode_enable) {
704 tc_cfg->prio_cfg[i].valid = true;
705 tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
706 }
707
708 if (i < 4)
709 pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
710 else
711 pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
712
713 tc_cfg->pgid = pgid;
714
715 tc_cfg->prio_type = QLC_PRIO_LINK;
716 type->pg_cfg[tc_cfg->pgid].prio_count++;
717 }
718}
719
720static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
721 struct qlcnic_dcb_cee *type)
722{
723 struct qlcnic_dcb_pg_cfg *pg_cfg;
724 u8 i, tsa, bw_per;
725
726 for (i = 0; i < QLC_DCB_MAX_PG; i++) {
727 pg_cfg = &type->pg_cfg[i];
728 pg_cfg->valid = true;
729
730 if (i < 4) {
731 bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
732 tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
733 } else {
734 bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
735 tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
736 }
737
738 pg_cfg->total_bw_percent = bw_per;
739 pg_cfg->tsa_type = tsa;
740 }
741}
742
743static void
744qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
745 struct qlcnic_dcb_param *each,
746 struct qlcnic_dcb_cee *type)
747{
748 struct qlcnic_dcb_app *app;
749 u8 i, num_app, map, cnt;
750 struct dcb_app new_app;
751
752 num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
753 for (i = 0; i < num_app; i++) {
754 app = &type->app[i];
755 app->valid = true;
756
757 /* Only for CEE (-1) */
758 app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
759 new_app.selector = app->selector;
760 app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
761 new_app.protocol = app->protocol;
762 map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
763 cnt = qlcnic_dcb_prio_count(map);
764
765 if (cnt >= QLC_DCB_MAX_TC)
766 cnt = 0;
767
768 app->priority = cnt;
769 new_app.priority = cnt;
770
771 if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
772 dcb_setapp(adapter->netdev, &new_app);
773 }
774}
775
776static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
777{
778 struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
779 struct qlcnic_dcb_param *each = &mbx->type[idx];
780 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
781 struct qlcnic_dcb_cee *type = &cfg->type[idx];
782
783 type->tc_param_valid = false;
784 type->pfc_mode_enable = false;
785 memset(type->tc_cfg, 0,
786 sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
787 memset(type->pg_cfg, 0,
788 sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
789
790 if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
791 cfg->capability.max_pfc_tc)
792 type->pfc_mode_enable = true;
793
794 if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
795 cfg->capability.max_ets_tc)
796 type->tc_param_valid = true;
797
798 qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
799 qlcnic_dcb_fill_cee_pg_params(each, type);
800 qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
801}
802
803static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
804{
805 int i;
806
807 for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
808 qlcnic_dcb_map_cee_params(adapter, i);
809
810 dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
811}
812
813static u8 qlcnic_dcb_get_state(struct net_device *netdev)
814{
815 struct qlcnic_adapter *adapter = netdev_priv(netdev);
816
817 return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
818}
819
820static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
821{
822 memcpy(addr, netdev->dev_addr, netdev->addr_len);
823}
824
825static void
826qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
827 u8 *pgid, u8 *bw_per, u8 *up_tc_map)
828{
829 struct qlcnic_adapter *adapter = netdev_priv(netdev);
830 struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
831 struct qlcnic_dcb_cee *type;
832 u8 i, cnt, pg;
833
834 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
835 *prio = *pgid = *bw_per = *up_tc_map = 0;
836
837 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
838 !type->tc_param_valid)
839 return;
840
841 if (tc < 0 || (tc > QLC_DCB_MAX_TC))
842 return;
843
844 tc_cfg = &type->tc_cfg[tc];
845 if (!tc_cfg->valid)
846 return;
847
848 *pgid = tc_cfg->pgid;
849 *prio = tc_cfg->prio_type;
850 *up_tc_map = tc_cfg->up_tc_map;
851 pg = *pgid;
852
853 for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
854 temp = &type->tc_cfg[i];
855 if (temp->valid && (pg == temp->pgid))
856 cnt++;
857 }
858
859 tc_cfg->bwg_percent = (100 / cnt);
860 *bw_per = tc_cfg->bwg_percent;
861}
862
863static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
864 u8 *bw_pct)
865{
866 struct qlcnic_adapter *adapter = netdev_priv(netdev);
867 struct qlcnic_dcb_pg_cfg *pgcfg;
868 struct qlcnic_dcb_cee *type;
869
870 *bw_pct = 0;
871 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
872
873 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
874 !type->tc_param_valid)
875 return;
876
877 if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
878 return;
879
880 pgcfg = &type->pg_cfg[pgid];
881 if (!pgcfg->valid)
882 return;
883
884 *bw_pct = pgcfg->total_bw_percent;
885}
886
887static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
888 u8 *setting)
889{
890 struct qlcnic_adapter *adapter = netdev_priv(netdev);
891 struct qlcnic_dcb_tc_cfg *tc_cfg;
892 u8 val = QLC_DCB_GET_MAP(prio);
893 struct qlcnic_dcb_cee *type;
894 u8 i;
895
896 *setting = 0;
897 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
898
899 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
900 !type->pfc_mode_enable)
901 return;
902
903 for (i = 0; i < QLC_DCB_MAX_TC; i++) {
904 tc_cfg = &type->tc_cfg[i];
905 if (!tc_cfg->valid)
906 continue;
907
908 if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
909 *setting = tc_cfg->prio_cfg[prio].pfc_type;
910 }
911}
912
913static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
914 u8 *cap)
915{
916 struct qlcnic_adapter *adapter = netdev_priv(netdev);
917
918 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
919 return 0;
920
921 switch (capid) {
922 case DCB_CAP_ATTR_PG:
923 case DCB_CAP_ATTR_UP2TC:
924 case DCB_CAP_ATTR_PFC:
925 case DCB_CAP_ATTR_GSP:
926 *cap = true;
927 break;
928 case DCB_CAP_ATTR_PG_TCS:
929 case DCB_CAP_ATTR_PFC_TCS:
930 *cap = 0x80; /* 8 priorities for PGs */
931 break;
932 case DCB_CAP_ATTR_DCBX:
933 *cap = adapter->dcb->cfg->capability.dcb_capability;
934 break;
935 default:
936 *cap = false;
937 }
938
939 return 0;
940}
941
942static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
943{
944 struct qlcnic_adapter *adapter = netdev_priv(netdev);
945 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
946
947 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
948 return -EINVAL;
949
950 switch (attr) {
951 case DCB_NUMTCS_ATTR_PG:
952 *num = cfg->capability.max_ets_tc;
953 return 0;
954 case DCB_NUMTCS_ATTR_PFC:
955 *num = cfg->capability.max_pfc_tc;
956 return 0;
957 default:
958 return -EINVAL;
959 }
960}
961
962static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
963{
964 struct qlcnic_adapter *adapter = netdev_priv(netdev);
965 struct dcb_app app = {
966 .selector = idtype,
967 .protocol = id,
968 };
969
970 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
971 return 0;
972
973 return dcb_getapp(netdev, &app);
974}
975
976static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
977{
978 struct qlcnic_adapter *adapter = netdev_priv(netdev);
979 struct qlcnic_dcb *dcb = adapter->dcb;
980
981 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
982 return 0;
983
984 return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
985}
986
987static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
988{
989 struct qlcnic_adapter *adapter = netdev_priv(netdev);
990 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
991
992 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
993 return 0;
994
995 return cfg->capability.dcb_capability;
996}
997
998static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
999{
1000 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1001 struct qlcnic_dcb_cee *type;
1002
1003 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1004 return 1;
1005
1006 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
1007 *flag = 0;
1008
1009 switch (fid) {
1010 case DCB_FEATCFG_ATTR_PG:
1011 if (type->tc_param_valid)
1012 *flag |= DCB_FEATCFG_ENABLE;
1013 else
1014 *flag |= DCB_FEATCFG_ERROR;
1015 break;
1016 case DCB_FEATCFG_ATTR_PFC:
1017 if (type->pfc_mode_enable) {
1018 if (type->tc_cfg[0].prio_cfg[0].pfc_type)
1019 *flag |= DCB_FEATCFG_ENABLE;
1020 } else {
1021 *flag |= DCB_FEATCFG_ERROR;
1022 }
1023 break;
1024 case DCB_FEATCFG_ATTR_APP:
1025 *flag |= DCB_FEATCFG_ENABLE;
1026 break;
1027 default:
1028 netdev_err(netdev, "Invalid Feature ID %d\n", fid);
1029 return 1;
1030 }
1031
1032 return 0;
1033}
1034
1035static inline void
1036qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
1037 u8 *pgid, u8 *bw_pct, u8 *up_map)
1038{
1039 *prio_type = *pgid = *bw_pct = *up_map = 0;
1040}
1041
1042static inline void
1043qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
1044{
1045 *bw_pct = 0;
1046}
1047
1048static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
1049 struct dcb_peer_app_info *info,
1050 u16 *app_count)
1051{
1052 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1053 struct qlcnic_dcb_cee *peer;
1054 int i;
1055
1056 *app_count = 0;
1057
1058 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1059 return 0;
1060
1061 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
1062
1063 for (i = 0; i < QLC_DCB_MAX_APP; i++) {
1064 if (peer->app[i].valid)
1065 (*app_count)++;
1066 }
1067
1068 return 0;
1069}
1070
1071static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
1072 struct dcb_app *table)
1073{
1074 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1075 struct qlcnic_dcb_cee *peer;
1076 struct qlcnic_dcb_app *app;
1077 int i, j;
1078
1079 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1080 return 0;
1081
1082 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
1083
1084 for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
1085 app = &peer->app[i];
1086 if (!app->valid)
1087 continue;
1088
1089 table[j].selector = app->selector;
1090 table[j].priority = app->priority;
1091 table[j++].protocol = app->protocol;
1092 }
1093
1094 return 0;
1095}
1096
1097static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
1098 struct cee_pg *pg)
1099{
1100 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1101 struct qlcnic_dcb_cee *peer;
1102 u8 i, j, k, map;
1103
1104 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1105 return 0;
1106
1107 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
1108
1109 for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
1110 if (!peer->pg_cfg[i].valid)
1111 continue;
1112
1113 pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
1114
1115 for (k = 0; k < QLC_DCB_MAX_TC; k++) {
1116 if (peer->tc_cfg[i].valid &&
1117 (peer->tc_cfg[i].pgid == i)) {
1118 map = peer->tc_cfg[i].up_tc_map;
1119 pg->prio_pg[j++] = map;
1120 break;
1121 }
1122 }
1123 }
1124
1125 return 0;
1126}
1127
1128static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
1129 struct cee_pfc *pfc)
1130{
1131 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1132 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
1133 struct qlcnic_dcb_tc_cfg *tc;
1134 struct qlcnic_dcb_cee *peer;
1135 u8 i, setting, prio;
1136
1137 pfc->pfc_en = 0;
1138
1139 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1140 return 0;
1141
1142 peer = &cfg->type[QLC_DCB_PEER_IDX];
1143
1144 for (i = 0; i < QLC_DCB_MAX_TC; i++) {
1145 tc = &peer->tc_cfg[i];
1146 prio = qlcnic_dcb_prio_count(tc->up_tc_map);
1147
1148 setting = 0;
1149 qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
1150 if (setting)
1151 pfc->pfc_en |= QLC_DCB_GET_MAP(i);
1152 }
1153
1154 pfc->tcs_supported = cfg->capability.max_pfc_tc;
1155
1156 return 0;
1157}
1158
1159static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
1160 .getstate = qlcnic_dcb_get_state,
1161 .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
1162 .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
1163 .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
1164 .getpfccfg = qlcnic_dcb_get_pfc_cfg,
1165 .getcap = qlcnic_dcb_get_capability,
1166 .getnumtcs = qlcnic_dcb_get_num_tcs,
1167 .getapp = qlcnic_dcb_get_app,
1168 .getpfcstate = qlcnic_dcb_get_pfc_state,
1169 .getdcbx = qlcnic_dcb_get_dcbx,
1170 .getfeatcfg = qlcnic_dcb_get_feat_cfg,
1171
1172 .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
1173 .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
1174
1175 .peer_getappinfo = qlcnic_dcb_peer_app_info,
1176 .peer_getapptable = qlcnic_dcb_peer_app_table,
1177 .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
1178 .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
1179};
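Note how __qlcnic_dcb_get_capability() above unpacks a single mailbox word using the QLC_DCB_* field macros defined at the top of the file. A standalone sketch of that decode, with a made-up register value:

	#include <stdio.h>
	#include <stdint.h>

	/* Same bit layout as QLC_DCB_TSA_SUPPORT() and friends above. */
	#define TSA_SUPPORT(v)		((v) & 0x1)
	#define ETS_SUPPORT(v)		(((v) >> 1) & 0x1)
	#define VERSION_SUPPORT(v)	(((v) >> 2) & 0xf)
	#define MAX_NUM_TC(v)		(((v) >> 20) & 0xf)
	#define MAX_NUM_ETS_TC(v)	(((v) >> 24) & 0xf)
	#define MAX_NUM_PFC_TC(v)	(((v) >> 28) & 0xf)

	int main(void)
	{
		uint32_t cap = 0x8880000b;	/* hypothetical mailbox word */

		/* prints: tsa=1 ets=1 ver=2 tc=8 ets_tc=8 pfc_tc=8 */
		printf("tsa=%u ets=%u ver=%u tc=%u ets_tc=%u pfc_tc=%u\n",
		       TSA_SUPPORT(cap), ETS_SUPPORT(cap),
		       VERSION_SUPPORT(cap), MAX_NUM_TC(cap),
		       MAX_NUM_ETS_TC(cap), MAX_NUM_PFC_TC(cap));
		return 0;
	}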
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 000000000000..b87ce9fb503e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,41 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef __QLCNIC_DCBX_H
9#define __QLCNIC_DCBX_H
10
11void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
12
13#ifdef CONFIG_QLCNIC_DCB
14int __qlcnic_register_dcb(struct qlcnic_adapter *);
15#else
16static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
17{ return 0; }
18#endif
19
20struct qlcnic_dcb_ops {
21 void (*init_dcbnl_ops) (struct qlcnic_adapter *);
22 void (*free) (struct qlcnic_adapter *);
23 int (*attach) (struct qlcnic_adapter *);
24 int (*query_hw_capability) (struct qlcnic_adapter *, char *);
25 int (*get_hw_capability) (struct qlcnic_adapter *);
26 void (*get_info) (struct qlcnic_adapter *);
27 int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
28 int (*get_cee_cfg) (struct qlcnic_adapter *);
29 int (*register_aen) (struct qlcnic_adapter *, bool);
30 void (*handle_aen) (struct qlcnic_adapter *, void *);
31};
32
33struct qlcnic_dcb {
34 struct qlcnic_dcb_mbx_params *param;
35 struct qlcnic_adapter *adapter;
36 struct delayed_work aen_work;
37 struct workqueue_struct *wq;
38 struct qlcnic_dcb_ops *ops;
39 struct qlcnic_dcb_cfg *cfg;
40};
41#endif
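The header shows the shape of the abstraction: every chip-specific step sits behind qlcnic_dcb_ops, selected once by qlcnic_set_dcb_ops() in qlcnic_dcb.c, so common DCB code never branches on the ASIC type. A compilable miniature of the pattern (all names illustrative, not driver symbols):

	#include <stdio.h>

	struct dcb;

	struct dcb_ops {
		int (*get_hw_capability)(struct dcb *dcb);
	};

	struct dcb {
		const struct dcb_ops *ops;
	};

	static int chip82xx_caps(struct dcb *dcb) { (void)dcb; return 82; }
	static int chip83xx_caps(struct dcb *dcb) { (void)dcb; return 83; }

	static const struct dcb_ops ops_82xx = { .get_hw_capability = chip82xx_caps };
	static const struct dcb_ops ops_83xx = { .get_hw_capability = chip83xx_caps };

	int main(void)
	{
		struct dcb dcb = { .ops = &ops_83xx };	/* chosen at probe time */

		/* callers dispatch without knowing the chip revision */
		printf("caps: %d\n", dcb.ops->get_hw_capability(&dcb));
		return 0;
	}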
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 7aac23ab31d1..4d7ad0074d1c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -125,6 +125,14 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
 };
 
 #define QLCNIC_STATS_LEN	ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+	"xmit_on",
+	"xmit_off",
+	"xmit_called",
+	"xmit_finished",
+};
+
 static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
 	"ctx_rx_bytes",
 	"ctx_rx_pkts",
@@ -630,15 +638,15 @@ qlcnic_set_ringparam(struct net_device *dev,
 static void qlcnic_get_channels(struct net_device *dev,
 				struct ethtool_channels *channel)
 {
-	int min;
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	int min;
 
 	min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
 	channel->max_rx = rounddown_pow_of_two(min);
-	channel->max_tx = adapter->ahw->max_tx_ques;
+	channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
 
 	channel->rx_count = adapter->max_sds_rings;
-	channel->tx_count = adapter->ahw->max_tx_ques;
+	channel->tx_count = adapter->max_drv_tx_rings;
 }
 
 static int qlcnic_set_channels(struct net_device *dev,
@@ -646,18 +654,27 @@ static int qlcnic_set_channels(struct net_device *dev,
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	int err;
+	int txq = 0;
 
-	if (channel->other_count || channel->combined_count ||
-	    channel->tx_count != channel->max_tx)
+	if (channel->other_count || channel->combined_count)
 		return -EINVAL;
 
-	err = qlcnic_validate_max_rss(adapter, channel->rx_count);
-	if (err)
-		return err;
+	if (channel->rx_count) {
+		err = qlcnic_validate_max_rss(adapter, channel->rx_count);
+		if (err)
+			return err;
+	}
+
+	if (channel->tx_count) {
+		err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
+		if (err)
+			return err;
+		txq = channel->tx_count;
+	}
 
-	err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
-	netdev_info(dev, "allocated 0x%x sds rings\n",
-		    adapter->max_sds_rings);
+	err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
+	netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
+		    adapter->max_sds_rings, adapter->max_drv_tx_rings);
 	return err;
 }
 
@@ -893,6 +910,7 @@ free_diag_res:
 clear_diag_irq:
 	adapter->max_sds_rings = max_sds_rings;
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
 	return ret;
 }
 
@@ -966,6 +984,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	int max_drv_tx_rings = adapter->max_drv_tx_rings;
 	int max_sds_rings = adapter->max_sds_rings;
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1006,9 +1025,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 		msleep(500);
 		qlcnic_process_rcv_ring_diag(sds_ring);
 		if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
-			netdev_info(netdev, "firmware didnt respond to loopback"
-				    " configure request\n");
-			ret = -QLCNIC_FW_NOT_RESPOND;
+			netdev_info(netdev,
+				    "Firmware didn't sent link up event to loopback request\n");
+			ret = -ETIMEDOUT;
 			goto free_res;
 		} else if (adapter->ahw->diag_cnt) {
 			ret = adapter->ahw->diag_cnt;
@@ -1025,6 +1044,7 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 
 clear_it:
 	adapter->max_sds_rings = max_sds_rings;
+	adapter->max_drv_tx_rings = max_drv_tx_rings;
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	return ret;
 }
@@ -1077,11 +1097,21 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 		       QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
+		num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
+		for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+			for (index = 0; index < num_stats; index++) {
+				sprintf(data, "tx_ring_%d %s", i,
+					qlcnic_tx_ring_stats_strings[index]);
+				data += ETH_GSTRING_LEN;
+			}
+		}
+
 		for (index = 0; index < QLCNIC_STATS_LEN; index++) {
 			memcpy(data + index * ETH_GSTRING_LEN,
 			       qlcnic_gstrings_stats[index].stat_string,
 			       ETH_GSTRING_LEN);
 		}
+
 		if (qlcnic_83xx_check(adapter)) {
 			num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
 			for (i = 0; i < num_stats; i++, index++)
@@ -1173,11 +1203,22 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
 			    struct ethtool_stats *stats, u64 *data)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_esw_statistics port_stats;
 	struct qlcnic_mac_statistics mac_stats;
-	int index, ret, length, size;
+	int index, ret, length, size, ring;
 	char *p;
 
+	memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
+	for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+			tx_ring = &adapter->tx_ring[ring];
+			*data++ = tx_ring->xmit_on;
+			*data++ = tx_ring->xmit_off;
+			*data++ = tx_ring->xmit_called;
+			*data++ = tx_ring->xmit_finished;
+		}
+	}
 	memset(data, 0, stats->n_stats * sizeof(u64));
 	length = QLCNIC_STATS_LEN;
 	for (index = 0; index < length; index++) {
@@ -1468,6 +1509,68 @@ static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
 	adapter->ahw->msg_enable = msglvl;
 }
 
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	u32 val;
+
+	if (qlcnic_84xx_check(adapter)) {
+		if (qlcnic_83xx_lock_driver(adapter))
+			return -EBUSY;
+
+		val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+		val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+		QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+		qlcnic_83xx_unlock_driver(adapter);
+	} else {
+		fw_dump->enable = true;
+	}
+
+	dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+	return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	u32 val;
+
+	if (qlcnic_84xx_check(adapter)) {
+		if (qlcnic_83xx_lock_driver(adapter))
+			return -EBUSY;
+
+		val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+		val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+		QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+		qlcnic_83xx_unlock_driver(adapter);
+	} else {
+		fw_dump->enable = false;
+	}
+
+	dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+	return 0;
+}
+
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	bool state;
+	u32 val;
+
+	if (qlcnic_84xx_check(adapter)) {
+		val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+		state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true;
+	} else {
+		state = fw_dump->enable;
+	}
+
+	return state;
+}
+
 static int
 qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
 {
@@ -1484,7 +1587,7 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
 	else
 		dump->len = 0;
 
-	if (!fw_dump->enable)
+	if (!qlcnic_check_fw_dump_state(adapter))
 		dump->flag = ETH_FW_DUMP_DISABLE;
 	else
 		dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
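The point of the three helpers added above: on 84xx the dump-enable flag lives in the shared IDC control register rather than in host memory, so every function sharing the device sees the same setting. A standalone model of that read-modify-write (the bit value is a stand-in for QLC_83XX_IDC_DISABLE_FW_DUMP, and the driver-lock calls are elided):

	#include <stdio.h>
	#include <stdint.h>

	#define DISABLE_FW_DUMP	0x4	/* stand-in bit value */

	static uint32_t idc_ctrl;	/* models the shared register */

	static void set_fw_dump(int enable)
	{
		/* in the driver this happens under qlcnic_83xx_lock_driver() */
		if (enable)
			idc_ctrl &= ~DISABLE_FW_DUMP;
		else
			idc_ctrl |= DISABLE_FW_DUMP;
	}

	int main(void)
	{
		set_fw_dump(0);
		printf("dump %s\n",
		       (idc_ctrl & DISABLE_FW_DUMP) ? "disabled" : "enabled");
		set_fw_dump(1);
		printf("dump %s\n",
		       (idc_ctrl & DISABLE_FW_DUMP) ? "disabled" : "enabled");
		return 0;
	}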
@@ -1532,77 +1635,111 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
 	return 0;
 }
 
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	struct net_device *netdev = adapter->netdev;
+
+	if (!qlcnic_check_fw_dump_state(adapter)) {
+		netdev_info(netdev,
+			    "Can not change driver mask to 0x%x. FW dump not enabled\n",
+			    mask);
+		return -EOPNOTSUPP;
+	}
+
+	fw_dump->tmpl_hdr->drv_cap_mask = mask;
+	netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+	return 0;
+}
+
 static int
 qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
 {
-	int i;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	bool valid_mask = false;
+	int i, ret = 0;
 	u32 state;
 
 	switch (val->flag) {
 	case QLCNIC_FORCE_FW_DUMP_KEY:
 		if (!fw_dump->tmpl_hdr) {
 			netdev_err(netdev, "FW dump not supported\n");
-			return -ENOTSUPP;
+			ret = -EOPNOTSUPP;
+			break;
 		}
-		if (!fw_dump->enable) {
+
+		if (!qlcnic_check_fw_dump_state(adapter)) {
 			netdev_info(netdev, "FW dump not enabled\n");
-			return 0;
+			ret = -EOPNOTSUPP;
+			break;
 		}
+
 		if (fw_dump->clr) {
 			netdev_info(netdev,
 				    "Previous dump not cleared, not forcing dump\n");
-			return 0;
+			break;
 		}
+
 		netdev_info(netdev, "Forcing a FW dump\n");
 		qlcnic_dev_request_reset(adapter, val->flag);
 		break;
 	case QLCNIC_DISABLE_FW_DUMP:
-		if (fw_dump->enable && fw_dump->tmpl_hdr) {
-			netdev_info(netdev, "Disabling FW dump\n");
-			fw_dump->enable = 0;
+		if (!fw_dump->tmpl_hdr) {
+			netdev_err(netdev, "FW dump not supported\n");
+			ret = -EOPNOTSUPP;
+			break;
 		}
-		return 0;
+
+		ret = qlcnic_disable_fw_dump_state(adapter);
+		break;
+
 	case QLCNIC_ENABLE_FW_DUMP:
 		if (!fw_dump->tmpl_hdr) {
 			netdev_err(netdev, "FW dump not supported\n");
-			return -ENOTSUPP;
+			ret = -EOPNOTSUPP;
+			break;
 		}
-		if (!fw_dump->enable) {
-			netdev_info(netdev, "Enabling FW dump\n");
-			fw_dump->enable = 1;
-		}
-		return 0;
+
+		ret = qlcnic_enable_fw_dump_state(adapter);
+		break;
+
 	case QLCNIC_FORCE_FW_RESET:
 		netdev_info(netdev, "Forcing a FW reset\n");
 		qlcnic_dev_request_reset(adapter, val->flag);
 		adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
-		return 0;
+		break;
+
 	case QLCNIC_SET_QUIESCENT:
 	case QLCNIC_RESET_QUIESCENT:
 		state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
 		if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
 			netdev_info(netdev, "Device in FAILED state\n");
-		return 0;
+		break;
+
 	default:
 		if (!fw_dump->tmpl_hdr) {
 			netdev_err(netdev, "FW dump not supported\n");
-			return -ENOTSUPP;
+			ret = -EOPNOTSUPP;
+			break;
 		}
+
 		for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
 			if (val->flag == qlcnic_fw_dump_level[i]) {
-				fw_dump->tmpl_hdr->drv_cap_mask =
-					val->flag;
-				netdev_info(netdev, "Driver mask changed to: 0x%x\n",
-					    fw_dump->tmpl_hdr->drv_cap_mask);
-				return 0;
+				valid_mask = true;
+				break;
 			}
 		}
-		netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
-		return -EINVAL;
+
+		if (valid_mask) {
+			ret = qlcnic_set_dump_mask(adapter, val->flag);
+		} else {
+			netdev_info(netdev, "Invalid dump level: 0x%x\n",
+				    val->flag);
+			ret = -EINVAL;
+		}
 	}
-	return 0;
+	return ret;
 }
 
 const struct ethtool_ops qlcnic_ethtool_ops = {
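A contract worth keeping in mind for the get_strings/get_ethtool_stats hunks above: ethtool pairs label i with value i, so the per-TX-ring strings and counters must be emitted in exactly the same order. A compilable model of that pairing (ring and stat counts are made up):

	#include <stdio.h>
	#include <stdint.h>

	#define GSTRING_LEN	32
	#define NUM_TX_RINGS	2

	static const char *tx_stat_names[] = {
		"xmit_on", "xmit_off", "xmit_called", "xmit_finished",
	};
	#define NUM_TX_STATS (sizeof(tx_stat_names) / sizeof(tx_stat_names[0]))

	int main(void)
	{
		char strings[NUM_TX_RINGS * NUM_TX_STATS][GSTRING_LEN];
		uint64_t data[NUM_TX_RINGS * NUM_TX_STATS] = { 0 };
		size_t ring, i, n = 0;

		/* same double-loop order as the strings and stats callbacks */
		for (ring = 0; ring < NUM_TX_RINGS; ring++)
			for (i = 0; i < NUM_TX_STATS; i++, n++)
				snprintf(strings[n], GSTRING_LEN,
					 "tx_ring_%zu %s", ring,
					 tx_stat_names[i]);

		for (n = 0; n < NUM_TX_RINGS * NUM_TX_STATS; n++)
			printf("%-28s %llu\n", strings[n],
			       (unsigned long long)data[n]);
		return 0;
	}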
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4d5f59b2d153..f8adc7b01f1f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -387,7 +387,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
 	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return -EIO;
 
-	tx_ring = adapter->tx_ring;
+	tx_ring = &adapter->tx_ring[0];
 	__netif_tx_lock_bh(tx_ring->txq);
 
 	producer = tx_ring->producer;
@@ -740,6 +740,22 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 	return 0;
 }
 
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+	u8 mac[ETH_ALEN];
+	int ret;
+
+	ret = qlcnic_get_mac_address(adapter, mac,
+				     adapter->ahw->physical_port);
+	if (ret)
+		return ret;
+
+	memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+	adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+	return 0;
+}
+
 /*
  * Send the interrupt coalescing parameter set by ethtool to the card.
  */
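The MAC address cached into ahw->phys_port_id above is what the new ndo_get_phys_port_id hook (introduced elsewhere in this merge) reports to userspace. A sketch of a typical implementation over such a cache (the private struct is hypothetical, not qlcnic's):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct example_priv {
		u8 phys_port_id[ETH_ALEN];	/* filled once at probe */
	};

	static int example_get_phys_port_id(struct net_device *netdev,
					    struct netdev_phys_port_id *ppid)
	{
		struct example_priv *priv = netdev_priv(netdev);

		ppid->id_len = ETH_ALEN;
		memcpy(ppid->id, priv->phys_port_id, ppid->id_len);

		return 0;
	}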
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4a71b28effcb..272c356cf9b2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -85,8 +85,11 @@ enum qlcnic_regs {
 #define QLCNIC_CMD_GET_TEMP_HDR		0x30
 #define QLCNIC_CMD_BC_EVENT_SETUP	0x31
 #define QLCNIC_CMD_CONFIG_VPORT		0x32
+#define QLCNIC_CMD_DCB_QUERY_CAP	0x34
+#define QLCNIC_CMD_DCB_QUERY_PARAM	0x35
 #define QLCNIC_CMD_GET_MAC_STATS	0x37
 #define QLCNIC_CMD_82XX_SET_DRV_VER	0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR	0x39
 #define QLCNIC_CMD_GET_LED_STATUS	0x3C
 #define QLCNIC_CMD_CONFIGURE_RSS	0x41
 #define QLCNIC_CMD_CONFIG_INTR_COAL	0x43
@@ -122,6 +125,7 @@ enum qlcnic_regs {
 #define QLCNIC_MBX_COMP_EVENT		0x8100
 #define QLCNIC_MBX_REQUEST_EVENT	0x8101
 #define QLCNIC_MBX_TIME_EXTEND_EVENT	0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT	0x8110
 #define QLCNIC_MBX_SFP_INSERT_EVENT	0x8130
 #define QLCNIC_MBX_SFP_REMOVE_EVENT	0x8131
 
@@ -149,7 +153,6 @@ struct ethtool_stats;
 struct pci_device_id;
 struct qlcnic_host_sds_ring;
 struct qlcnic_host_tx_ring;
-struct qlcnic_host_tx_ring;
 struct qlcnic_hardware_context;
 struct qlcnic_adapter;
 
@@ -173,10 +176,12 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
 void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
 irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
 			  struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
 int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
 int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
 				     struct qlcnic_host_tx_ring *tx_ring, int);
@@ -184,7 +189,7 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
 void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
 				   struct qlcnic_host_tx_ring *);
 int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
 int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
 int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 974d62607e13..66c26cf7a2b8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -127,12 +127,12 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
 	}
 }
 
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+			       struct qlcnic_host_tx_ring *tx_ring)
 {
 	struct qlcnic_cmd_buffer *cmd_buf;
 	struct qlcnic_skb_frag *buffrag;
 	int i, j;
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 
 	cmd_buf = tx_ring->cmd_buf_arr;
 	for (i = 0; i < tx_ring->num_desc; i++) {
@@ -241,7 +241,13 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
 		sds_ring->irq = adapter->msix_entries[ring].vector;
 		sds_ring->adapter = adapter;
 		sds_ring->num_desc = adapter->num_rxd;
-
+		if (qlcnic_82xx_check(adapter)) {
+			if (qlcnic_check_multi_tx(adapter) &&
+			    !adapter->ahw->diag_test)
+				sds_ring->tx_ring = &adapter->tx_ring[ring];
+			else
+				sds_ring->tx_ring = &adapter->tx_ring[0];
+		}
 		for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
 			INIT_LIST_HEAD(&sds_ring->free_list[i]);
 	}
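The pairing established here is what lets TX completion work scale with RX service: each status (sds) ring handles the TX ring of the same index, except in diag mode where everything funnels through ring 0. A standalone model of the assignment:

	#include <stdio.h>

	#define NUM_RINGS 4

	struct tx_ring { int id; };
	struct sds_ring { struct tx_ring *tx_ring; };

	int main(void)
	{
		struct tx_ring tx[NUM_RINGS];
		struct sds_ring sds[NUM_RINGS];
		int multi_tx = 1, diag_test = 0, i;

		for (i = 0; i < NUM_RINGS; i++) {
			tx[i].id = i;
			if (multi_tx && !diag_test)
				sds[i].tx_ring = &tx[i];	/* 1:1 pairing */
			else
				sds[i].tx_ring = &tx[0];	/* legacy/diag */
		}

		for (i = 0; i < NUM_RINGS; i++)
			printf("sds %d -> tx %d\n", i, sds[i].tx_ring->id);
		return 0;
	}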
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6946d354f44f..b7b245b43b87 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -127,6 +127,23 @@
 struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
 				     struct qlcnic_host_rds_ring *, u16, u16);
 
+inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+				  struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (qlcnic_check_multi_tx(adapter) &&
+	    !adapter->ahw->diag_test)
+		writel(0x0, tx_ring->crb_intr_mask);
+}
+
+
+static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
+					 struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (qlcnic_check_multi_tx(adapter) &&
+	    !adapter->ahw->diag_test)
+		writel(1, tx_ring->crb_intr_mask);
+}
+
 inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
 				       struct qlcnic_host_tx_ring *tx_ring)
 {
@@ -147,10 +164,7 @@ static inline u8 qlcnic_mac_hash(u64 mac)
147static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, 164static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
148 u16 handle, u8 ring_id) 165 u16 handle, u8 ring_id)
149{ 166{
150 unsigned short device = adapter->pdev->device; 167 if (qlcnic_83xx_check(adapter))
151
152 if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
153 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
154 return handle | (ring_id << 15); 168 return handle | (ring_id << 15);
155 else 169 else
156 return handle; 170 return handle;
@@ -357,14 +371,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
357} 371}
358 372
359static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, 373static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
360 struct cmd_desc_type0 *first_desc, struct sk_buff *skb) 374 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
375 struct qlcnic_host_tx_ring *tx_ring)
361{ 376{
362 u8 l4proto, opcode = 0, hdr_len = 0; 377 u8 l4proto, opcode = 0, hdr_len = 0;
363 u16 flags = 0, vlan_tci = 0; 378 u16 flags = 0, vlan_tci = 0;
364 int copied, offset, copy_len, size; 379 int copied, offset, copy_len, size;
365 struct cmd_desc_type0 *hwdesc; 380 struct cmd_desc_type0 *hwdesc;
366 struct vlan_ethhdr *vh; 381 struct vlan_ethhdr *vh;
367 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
368 u16 protocol = ntohs(skb->protocol); 382 u16 protocol = ntohs(skb->protocol);
369 u32 producer = tx_ring->producer; 383 u32 producer = tx_ring->producer;
370 384
@@ -547,7 +561,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
547netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 561netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
548{ 562{
549 struct qlcnic_adapter *adapter = netdev_priv(netdev); 563 struct qlcnic_adapter *adapter = netdev_priv(netdev);
550 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; 564 struct qlcnic_host_tx_ring *tx_ring;
551 struct qlcnic_cmd_buffer *pbuf; 565 struct qlcnic_cmd_buffer *pbuf;
552 struct qlcnic_skb_frag *buffrag; 566 struct qlcnic_skb_frag *buffrag;
553 struct cmd_desc_type0 *hwdesc, *first_desc; 567 struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -556,10 +570,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
556 int i, k, frag_count, delta = 0; 570 int i, k, frag_count, delta = 0;
557 u32 producer, num_txd; 571 u32 producer, num_txd;
558 572
559 num_txd = tx_ring->num_desc;
560
561 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 573 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
562 netif_stop_queue(netdev); 574 netif_tx_stop_all_queues(netdev);
563 return NETDEV_TX_BUSY; 575 return NETDEV_TX_BUSY;
564 } 576 }
565 577
@@ -569,7 +581,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
569 goto drop_packet; 581 goto drop_packet;
570 } 582 }
571 583
584 if (qlcnic_check_multi_tx(adapter))
585 tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
586 else
587 tx_ring = &adapter->tx_ring[0];
588 num_txd = tx_ring->num_desc;
589
572 frag_count = skb_shinfo(skb)->nr_frags + 1; 590 frag_count = skb_shinfo(skb)->nr_frags + 1;
591
573 /* 14 frags supported for normal packet and 592 /* 14 frags supported for normal packet and
574 * 32 frags supported for TSO packet 593 * 32 frags supported for TSO packet
575 */ 594 */
@@ -584,11 +603,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
584 } 603 }
585 604
586 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { 605 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
587 netif_stop_queue(netdev); 606 netif_tx_stop_queue(tx_ring->txq);
588 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { 607 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
589 netif_start_queue(netdev); 608 netif_tx_start_queue(tx_ring->txq);
590 } else { 609 } else {
591 adapter->stats.xmit_off++; 610 adapter->stats.xmit_off++;
611 tx_ring->xmit_off++;
592 return NETDEV_TX_BUSY; 612 return NETDEV_TX_BUSY;
593 } 613 }
594 } 614 }
@@ -643,7 +663,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
643 tx_ring->producer = get_next_index(producer, num_txd); 663 tx_ring->producer = get_next_index(producer, num_txd);
644 smp_mb(); 664 smp_mb();
645 665
646 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) 666 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
647 goto unwind_buff; 667 goto unwind_buff;
648 668
649 if (adapter->drv_mac_learn) 669 if (adapter->drv_mac_learn)
@@ -651,6 +671,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
651 671
652 adapter->stats.txbytes += skb->len; 672 adapter->stats.txbytes += skb->len;
653 adapter->stats.xmitcalled++; 673 adapter->stats.xmitcalled++;
674 tx_ring->xmit_called++;
654 675
655 qlcnic_update_cmd_producer(tx_ring); 676 qlcnic_update_cmd_producer(tx_ring);
656 677
@@ -673,7 +694,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
673 adapter->ahw->linkup = 0; 694 adapter->ahw->linkup = 0;
674 if (netif_running(netdev)) { 695 if (netif_running(netdev)) {
675 netif_carrier_off(netdev); 696 netif_carrier_off(netdev);
676 netif_stop_queue(netdev); 697 netif_tx_stop_all_queues(netdev);
677 } 698 }
678 } else if (!adapter->ahw->linkup && linkup) { 699 } else if (!adapter->ahw->linkup && linkup) {
679 netdev_info(netdev, "NIC Link is up\n"); 700 netdev_info(netdev, "NIC Link is up\n");
@@ -768,9 +789,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
768 struct net_device *netdev = adapter->netdev; 789 struct net_device *netdev = adapter->netdev;
769 struct qlcnic_skb_frag *frag; 790 struct qlcnic_skb_frag *frag;
770 791
771 if (!spin_trylock(&adapter->tx_clean_lock))
772 return 1;
773
774 sw_consumer = tx_ring->sw_consumer; 792 sw_consumer = tx_ring->sw_consumer;
775 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 793 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
776 794
@@ -788,6 +806,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
788 frag->dma = 0ULL; 806 frag->dma = 0ULL;
789 } 807 }
790 adapter->stats.xmitfinished++; 808 adapter->stats.xmitfinished++;
809 tx_ring->xmit_finished++;
791 dev_kfree_skb_any(buffer->skb); 810 dev_kfree_skb_any(buffer->skb);
792 buffer->skb = NULL; 811 buffer->skb = NULL;
793 } 812 }
@@ -800,10 +819,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
800 if (count && netif_running(netdev)) { 819 if (count && netif_running(netdev)) {
801 tx_ring->sw_consumer = sw_consumer; 820 tx_ring->sw_consumer = sw_consumer;
802 smp_mb(); 821 smp_mb();
803 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { 822 if (netif_tx_queue_stopped(tx_ring->txq) &&
823 netif_carrier_ok(netdev)) {
804 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { 824 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
805 netif_wake_queue(netdev); 825 netif_tx_wake_queue(tx_ring->txq);
806 adapter->stats.xmit_on++; 826 adapter->stats.xmit_on++;
827 tx_ring->xmit_on++;
807 } 828 }
808 } 829 }
809 adapter->tx_timeo_cnt = 0; 830 adapter->tx_timeo_cnt = 0;
@@ -823,7 +844,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
823 */ 844 */
824 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 845 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
825 done = (sw_consumer == hw_consumer); 846 done = (sw_consumer == hw_consumer);
826 spin_unlock(&adapter->tx_clean_lock);
827 847
828 return done; 848 return done;
829} 849}
@@ -833,16 +853,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
833 int tx_complete, work_done; 853 int tx_complete, work_done;
834 struct qlcnic_host_sds_ring *sds_ring; 854 struct qlcnic_host_sds_ring *sds_ring;
835 struct qlcnic_adapter *adapter; 855 struct qlcnic_adapter *adapter;
856 struct qlcnic_host_tx_ring *tx_ring;
836 857
837 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); 858 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
838 adapter = sds_ring->adapter; 859 adapter = sds_ring->adapter;
839 tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring, 860 tx_ring = sds_ring->tx_ring;
861
862 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
840 budget); 863 budget);
841 work_done = qlcnic_process_rcv_ring(sds_ring, budget); 864 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
842 if ((work_done < budget) && tx_complete) { 865 if ((work_done < budget) && tx_complete) {
843 napi_complete(&sds_ring->napi); 866 napi_complete(&sds_ring->napi);
844 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 867 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
845 qlcnic_enable_int(sds_ring); 868 qlcnic_enable_int(sds_ring);
869 qlcnic_enable_tx_intr(adapter, tx_ring);
870 }
871 }
872
873 return work_done;
874}
875
876static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
877{
878 struct qlcnic_host_tx_ring *tx_ring;
879 struct qlcnic_adapter *adapter;
880 int work_done;
881
882 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
883 adapter = tx_ring->adapter;
884
885 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
886 if (work_done) {
887 napi_complete(&tx_ring->napi);
888 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
889 qlcnic_enable_tx_intr(adapter, tx_ring);
846 } 890 }
847 891
848 return work_done; 892 return work_done;
@@ -952,20 +996,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
952 break; 996 break;
953 case 1: 997 case 1:
954 dev_info(dev, "loopback already in progress\n"); 998 dev_info(dev, "loopback already in progress\n");
955 adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; 999 adapter->ahw->diag_cnt = -EINPROGRESS;
956 break; 1000 break;
957 case 2: 1001 case 2:
958 dev_info(dev, "loopback cable is not connected\n"); 1002 dev_info(dev, "loopback cable is not connected\n");
959 adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; 1003 adapter->ahw->diag_cnt = -ENODEV;
960 break; 1004 break;
961 default: 1005 default:
962 dev_info(dev, 1006 dev_info(dev,
963 "loopback configure request failed, err %x\n", 1007 "loopback configure request failed, err %x\n",
964 ret); 1008 ret);
965 adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR; 1009 adapter->ahw->diag_cnt = -EIO;
966 break; 1010 break;
967 } 1011 }
968 break; 1012 break;
1013 case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
1014 qlcnic_dcb_handle_aen(adapter, (void *)&msg);
1015 break;
969 default: 1016 default:
970 break; 1017 break;
971 } 1018 }
@@ -1411,23 +1458,31 @@ void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1411int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, 1458int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
1412 struct net_device *netdev) 1459 struct net_device *netdev)
1413{ 1460{
1414 int ring, max_sds_rings; 1461 int ring;
1415 struct qlcnic_host_sds_ring *sds_ring; 1462 struct qlcnic_host_sds_ring *sds_ring;
1416 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1463 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1464 struct qlcnic_host_tx_ring *tx_ring;
1417 1465
1418 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) 1466 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1419 return -ENOMEM; 1467 return -ENOMEM;
1420 1468
1421 max_sds_rings = adapter->max_sds_rings;
1422
1423 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1469 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1424 sds_ring = &recv_ctx->sds_rings[ring]; 1470 sds_ring = &recv_ctx->sds_rings[ring];
1425 if (ring == adapter->max_sds_rings - 1) 1471 if (qlcnic_check_multi_tx(adapter) &&
1426 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, 1472 !adapter->ahw->diag_test &&
1427 QLCNIC_NETDEV_WEIGHT / max_sds_rings); 1473 (adapter->max_drv_tx_rings > 1)) {
1428 else
1429 netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, 1474 netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
1430 QLCNIC_NETDEV_WEIGHT*2); 1475 NAPI_POLL_WEIGHT);
1476 } else {
1477 if (ring == (adapter->max_sds_rings - 1))
1478 netif_napi_add(netdev, &sds_ring->napi,
1479 qlcnic_poll,
1480 NAPI_POLL_WEIGHT);
1481 else
1482 netif_napi_add(netdev, &sds_ring->napi,
1483 qlcnic_rx_poll,
1484 NAPI_POLL_WEIGHT);
1485 }
1431 } 1486 }
1432 1487
1433 if (qlcnic_alloc_tx_rings(adapter, netdev)) { 1488 if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1435,6 +1490,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
1435 return -ENOMEM; 1490 return -ENOMEM;
1436 } 1491 }
1437 1492
1493 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1494 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1495 tx_ring = &adapter->tx_ring[ring];
1496 netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
1497 NAPI_POLL_WEIGHT);
1498 }
1499 }
1500
1438 return 0; 1501 return 0;
1439} 1502}
1440 1503
@@ -1443,6 +1506,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
1443 int ring; 1506 int ring;
1444 struct qlcnic_host_sds_ring *sds_ring; 1507 struct qlcnic_host_sds_ring *sds_ring;
1445 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1508 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1509 struct qlcnic_host_tx_ring *tx_ring;
1446 1510
1447 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1511 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1448 sds_ring = &recv_ctx->sds_rings[ring]; 1512 sds_ring = &recv_ctx->sds_rings[ring];
@@ -1450,6 +1514,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
1450 } 1514 }
1451 1515
1452 qlcnic_free_sds_rings(adapter->recv_ctx); 1516 qlcnic_free_sds_rings(adapter->recv_ctx);
1517
1518 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1519 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1520 tx_ring = &adapter->tx_ring[ring];
1521 netif_napi_del(&tx_ring->napi);
1522 }
1523 }
1524
1453 qlcnic_free_tx_rings(adapter); 1525 qlcnic_free_tx_rings(adapter);
1454} 1526}
1455 1527
@@ -1457,6 +1529,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
1457{ 1529{
1458 int ring; 1530 int ring;
1459 struct qlcnic_host_sds_ring *sds_ring; 1531 struct qlcnic_host_sds_ring *sds_ring;
1532 struct qlcnic_host_tx_ring *tx_ring;
1460 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1533 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1461 1534
1462 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 1535 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1467,12 +1540,24 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
1467 napi_enable(&sds_ring->napi); 1540 napi_enable(&sds_ring->napi);
1468 qlcnic_enable_int(sds_ring); 1541 qlcnic_enable_int(sds_ring);
1469 } 1542 }
1543
1544 if (qlcnic_check_multi_tx(adapter) &&
1545 (adapter->flags & QLCNIC_MSIX_ENABLED) &&
1546 !adapter->ahw->diag_test &&
1547 (adapter->max_drv_tx_rings > 1)) {
1548 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1549 tx_ring = &adapter->tx_ring[ring];
1550 napi_enable(&tx_ring->napi);
1551 qlcnic_enable_tx_intr(adapter, tx_ring);
1552 }
1553 }
1470} 1554}
1471 1555
1472void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) 1556void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
1473{ 1557{
1474 int ring; 1558 int ring;
1475 struct qlcnic_host_sds_ring *sds_ring; 1559 struct qlcnic_host_sds_ring *sds_ring;
1560 struct qlcnic_host_tx_ring *tx_ring;
1476 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1561 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1477 1562
1478 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 1563 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1484,6 +1569,17 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
1484 napi_synchronize(&sds_ring->napi); 1569 napi_synchronize(&sds_ring->napi);
1485 napi_disable(&sds_ring->napi); 1570 napi_disable(&sds_ring->napi);
1486 } 1571 }
1572
1573 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1574 !adapter->ahw->diag_test &&
1575 qlcnic_check_multi_tx(adapter)) {
1576 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1577 tx_ring = &adapter->tx_ring[ring];
1578 qlcnic_disable_tx_int(adapter, tx_ring);
1579 napi_synchronize(&tx_ring->napi);
1580 napi_disable(&tx_ring->napi);
1581 }
1582 }
1487} 1583}
1488 1584
1489#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36) 1585#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
@@ -1864,7 +1960,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1864int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, 1960int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1865 struct net_device *netdev) 1961 struct net_device *netdev)
1866{ 1962{
1867 int ring, max_sds_rings, temp; 1963 int ring;
1868 struct qlcnic_host_sds_ring *sds_ring; 1964 struct qlcnic_host_sds_ring *sds_ring;
1869 struct qlcnic_host_tx_ring *tx_ring; 1965 struct qlcnic_host_tx_ring *tx_ring;
1870 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1966 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1872,25 +1968,22 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1872 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) 1968 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1873 return -ENOMEM; 1969 return -ENOMEM;
1874 1970
1875 max_sds_rings = adapter->max_sds_rings;
1876 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1971 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1877 sds_ring = &recv_ctx->sds_rings[ring]; 1972 sds_ring = &recv_ctx->sds_rings[ring];
1878 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1973 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1879 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) { 1974 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
1880 netif_napi_add(netdev, &sds_ring->napi, 1975 netif_napi_add(netdev, &sds_ring->napi,
1881 qlcnic_83xx_rx_poll, 1976 qlcnic_83xx_rx_poll,
1882 QLCNIC_NETDEV_WEIGHT * 2); 1977 NAPI_POLL_WEIGHT);
1883 } else { 1978 else
1884 temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
1885 netif_napi_add(netdev, &sds_ring->napi, 1979 netif_napi_add(netdev, &sds_ring->napi,
1886 qlcnic_83xx_msix_sriov_vf_poll, 1980 qlcnic_83xx_msix_sriov_vf_poll,
1887 temp); 1981 NAPI_POLL_WEIGHT);
1888 }
1889 1982
1890 } else { 1983 } else {
1891 netif_napi_add(netdev, &sds_ring->napi, 1984 netif_napi_add(netdev, &sds_ring->napi,
1892 qlcnic_83xx_poll, 1985 qlcnic_83xx_poll,
1893 QLCNIC_NETDEV_WEIGHT / max_sds_rings); 1986 NAPI_POLL_WEIGHT);
1894 } 1987 }
1895 } 1988 }
1896 1989
@@ -1905,7 +1998,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1905 tx_ring = &adapter->tx_ring[ring]; 1998 tx_ring = &adapter->tx_ring[ring];
1906 netif_napi_add(netdev, &tx_ring->napi, 1999 netif_napi_add(netdev, &tx_ring->napi,
1907 qlcnic_83xx_msix_tx_poll, 2000 qlcnic_83xx_msix_tx_poll,
1908 QLCNIC_NETDEV_WEIGHT); 2001 NAPI_POLL_WEIGHT);
1909 } 2002 }
1910 } 2003 }
1911 2004
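
The new qlcnic_tx_poll() above follows the usual NAPI contract: reclaim completed Tx buffers against a budget, and only complete NAPI and unmask the ring interrupt once the remaining work fits under budget. Below is a hedged userspace model of that loop, not driver code; process_cmd_ring() and the ring struct are simplified stand-ins, and the driver's actual completion test differs slightly (it checks work_done rather than work_done < budget).

/* Sketch: budget-bounded Tx completion polling with deferred IRQ unmask. */
#include <stdio.h>

struct ring { int pending; int irq_enabled; };

static int process_cmd_ring(struct ring *r, int budget)
{
	int done = 0;
	while (r->pending > 0 && done < budget) {
		r->pending--;	/* reclaim one completed Tx buffer */
		done++;
	}
	return done;
}

static int tx_poll(struct ring *r, int budget)
{
	int work_done = process_cmd_ring(r, budget);
	if (work_done < budget)	/* ring drained: leave polling mode */
		r->irq_enabled = 1;
	return work_done;
}

int main(void)
{
	struct ring r = { .pending = 70, .irq_enabled = 0 };
	while (!r.irq_enabled)
		printf("polled %d completions\n", tx_poll(&r, 64));
	return 0;
}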
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index bc05d016c859..c4c5023e1fdf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -100,6 +100,8 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
 	{0,}
 };
 
@@ -146,6 +148,11 @@ static const u32 qlcnic_reg_tbl[] = {
 
 static const struct qlcnic_board_info qlcnic_boards[] = {
 	{ PCI_VENDOR_ID_QLOGIC,
+	  PCI_DEVICE_ID_QLOGIC_QLE844X,
+	  0x0,
+	  0x0,
+	  "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+	{ PCI_VENDOR_ID_QLOGIC,
 	  PCI_DEVICE_ID_QLOGIC_QLE834X,
 	  PCI_VENDOR_ID_QLOGIC,
 	  0x24e,
@@ -254,7 +261,6 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
 };
 
 #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-#define QLC_MAX_SDS_RINGS	8
 
 static const
 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -278,12 +284,15 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
 
 int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 {
-	u8 mac_addr[ETH_ALEN];
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
+	u8 mac_addr[ETH_ALEN];
+	int ret;
 
-	if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
-		return -EIO;
+	ret = qlcnic_get_mac_address(adapter, mac_addr,
+				     adapter->ahw->pci_func);
+	if (ret)
+		return ret;
 
 	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
 	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -425,6 +434,21 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
 		cancel_delayed_work_sync(&adapter->fw_work);
 }
 
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+				   struct netdev_phys_port_id *ppid)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+	if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+		return -EOPNOTSUPP;
+
+	ppid->id_len = sizeof(ahw->phys_port_id);
+	memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+	return 0;
+}
+
 static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_open	   = qlcnic_open,
 	.ndo_stop	   = qlcnic_close,
@@ -442,6 +466,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_fdb_add		= qlcnic_fdb_add,
 	.ndo_fdb_del		= qlcnic_fdb_del,
 	.ndo_fdb_dump		= qlcnic_fdb_dump,
+	.ndo_get_phys_port_id	= qlcnic_get_phys_port_id,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = qlcnic_poll_controller,
 #endif
@@ -514,13 +539,36 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
 	.get_board_info			= qlcnic_82xx_get_board_info,
 	.set_mac_filter_count		= qlcnic_82xx_set_mac_filter_count,
 	.free_mac_list			= qlcnic_82xx_free_mac_list,
+	.read_phys_port_id		= qlcnic_82xx_read_phys_port_id,
+	.io_error_detected		= qlcnic_82xx_io_error_detected,
+	.io_slot_reset			= qlcnic_82xx_io_slot_reset,
+	.io_resume			= qlcnic_82xx_io_resume,
 };
 
+static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int num_tx_q;
+
+	if (ahw->msix_supported &&
+	    (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+		num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
+				 num_online_cpus());
+		if (num_tx_q > 1) {
+			test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
+					 &adapter->state);
+			adapter->max_drv_tx_rings = num_tx_q;
+		}
+	} else {
+		adapter->max_drv_tx_rings = 1;
+	}
+}
+
 int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 {
 	struct pci_dev *pdev = adapter->pdev;
+	int max_tx_rings, max_sds_rings, tx_vector;
 	int err = -1, i;
-	int max_tx_rings, tx_vector;
 
 	if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
 		max_tx_rings = 0;
@@ -554,7 +602,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 			adapter->max_sds_rings = num_msix -
 						 max_tx_rings - 1;
 		} else {
-			adapter->max_sds_rings = num_msix;
+			adapter->ahw->num_msix = num_msix;
+			if (qlcnic_check_multi_tx(adapter) &&
+			    !adapter->ahw->diag_test &&
+			    (adapter->max_drv_tx_rings > 1))
+				max_sds_rings = num_msix - max_tx_rings;
+			else
+				max_sds_rings = num_msix;
+
+			adapter->max_sds_rings = max_sds_rings;
 		}
 		dev_info(&pdev->dev, "using msi-x interrupts\n");
 		return err;
@@ -570,6 +626,8 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 			num_msix += (max_tx_rings + 1);
 		} else {
 			num_msix = rounddown_pow_of_two(err);
+			if (qlcnic_check_multi_tx(adapter))
+				num_msix += max_tx_rings;
 		}
 
 		if (num_msix) {
@@ -605,6 +663,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
 		adapter->msix_entries[0].vector = pdev->irq;
 		return err;
 	}
+
 	if (qlcnic_use_msi || qlcnic_use_msi_x)
 		return -EOPNOTSUPP;
 
@@ -621,28 +680,69 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
 	return err;
 }
 
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int num_msix, err = 0;
 
 	if (!num_intr)
 		num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
 
-	if (adapter->ahw->msix_supported)
+	if (ahw->msix_supported) {
 		num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
 						      num_intr));
-	else
+		if (qlcnic_check_multi_tx(adapter)) {
+			if (txq)
+				adapter->max_drv_tx_rings = txq;
+			num_msix += adapter->max_drv_tx_rings;
+		}
+	} else {
 		num_msix = 1;
+	}
 
 	err = qlcnic_enable_msix(adapter, num_msix);
-	if (err == -ENOMEM || !err)
+	if (err == -ENOMEM)
 		return err;
 
-	err = qlcnic_enable_msi_legacy(adapter);
-	if (!err)
+	if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+		qlcnic_disable_multi_tx(adapter);
+
+		err = qlcnic_enable_msi_legacy(adapter);
+		if (!err)
+			return err;
+	}
+
+	return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int err, i;
+
+	if (qlcnic_check_multi_tx(adapter) &&
+	    !ahw->diag_test &&
+	    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+		ahw->intr_tbl = vzalloc(ahw->num_msix *
+					sizeof(struct qlcnic_intrpt_config));
+		if (!ahw->intr_tbl)
+			return -ENOMEM;
+
+		for (i = 0; i < ahw->num_msix; i++) {
+			ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+			ahw->intr_tbl[i].id = i;
+			ahw->intr_tbl[i].src = 0;
+		}
+
+		err = qlcnic_82xx_config_intrpt(adapter, 1);
+		if (err)
+			dev_err(&adapter->pdev->dev,
+				"Failed to configure Interrupt for %d vector\n",
+				ahw->num_msix);
 		return err;
+	}
 
-	return -EIO;
+	return 0;
 }
 
 void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
@@ -696,6 +796,23 @@ static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
 	return ret;
 }
 
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+	bool ret = false;
+
+	if (qlcnic_84xx_check(adapter)) {
+		ret = true;
+	} else if (qlcnic_83xx_check(adapter)) {
+		if (adapter->ahw->extra_capability[0] &
+		    QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+			ret = true;
+		else
+			ret = false;
+	}
+
+	return ret;
+}
+
 int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_pci_info *pci_info;
@@ -739,18 +856,30 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 		    (pci_info[i].type != QLCNIC_TYPE_NIC))
 			continue;
 
+		if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+			if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+				adapter->npars[j].eswitch_status = true;
+			else
+				continue;
+		} else {
+			adapter->npars[j].eswitch_status = true;
+		}
+
 		adapter->npars[j].pci_func = pfn;
 		adapter->npars[j].active = (u8)pci_info[i].active;
 		adapter->npars[j].type = (u8)pci_info[i].type;
 		adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
 		adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
 		adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
 		j++;
 	}
 
-	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
-		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
-		if (qlcnic_83xx_check(adapter))
+	if (qlcnic_82xx_check(adapter)) {
+		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+			adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+	} else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
 			qlcnic_enable_eswitch(adapter, i, 1);
 	}
 
@@ -829,7 +958,9 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
 		*bar = QLCNIC_82XX_BAR0_LENGTH;
 		break;
 	case PCI_DEVICE_ID_QLOGIC_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_QLE844X:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
 		*bar = QLCNIC_83XX_BAR0_LENGTH;
 		break;
 	default:
@@ -870,8 +1001,8 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
 	return 0;
 }
 
-static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
-						int index)
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+					 int index)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned short subsystem_vendor;
@@ -1173,6 +1304,9 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
 		return 0;
 
 	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+		if (!adapter->npars[i].eswitch_status)
+			continue;
+
 		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
 		esw_cfg.pci_func = adapter->npars[i].pci_func;
 		esw_cfg.mac_override = BIT_0;
@@ -1235,6 +1369,9 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
 	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
 		npar = &adapter->npars[i];
 		pci_func = npar->pci_func;
+		if (!adapter->npars[i].eswitch_status)
+			continue;
+
 		memset(&nic_info, 0, sizeof(struct qlcnic_info));
 		err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
 		if (err)
@@ -1413,6 +1550,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
 			for (ring = 0; ring < num_sds_rings; ring++) {
 				sds_ring = &recv_ctx->sds_rings[ring];
 				if (qlcnic_82xx_check(adapter) &&
+				    !qlcnic_check_multi_tx(adapter) &&
 				    (ring == (num_sds_rings - 1))) {
 					if (!(adapter->flags &
 					      QLCNIC_MSIX_ENABLED))
@@ -1436,9 +1574,11 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
 					return err;
 			}
 		}
-		if (qlcnic_83xx_check(adapter) &&
-		    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
-		    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+		if ((qlcnic_82xx_check(adapter) &&
+		     qlcnic_check_multi_tx(adapter)) ||
+		    (qlcnic_83xx_check(adapter) &&
+		     (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+		     !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
 			handler = qlcnic_msix_tx_intr;
 			for (ring = 0; ring < adapter->max_drv_tx_rings;
 			     ring++) {
@@ -1473,8 +1613,10 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
 				free_irq(sds_ring->irq, sds_ring);
 			}
 		}
-		if (qlcnic_83xx_check(adapter) &&
-		    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+		if ((qlcnic_83xx_check(adapter) &&
+		     !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+		    (qlcnic_82xx_check(adapter) &&
+		     qlcnic_check_multi_tx(adapter))) {
 			for (ring = 0; ring < adapter->max_drv_tx_rings;
 			     ring++) {
 				tx_ring = &adapter->tx_ring[ring];
@@ -1510,8 +1652,10 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
 	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return 0;
+
 	if (qlcnic_set_eswitch_port_config(adapter))
 		return -EIO;
+
 	qlcnic_get_lro_mss_capability(adapter);
 
 	if (qlcnic_fw_create_ctx(adapter))
@@ -1558,6 +1702,8 @@ int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
 void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 {
+	int ring;
+
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
 		return;
 
@@ -1567,7 +1713,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	if (qlcnic_sriov_vf_check(adapter))
 		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 	smp_mb();
-	spin_lock(&adapter->tx_clean_lock);
 	netif_carrier_off(netdev);
 	adapter->ahw->linkup = 0;
 	netif_tx_disable(netdev);
@@ -1585,8 +1730,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
 
 	qlcnic_reset_rx_buffers_list(adapter);
-	qlcnic_release_tx_buffers(adapter);
-	spin_unlock(&adapter->tx_clean_lock);
+
+	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
 }
 
 /* Usage: During suspend and firmware recovery module */
@@ -1666,6 +1812,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
+	int max_tx_rings = adapter->max_drv_tx_rings;
 	int ring;
 
 	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
@@ -1682,6 +1829,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
 
 	adapter->ahw->diag_test = 0;
 	adapter->max_sds_rings = max_sds_rings;
+	adapter->max_drv_tx_rings = max_tx_rings;
 
 	if (qlcnic_attach(adapter))
 		goto out;
@@ -1750,6 +1898,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
 	adapter->max_sds_rings = 1;
 	adapter->ahw->diag_test = test;
 	adapter->ahw->linkup = 0;
+	adapter->max_drv_tx_rings = 1;
 
 	ret = qlcnic_attach(adapter);
 	if (ret) {
@@ -1907,12 +2056,18 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->irq = adapter->msix_entries[0].vector;
 
+	err = qlcnic_set_real_num_queues(adapter, netdev);
+	if (err)
+		return err;
+
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to register net device\n");
 		return err;
 	}
 
+	qlcnic_dcb_init_dcbnl_ops(adapter);
+
 	return 0;
 }
 
@@ -1975,7 +2130,8 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 		tx_ring->cmd_buf_arr = cmd_buf_arr;
 	}
 
-	if (qlcnic_83xx_check(adapter)) {
+	if (qlcnic_83xx_check(adapter) ||
+	    (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
 		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			tx_ring->adapter = adapter;
@@ -1986,6 +2142,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 			}
 		}
 	}
+
 	return 0;
 }
 
@@ -2004,6 +2161,17 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
 		qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
+static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+	return __qlcnic_register_dcb(adapter);
+}
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
+{
+	kfree(adapter->dcb);
+	adapter->dcb = NULL;
+}
+
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2048,9 +2216,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
 		break;
 	case PCI_DEVICE_ID_QLOGIC_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_QLE844X:
 		qlcnic_83xx_register_map(ahw);
 		break;
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
 		qlcnic_sriov_vf_register_map(ahw);
 		break;
 	default:
@@ -2061,7 +2231,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_out_free_hw_res;
 
-	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+	netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+				   QLCNIC_MAX_TX_RINGS);
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_out_iounmap;
@@ -2091,14 +2262,14 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		adapter->fdb_mac_learn = true;
 	else if (qlcnic_mac_learn == DRV_MAC_LEARN)
 		adapter->drv_mac_learn = true;
-	adapter->max_drv_tx_rings = 1;
 
 	rwlock_init(&adapter->ahw->crb_lock);
 	mutex_init(&adapter->ahw->mem_lock);
 
-	spin_lock_init(&adapter->tx_clean_lock);
 	INIT_LIST_HEAD(&adapter->mac_list);
 
+	qlcnic_register_dcb(adapter);
+
 	if (qlcnic_82xx_check(adapter)) {
 		qlcnic_check_vf(adapter, ent);
 		adapter->portnum = adapter->ahw->pci_func;
@@ -2108,12 +2279,31 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_free_hw;
 		}
 
+		qlcnic_get_multiq_capability(adapter);
+
+		if ((adapter->ahw->act_pci_func > 2) &&
+		    qlcnic_check_multi_tx(adapter)) {
+			adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
+			dev_info(&adapter->pdev->dev,
+				 "vNIC mode enabled, Set max TX rings = %d\n",
+				 adapter->max_drv_tx_rings);
+		}
+
+		if (!qlcnic_check_multi_tx(adapter)) {
+			clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+			adapter->max_drv_tx_rings = 1;
+		}
 		err = qlcnic_setup_idc_param(adapter);
 		if (err)
 			goto err_out_free_hw;
 
 		adapter->flags |= QLCNIC_NEED_FLR;
+
+		if (adapter->dcb && qlcnic_dcb_attach(adapter))
+			qlcnic_clear_dcb_ops(adapter);
+
 	} else if (qlcnic_83xx_check(adapter)) {
+		adapter->max_drv_tx_rings = 1;
 		qlcnic_83xx_check_vf(adapter, ent);
 		adapter->portnum = adapter->ahw->pci_func;
 		err = qlcnic_83xx_init(adapter, pci_using_dac);
@@ -2132,6 +2322,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (qlcnic_read_mac_addr(adapter))
 		dev_warn(&pdev->dev, "failed to read mac addr\n");
 
+	qlcnic_read_phys_port_id(adapter);
+
 	if (adapter->portnum == 0) {
 		qlcnic_get_board_name(adapter, board_name);
 
@@ -2145,16 +2337,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_warn(&pdev->dev,
 			 "Device does not support MSI interrupts\n");
 
-	err = qlcnic_setup_intr(adapter, 0);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to setup interrupt\n");
-		goto err_out_disable_msi;
-	}
-
-	if (qlcnic_83xx_check(adapter)) {
-		err = qlcnic_83xx_setup_mbx_intr(adapter);
-		if (err)
+	if (qlcnic_82xx_check(adapter)) {
+		err = qlcnic_setup_intr(adapter, 0, 0);
+		if (err) {
+			dev_err(&pdev->dev, "Failed to setup interrupt\n");
 			goto err_out_disable_msi;
+		}
 	}
 
 	err = qlcnic_get_act_pci_func(adapter);
@@ -2238,13 +2426,18 @@ static void qlcnic_remove(struct pci_dev *pdev)
 	qlcnic_cancel_idc_work(adapter);
 	ahw = adapter->ahw;
 
+	qlcnic_dcb_free(adapter);
+
 	unregister_netdev(netdev);
 	qlcnic_sriov_cleanup(adapter);
 
 	if (qlcnic_83xx_check(adapter)) {
-		qlcnic_83xx_free_mbx_intr(adapter);
 		qlcnic_83xx_register_nic_idc_func(adapter, 0);
 		cancel_delayed_work_sync(&adapter->idc_aen_work);
+		qlcnic_83xx_free_mbx_intr(adapter);
+		qlcnic_83xx_detach_mailbox_work(adapter);
+		qlcnic_83xx_free_mailbox(ahw->mailbox);
+		kfree(ahw->fw_info);
 	}
 
 	qlcnic_detach(adapter);
@@ -2278,6 +2471,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
 		destroy_workqueue(adapter->qlcnic_wq);
 		adapter->qlcnic_wq = NULL;
 	}
+
 	qlcnic_free_adapter_resources(adapter);
 	kfree(ahw);
 	free_netdev(netdev);
@@ -2336,7 +2530,7 @@ static int qlcnic_open(struct net_device *netdev)
 	if (err)
 		goto err_out;
 
-	netif_start_queue(netdev);
+	netif_tx_start_all_queues(netdev);
 
 	return 0;
 
@@ -2468,6 +2662,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
 static void qlcnic_tx_timeout(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_host_tx_ring *tx_ring;
+	int ring;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return;
@@ -2481,6 +2677,25 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
 			 QLCNIC_FORCE_FW_DUMP_KEY);
 	} else {
 		netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+		if (qlcnic_82xx_check(adapter)) {
+			for (ring = 0; ring < adapter->max_drv_tx_rings;
+			     ring++) {
+				tx_ring = &adapter->tx_ring[ring];
+				dev_info(&netdev->dev, "ring=%d\n", ring);
+				dev_info(&netdev->dev, "crb_intr_mask=%d\n",
+					 readl(tx_ring->crb_intr_mask));
+				dev_info(&netdev->dev, "producer=%d\n",
+					 readl(tx_ring->crb_cmd_producer));
+				dev_info(&netdev->dev, "sw_consumer = %d\n",
+					 tx_ring->sw_consumer);
+				dev_info(&netdev->dev, "hw_consumer = %d\n",
+					 le32_to_cpu(*(tx_ring->hw_consumer)));
+				dev_info(&netdev->dev, "xmit-on=%llu\n",
+					 tx_ring->xmit_on);
+				dev_info(&netdev->dev, "xmit-off=%llu\n",
+					 tx_ring->xmit_off);
+			}
+		}
 		adapter->ahw->reset_context = 1;
 	}
 }
@@ -2869,7 +3084,7 @@ skip_ack_check:
 	qlcnic_api_unlock(adapter);
 
 	rtnl_lock();
-	if (adapter->ahw->fw_dump.enable &&
+	if (qlcnic_check_fw_dump_state(adapter) &&
 	    (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
 		QLCDB(adapter, DRV, "Take FW dump\n");
 		qlcnic_dump_fw(adapter);
@@ -3074,6 +3289,8 @@ qlcnic_attach_work(struct work_struct *work)
 		return;
 	}
 attach:
+	qlcnic_dcb_get_info(adapter);
+
 	if (netif_running(netdev)) {
 		if (qlcnic_up(adapter, netdev))
 			goto done;
@@ -3245,7 +3462,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
 	qlcnic_clr_drv_state(adapter);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-	err = qlcnic_setup_intr(adapter, 0);
+	err = qlcnic_setup_intr(adapter, 0, 0);
 
 	if (err) {
 		kfree(adapter->msix_entries);
@@ -3253,19 +3470,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
 		return err;
 	}
 
-	if (qlcnic_83xx_check(adapter)) {
-		/* register for NIC IDC AEN Events */
-		qlcnic_83xx_register_nic_idc_func(adapter, 1);
-		err = qlcnic_83xx_setup_mbx_intr(adapter);
-		if (err) {
-			dev_err(&adapter->pdev->dev,
-				"failed to setup mbx interrupt\n");
-			qlcnic_clr_all_drv_state(adapter, 1);
-			clear_bit(__QLCNIC_AER, &adapter->state);
-			goto done;
-		}
-	}
-
	if (netif_running(netdev)) {
 		err = qlcnic_attach(adapter);
 		if (err) {
@@ -3286,8 +3490,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
 	return err;
 }
 
-static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
-						 pci_channel_state_t state)
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
@@ -3306,12 +3510,6 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
 	if (netif_running(netdev))
 		qlcnic_down(adapter, netdev);
 
-	if (qlcnic_83xx_check(adapter)) {
-		qlcnic_83xx_free_mbx_intr(adapter);
-		qlcnic_83xx_register_nic_idc_func(adapter, 0);
-		cancel_delayed_work_sync(&adapter->idc_aen_work);
-	}
-
 	qlcnic_detach(adapter);
 	qlcnic_teardown_intr(adapter);
 
@@ -3323,13 +3521,13 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
-static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
 {
 	return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
				PCI_ERS_RESULT_RECOVERED;
 }
 
-static void qlcnic_io_resume(struct pci_dev *pdev)
+void qlcnic_82xx_io_resume(struct pci_dev *pdev)
 {
 	u32 state;
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3339,9 +3537,48 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
 	if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
							    &adapter->state))
 		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
				     FW_POLL_DELAY);
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+						 pci_channel_state_t state)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+	if (hw_ops->io_error_detected) {
+		return hw_ops->io_error_detected(pdev, state);
+	} else {
+		dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+	if (hw_ops->io_slot_reset) {
+		return hw_ops->io_slot_reset(pdev);
+	} else {
+		dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+	if (hw_ops->io_resume)
+		hw_ops->io_resume(pdev);
+	else
+		dev_err(&pdev->dev, "AER resume handler not registered.\n");
 }
 
+
 static int
 qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
 {
@@ -3370,16 +3607,65 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
 	return err;
 }
 
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+{
+	struct net_device *netdev = adapter->netdev;
+	u8 max_hw = QLCNIC_MAX_TX_RINGS;
+	u32 max_allowed;
+
+	if (!qlcnic_82xx_check(adapter)) {
+		netdev_err(netdev, "No Multi TX-Q support\n");
+		return -EINVAL;
+	}
+
+	if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+		netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+		return -EINVAL;
+	}
+
+	if (!qlcnic_check_multi_tx(adapter)) {
+		netdev_err(netdev, "No Multi TX-Q support\n");
+		return -EINVAL;
+	}
+
+	if (txq > QLCNIC_MAX_TX_RINGS) {
+		netdev_err(netdev, "Invalid ring count\n");
+		return -EINVAL;
+	}
+
+	max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+						 num_online_cpus()));
+	if ((txq > max_allowed) || !is_power_of_2(txq)) {
+		if (!is_power_of_2(txq))
+			netdev_err(netdev,
+				   "TX queue should be a power of 2\n");
+		if (txq > num_online_cpus())
+			netdev_err(netdev,
+				   "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
+				   num_online_cpus());
+		netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
			    __u32 val)
 {
 	struct net_device *netdev = adapter->netdev;
 	u8 max_hw = adapter->ahw->max_rx_ques;
 	u32 max_allowed;
 
-	if (val > QLC_MAX_SDS_RINGS) {
+	if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+	    !qlcnic_use_msi) {
+		netdev_err(netdev, "No RSS support in INT-x mode\n");
+		return -EINVAL;
+	}
+
+	if (val > QLCNIC_MAX_SDS_RINGS) {
 		netdev_err(netdev, "RSS value should not be higher than %u\n",
-			   QLC_MAX_SDS_RINGS);
+			   QLCNIC_MAX_SDS_RINGS);
 		return -EINVAL;
 	}
 
@@ -3409,27 +3695,48 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
 	return 0;
 }
 
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
 {
 	int err;
 	struct net_device *netdev = adapter->netdev;
+	int num_msix;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;
 
+	if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+	    !qlcnic_use_msi) {
+		netdev_err(netdev, "No RSS support in INT-x mode\n");
+		return -EINVAL;
+	}
+
 	netif_device_detach(netdev);
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
 
 	qlcnic_detach(adapter);
 
+	if (qlcnic_82xx_check(adapter)) {
+		if (txq != 0)
+			adapter->max_drv_tx_rings = txq;
+
+		if (qlcnic_check_multi_tx(adapter) &&
+		    (txq > adapter->max_drv_tx_rings))
+			num_msix = adapter->max_drv_tx_rings;
+		else
+			num_msix = data;
+	}
+
 	if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_free_mbx_intr(adapter);
 		qlcnic_83xx_enable_mbx_poll(adapter);
 	}
 
+	netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
+
 	qlcnic_teardown_intr(adapter);
-	err = qlcnic_setup_intr(adapter, data);
+
+	err = qlcnic_setup_intr(adapter, data, txq);
 	if (err) {
 		kfree(adapter->msix_entries);
 		netdev_err(netdev, "failed to setup interrupt\n");
@@ -3457,8 +3764,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
 			goto done;
 		qlcnic_restore_indev_addr(netdev, NETDEV_UP);
 	}
-	err = len;
- done:
+done:
 	netif_device_attach(netdev);
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 79e54efe07b9..15513608d480 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1082,14 +1082,17 @@ flash_temp:
1082 } 1082 }
1083 1083
1084 tmpl_hdr = ahw->fw_dump.tmpl_hdr; 1084 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
1085 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; 1085 tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
1086 dev_info(&adapter->pdev->dev,
1087 "Default minidump capture mask 0x%x\n",
1088 tmpl_hdr->cap_mask);
1086 1089
1087 if ((tmpl_hdr->version & 0xfffff) >= 0x20001) 1090 if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
1088 ahw->fw_dump.use_pex_dma = true; 1091 ahw->fw_dump.use_pex_dma = true;
1089 else 1092 else
1090 ahw->fw_dump.use_pex_dma = false; 1093 ahw->fw_dump.use_pex_dma = false;
1091 1094
1092 ahw->fw_dump.enable = 1; 1095 qlcnic_enable_fw_dump_state(adapter);
1093 1096
1094 return 0; 1097 return 0;
1095} 1098}
@@ -1112,7 +1115,11 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1112 1115
1113 ahw = adapter->ahw; 1116 ahw = adapter->ahw;
1114 1117
1115 if (!fw_dump->enable) { 1118 /* Return if we don't have a firmware dump template header */
1119 if (!tmpl_hdr)
1120 return -EIO;
1121
1122 if (!qlcnic_check_fw_dump_state(adapter)) {
1116 dev_info(&adapter->pdev->dev, "Dump not enabled\n"); 1123 dev_info(&adapter->pdev->dev, "Dump not enabled\n");
1117 return -EIO; 1124 return -EIO;
1118 } 1125 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 5d40045b3cea..652cc13c5023 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
33static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); 33static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
34static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); 34static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
35static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); 35static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
36static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *, 36static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
37 struct qlcnic_cmd_args *); 37 struct qlcnic_cmd_args *);
38static void qlcnic_sriov_process_bc_cmd(struct work_struct *); 38static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
39 39
@@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
45 .get_mac_address = qlcnic_83xx_get_mac_address, 45 .get_mac_address = qlcnic_83xx_get_mac_address,
46 .setup_intr = qlcnic_83xx_setup_intr, 46 .setup_intr = qlcnic_83xx_setup_intr,
47 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, 47 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
48 .mbx_cmd = qlcnic_sriov_vf_mbx_op, 48 .mbx_cmd = qlcnic_sriov_issue_cmd,
49 .get_func_no = qlcnic_83xx_get_func_no, 49 .get_func_no = qlcnic_83xx_get_func_no,
50 .api_lock = qlcnic_83xx_cam_lock, 50 .api_lock = qlcnic_83xx_cam_lock,
51 .api_unlock = qlcnic_83xx_cam_unlock, 51 .api_unlock = qlcnic_83xx_cam_unlock,
@@ -286,96 +286,38 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
286static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, 286static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
287 u32 *pay, u8 pci_func, u8 size) 287 u32 *pay, u8 pci_func, u8 size)
288{ 288{
289 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
290 struct qlcnic_hardware_context *ahw = adapter->ahw; 289 struct qlcnic_hardware_context *ahw = adapter->ahw;
291 unsigned long flags; 290 struct qlcnic_mailbox *mbx = ahw->mailbox;
292 u16 opcode; 291 struct qlcnic_cmd_args cmd;
293 u8 mbx_err_code; 292 unsigned long timeout;
294 int i, j; 293 int err;
295
296 opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
297
298 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
299 dev_info(&adapter->pdev->dev,
300 "Mailbox cmd attempted, 0x%x\n", opcode);
301 dev_info(&adapter->pdev->dev, "Mailbox detached\n");
302 return 0;
303 }
304
305 spin_lock_irqsave(&ahw->mbx_lock, flags);
306
307 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
308 if (mbx_val) {
309 QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
310 spin_unlock_irqrestore(&ahw->mbx_lock, flags);
311 return QLCNIC_RCODE_TIMEOUT;
312 }
313 /* Fill in mailbox registers */
314 val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
315 mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
316
317 writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
318 mbx_cmd = 0x1 | (1 << 4);
319 294
320 if (qlcnic_sriov_pf_check(adapter)) 295 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
321 mbx_cmd |= (pci_func << 5); 296 cmd.hdr = hdr;
297 cmd.pay = pay;
298 cmd.pay_size = size;
299 cmd.func_num = pci_func;
300 cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
301 cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
322 302
323 writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); 303 err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
324 for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); 304 if (err) {
325 i++, j++) { 305 dev_err(&adapter->pdev->dev,
326 writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i)); 306 "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
307 __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
308 ahw->op_mode);
309 return err;
327 } 310 }
328 for (j = 0; j < size; j++, i++)
329 writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
330 311
331 /* Signal FW about the impending command */ 312 if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
332 QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); 313 dev_err(&adapter->pdev->dev,
333 314 "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
334 /* Waiting for the mailbox cmd to complete and while waiting here 315 __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
335 * some AEN might arrive. If more than 5 seconds expire we can 316 ahw->op_mode);
336 * assume something is wrong. 317 flush_workqueue(mbx->work_q);
337 */
338poll:
339 rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
340 if (rsp != QLCNIC_RCODE_TIMEOUT) {
341 /* Get the FW response data */
342 fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
343 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
344 __qlcnic_83xx_process_aen(adapter);
345 goto poll;
346 }
347 mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
348 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
349 opcode = QLCNIC_MBX_RSP(fw_data);
350
351 switch (mbx_err_code) {
352 case QLCNIC_MBX_RSP_OK:
353 case QLCNIC_MBX_PORT_RSP_OK:
354 rsp = QLCNIC_RCODE_SUCCESS;
355 break;
356 default:
357 if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
358 rsp = qlcnic_83xx_mac_rcode(adapter);
359 if (!rsp)
360 goto out;
361 }
362 dev_err(&adapter->pdev->dev,
363 "MBX command 0x%x failed with err:0x%x\n",
364 opcode, mbx_err_code);
365 rsp = mbx_err_code;
366 break;
367 }
368 goto out;
369 } 318 }
370 319
371 dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n", 320 return cmd.rsp_opcode;
372 QLCNIC_MBX_RSP(mbx_cmd));
373 rsp = QLCNIC_RCODE_TIMEOUT;
374out:
375 /* clear fw mbx control register */
376 QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
377 spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
378 return rsp;
379} 321}
380 322
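The rewritten post_bc_msg above drops the hand-rolled register polling in favor of the generic mailbox path: fill a qlcnic_cmd_args, enqueue it via mbx->ops->enqueue_cmd(), and sleep on a completion with a timeout. A userspace sketch of that enqueue-and-wait shape using POSIX threads; all names are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct cmd {
	pthread_mutex_t lock;
	pthread_cond_t done;	/* plays the role of cmd.completion */
	int completed;
	int rsp;
};

static void *worker(void *arg)
{
	struct cmd *c = arg;

	/* Pretend to talk to firmware, then complete the command. */
	pthread_mutex_lock(&c->lock);
	c->rsp = 0;		/* success */
	c->completed = 1;
	pthread_cond_signal(&c->done);
	pthread_mutex_unlock(&c->lock);
	return NULL;
}

static int issue_cmd(struct cmd *c, int timeout_sec)
{
	struct timespec ts;
	pthread_t tid;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_create(&tid, NULL, worker, c);	/* the "enqueue" step */
	pthread_mutex_lock(&c->lock);
	while (!c->completed && !err)	/* cf. wait_for_completion_timeout */
		err = pthread_cond_timedwait(&c->done, &c->lock, &ts);
	pthread_mutex_unlock(&c->lock);
	pthread_join(tid, NULL);
	return err ? -1 : c->rsp;	/* -1 stands in for a timeout code */
}

int main(void)
{
	struct cmd c = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			 0, 0 };

	printf("rsp=%d\n", issue_cmd(&c, 5));
	return 0;
}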
381static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter) 323static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
@@ -458,7 +400,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
458static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter, 400static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
459 struct qlcnic_cmd_args *cmd) 401 struct qlcnic_cmd_args *cmd)
460{ 402{
461 adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff; 403 adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
462 adapter->flags &= ~QLCNIC_TAGGING_ENABLED; 404 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
463 return 0; 405 return 0;
464} 406}
@@ -490,11 +432,12 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
490 return 0; 432 return 0;
491} 433}
492 434
493static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) 435static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
436 struct qlcnic_info *info)
494{ 437{
495 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 438 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
496 struct qlcnic_cmd_args cmd; 439 struct qlcnic_cmd_args cmd;
497 int ret; 440 int ret = 0;
498 441
499 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); 442 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
500 if (ret) 443 if (ret)
@@ -522,8 +465,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
522 465
523static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) 466static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
524{ 467{
525 struct qlcnic_info nic_info;
526 struct qlcnic_hardware_context *ahw = adapter->ahw; 468 struct qlcnic_hardware_context *ahw = adapter->ahw;
469 struct qlcnic_info nic_info;
527 int err; 470 int err;
528 471
529 err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0); 472 err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
@@ -534,7 +477,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
534 if (err) 477 if (err)
535 return -EIO; 478 return -EIO;
536 479
537 err = qlcnic_sriov_get_vf_acl(adapter); 480 err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
538 if (err) 481 if (err)
539 return err; 482 return err;
540 483
@@ -564,7 +507,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
564 dev_warn(&adapter->pdev->dev, 507 dev_warn(&adapter->pdev->dev,
565 "Device does not support MSI interrupts\n"); 508 "Device does not support MSI interrupts\n");
566 509
567 err = qlcnic_setup_intr(adapter, 1); 510 err = qlcnic_setup_intr(adapter, 1, 0);
568 if (err) { 511 if (err) {
569 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); 512 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
570 goto err_out_disable_msi; 513 goto err_out_disable_msi;
@@ -590,6 +533,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
590 if (err) 533 if (err)
591 goto err_out_send_channel_term; 534 goto err_out_send_channel_term;
592 535
536 if (adapter->dcb && qlcnic_dcb_attach(adapter))
537 qlcnic_clear_dcb_ops(adapter);
538
593 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); 539 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
594 if (err) 540 if (err)
595 goto err_out_send_channel_term; 541 goto err_out_send_channel_term;
@@ -597,6 +543,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
597 pci_set_drvdata(adapter->pdev, adapter); 543 pci_set_drvdata(adapter->pdev, adapter);
598 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", 544 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
599 adapter->netdev->name); 545 adapter->netdev->name);
546
600 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 547 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
601 adapter->ahw->idc.delay); 548 adapter->ahw->idc.delay);
602 return 0; 549 return 0;
@@ -637,8 +584,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
637 struct qlcnic_hardware_context *ahw = adapter->ahw; 584 struct qlcnic_hardware_context *ahw = adapter->ahw;
638 int err; 585 int err;
639 586
640 spin_lock_init(&ahw->mbx_lock);
641 set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
642 set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status); 587 set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
643 ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; 588 ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
644 ahw->reset_context = 0; 589 ahw->reset_context = 0;
@@ -1085,6 +1030,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
1085 if (test_bit(QLC_BC_VF_FLR, &vf->state)) 1030 if (test_bit(QLC_BC_VF_FLR, &vf->state))
1086 return; 1031 return;
1087 1032
1033 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1088 trans = list_first_entry(&vf->rcv_act.wait_list, 1034 trans = list_first_entry(&vf->rcv_act.wait_list,
1089 struct qlcnic_bc_trans, list); 1035 struct qlcnic_bc_trans, list);
1090 adapter = vf->adapter; 1036 adapter = vf->adapter;
@@ -1234,6 +1180,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
1234 return; 1180 return;
1235 } 1181 }
1236 1182
1183 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1237 cmd_op = hdr->cmd_op; 1184 cmd_op = hdr->cmd_op;
1238 if (qlcnic_sriov_alloc_bc_trans(&trans)) 1185 if (qlcnic_sriov_alloc_bc_trans(&trans))
1239 return; 1186 return;
@@ -1359,7 +1306,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1359 if (enable) 1306 if (enable)
1360 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); 1307 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1361 1308
1362 err = qlcnic_83xx_mbx_op(adapter, &cmd); 1309 err = qlcnic_83xx_issue_cmd(adapter, &cmd);
1363 1310
1364 if (err != QLCNIC_RCODE_SUCCESS) { 1311 if (err != QLCNIC_RCODE_SUCCESS) {
1365 dev_err(&adapter->pdev->dev, 1312 dev_err(&adapter->pdev->dev,
@@ -1391,10 +1338,11 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1391 return -EIO; 1338 return -EIO;
1392} 1339}
1393 1340
1394static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter, 1341static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1395 struct qlcnic_cmd_args *cmd) 1342 struct qlcnic_cmd_args *cmd)
1396{ 1343{
1397 struct qlcnic_hardware_context *ahw = adapter->ahw; 1344 struct qlcnic_hardware_context *ahw = adapter->ahw;
1345 struct qlcnic_mailbox *mbx = ahw->mailbox;
1398 struct device *dev = &adapter->pdev->dev; 1346 struct device *dev = &adapter->pdev->dev;
1399 struct qlcnic_bc_trans *trans; 1347 struct qlcnic_bc_trans *trans;
1400 int err; 1348 int err;
@@ -1411,7 +1359,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
1411 goto cleanup_transaction; 1359 goto cleanup_transaction;
1412 1360
1413retry: 1361retry:
1414 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { 1362 if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
1415 rsp = -EIO; 1363 rsp = -EIO;
1416 QLCDB(adapter, DRV, "MBX not ready! (cmd 0x%x) for VF 0x%x\n", 1364 QLCDB(adapter, DRV, "MBX not ready! (cmd 0x%x) for VF 0x%x\n",
1417 QLCNIC_MBX_RSP(cmd->req.arg[0]), func); 1365 QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
@@ -1454,7 +1402,7 @@ err_out:
1454 if (rsp == QLCNIC_RCODE_TIMEOUT) { 1402 if (rsp == QLCNIC_RCODE_TIMEOUT) {
1455 ahw->reset_context = 1; 1403 ahw->reset_context = 1;
1456 adapter->need_fw_reset = 1; 1404 adapter->need_fw_reset = 1;
1457 clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); 1405 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1458 } 1406 }
1459 1407
1460cleanup_transaction: 1408cleanup_transaction:
@@ -1613,8 +1561,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1613{ 1561{
1614 int err; 1562 int err;
1615 1563
1616 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); 1564 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1617 qlcnic_83xx_enable_mbx_intrpt(adapter); 1565 qlcnic_83xx_enable_mbx_interrupt(adapter);
1618 1566
1619 err = qlcnic_sriov_cfg_bc_intr(adapter, 1); 1567 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1620 if (err) 1568 if (err)
@@ -1628,6 +1576,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1628 if (err) 1576 if (err)
1629 goto err_out_term_channel; 1577 goto err_out_term_channel;
1630 1578
1579 qlcnic_dcb_get_info(adapter);
1580
1631 return 0; 1581 return 0;
1632 1582
1633err_out_term_channel: 1583err_out_term_channel:
@@ -1657,8 +1607,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1657 struct net_device *netdev = adapter->netdev; 1607 struct net_device *netdev = adapter->netdev;
1658 u8 i, max_ints = ahw->num_msix - 1; 1608 u8 i, max_ints = ahw->num_msix - 1;
1659 1609
1660 qlcnic_83xx_disable_mbx_intr(adapter);
1661 netif_device_detach(netdev); 1610 netif_device_detach(netdev);
1611 qlcnic_83xx_detach_mailbox_work(adapter);
1612 qlcnic_83xx_disable_mbx_intr(adapter);
1613
1662 if (netif_running(netdev)) 1614 if (netif_running(netdev))
1663 qlcnic_down(adapter, netdev); 1615 qlcnic_down(adapter, netdev);
1664 1616
@@ -1702,6 +1654,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1702static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) 1654static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1703{ 1655{
1704 struct qlcnic_hardware_context *ahw = adapter->ahw; 1656 struct qlcnic_hardware_context *ahw = adapter->ahw;
1657 struct qlcnic_mailbox *mbx = ahw->mailbox;
1705 struct device *dev = &adapter->pdev->dev; 1658 struct device *dev = &adapter->pdev->dev;
1706 struct qlc_83xx_idc *idc = &ahw->idc; 1659 struct qlc_83xx_idc *idc = &ahw->idc;
1707 u8 func = ahw->pci_func; 1660 u8 func = ahw->pci_func;
@@ -1712,7 +1665,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1712 /* Skip the context reset and check if FW is hung */ 1665 /* Skip the context reset and check if FW is hung */
1713 if (adapter->reset_ctx_cnt < 3) { 1666 if (adapter->reset_ctx_cnt < 3) {
1714 adapter->need_fw_reset = 1; 1667 adapter->need_fw_reset = 1;
1715 clear_bit(QLC_83XX_MBX_READY, &idc->status); 1668 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1716 dev_info(dev, 1669 dev_info(dev,
1717 "Resetting context, wait here to check if FW is in failed state\n"); 1670 "Resetting context, wait here to check if FW is in failed state\n");
1718 return 0; 1671 return 0;
@@ -1737,7 +1690,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1737 __func__, adapter->reset_ctx_cnt, func); 1690 __func__, adapter->reset_ctx_cnt, func);
1738 set_bit(__QLCNIC_RESETTING, &adapter->state); 1691 set_bit(__QLCNIC_RESETTING, &adapter->state);
1739 adapter->need_fw_reset = 1; 1692 adapter->need_fw_reset = 1;
1740 clear_bit(QLC_83XX_MBX_READY, &idc->status); 1693 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1741 qlcnic_sriov_vf_detach(adapter); 1694 qlcnic_sriov_vf_detach(adapter);
1742 adapter->need_fw_reset = 0; 1695 adapter->need_fw_reset = 0;
1743 1696
@@ -1787,6 +1740,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1787static int 1740static int
1788qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) 1741qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1789{ 1742{
1743 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1790 struct qlc_83xx_idc *idc = &adapter->ahw->idc; 1744 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1791 1745
1792 dev_info(&adapter->pdev->dev, "Device is in quiescent state\n"); 1746 dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
@@ -1794,7 +1748,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1794 set_bit(__QLCNIC_RESETTING, &adapter->state); 1748 set_bit(__QLCNIC_RESETTING, &adapter->state);
1795 adapter->tx_timeo_cnt = 0; 1749 adapter->tx_timeo_cnt = 0;
1796 adapter->reset_ctx_cnt = 0; 1750 adapter->reset_ctx_cnt = 0;
1797 clear_bit(QLC_83XX_MBX_READY, &idc->status); 1751 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1798 qlcnic_sriov_vf_detach(adapter); 1752 qlcnic_sriov_vf_detach(adapter);
1799 } 1753 }
1800 1754
@@ -1803,6 +1757,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1803 1757
1804static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) 1758static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1805{ 1759{
1760 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1806 struct qlc_83xx_idc *idc = &adapter->ahw->idc; 1761 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1807 u8 func = adapter->ahw->pci_func; 1762 u8 func = adapter->ahw->pci_func;
1808 1763
@@ -1812,7 +1767,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1812 set_bit(__QLCNIC_RESETTING, &adapter->state); 1767 set_bit(__QLCNIC_RESETTING, &adapter->state);
1813 adapter->tx_timeo_cnt = 0; 1768 adapter->tx_timeo_cnt = 0;
1814 adapter->reset_ctx_cnt = 0; 1769 adapter->reset_ctx_cnt = 0;
1815 clear_bit(QLC_83XX_MBX_READY, &idc->status); 1770 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1816 qlcnic_sriov_vf_detach(adapter); 1771 qlcnic_sriov_vf_detach(adapter);
1817 } 1772 }
1818 return 0; 1773 return 0;
@@ -1990,7 +1945,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
1990 int err; 1945 int err;
1991 1946
1992 set_bit(QLC_83XX_MODULE_LOADED, &idc->status); 1947 set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1993 qlcnic_83xx_enable_mbx_intrpt(adapter); 1948 qlcnic_83xx_enable_mbx_interrupt(adapter);
1994 err = qlcnic_sriov_cfg_bc_intr(adapter, 1); 1949 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1995 if (err) 1950 if (err)
1996 return err; 1951 return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index eb49cd65378c..330d9a8774ad 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1183,10 +1183,19 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
1183 struct qlcnic_vf_info *vf = trans->vf; 1183 struct qlcnic_vf_info *vf = trans->vf;
1184 struct qlcnic_vport *vp = vf->vp; 1184 struct qlcnic_vport *vp = vf->vp;
1185 u8 cmd_op, mode = vp->vlan_mode; 1185 u8 cmd_op, mode = vp->vlan_mode;
1186 struct qlcnic_adapter *adapter;
1187
1188 adapter = vf->adapter;
1186 1189
1187 cmd_op = trans->req_hdr->cmd_op; 1190 cmd_op = trans->req_hdr->cmd_op;
1188 cmd->rsp.arg[0] |= 1 << 25; 1191 cmd->rsp.arg[0] |= 1 << 25;
1189 1192
1193 /* For 84xx adapters, in case of PVID the PFD should send the VLAN mode
1194 * as QLC_NO_VLAN_MODE to the VFD, which is zero in the mailbox response.
1195 */
1196 if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
1197 return 0;
1198
1190 switch (mode) { 1199 switch (mode) {
1191 case QLC_GUEST_VLAN_MODE: 1200 case QLC_GUEST_VLAN_MODE:
1192 cmd->rsp.arg[1] = mode | 1 << 8; 1201 cmd->rsp.arg[1] = mode | 1 << 8;
@@ -1284,6 +1293,10 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
1284 QLCNIC_CMD_GET_STATISTICS, 1293 QLCNIC_CMD_GET_STATISTICS,
1285 QLCNIC_CMD_GET_PORT_CONFIG, 1294 QLCNIC_CMD_GET_PORT_CONFIG,
1286 QLCNIC_CMD_GET_LINK_STATUS, 1295 QLCNIC_CMD_GET_LINK_STATUS,
1296 QLCNIC_CMD_DCB_QUERY_CAP,
1297 QLCNIC_CMD_DCB_QUERY_PARAM,
1298 QLCNIC_CMD_INIT_NIC_FUNC,
1299 QLCNIC_CMD_STOP_NIC_FUNC,
1287}; 1300};
1288 1301
1289static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { 1302static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
@@ -1639,14 +1652,14 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1639 if (!is_valid_ether_addr(mac) || vf >= num_vfs) 1652 if (!is_valid_ether_addr(mac) || vf >= num_vfs)
1640 return -EINVAL; 1653 return -EINVAL;
1641 1654
1642 if (!compare_ether_addr(adapter->mac_addr, mac)) { 1655 if (ether_addr_equal(adapter->mac_addr, mac)) {
1643 netdev_err(netdev, "MAC address is already in use by the PF\n"); 1656 netdev_err(netdev, "MAC address is already in use by the PF\n");
1644 return -EINVAL; 1657 return -EINVAL;
1645 } 1658 }
1646 1659
1647 for (i = 0; i < num_vfs; i++) { 1660 for (i = 0; i < num_vfs; i++) {
1648 vf_info = &sriov->vf_info[i]; 1661 vf_info = &sriov->vf_info[i];
1649 if (!compare_ether_addr(vf_info->vp->mac, mac)) { 1662 if (ether_addr_equal(vf_info->vp->mac, mac)) {
1650 netdev_err(netdev, 1663 netdev_err(netdev,
1651 "MAC address is already in use by VF %d\n", 1664 "MAC address is already in use by VF %d\n",
1652 i); 1665 i);
@@ -1768,8 +1781,8 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
1768 return 0; 1781 return 0;
1769} 1782}
1770 1783
1771static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter, 1784static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
1772 struct qlcnic_vport *vp, int vf) 1785 struct qlcnic_vport *vp, int vf)
1773{ 1786{
1774 __u32 vlan = 0; 1787 __u32 vlan = 0;
1775 1788
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 660c3f5b2237..c6165d05cc13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -465,8 +465,14 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
465 memset(&pm_cfg, 0, 465 memset(&pm_cfg, 0,
466 sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC); 466 sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
467 467
468 for (i = 0; i < adapter->ahw->act_pci_func; i++) { 468 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
469 pci_func = adapter->npars[i].pci_func; 469 pci_func = adapter->npars[i].pci_func;
470 if (!adapter->npars[i].active)
471 continue;
472
473 if (!adapter->npars[i].eswitch_status)
474 continue;
475
470 pm_cfg[pci_func].action = adapter->npars[i].enable_pm; 476 pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
471 pm_cfg[pci_func].dest_npar = 0; 477 pm_cfg[pci_func].dest_npar = 0;
472 pm_cfg[pci_func].pci_func = i; 478 pm_cfg[pci_func].pci_func = i;
@@ -632,8 +638,14 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
632 memset(&esw_cfg, 0, 638 memset(&esw_cfg, 0,
633 sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC); 639 sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
634 640
635 for (i = 0; i < adapter->ahw->act_pci_func; i++) { 641 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
636 pci_func = adapter->npars[i].pci_func; 642 pci_func = adapter->npars[i].pci_func;
643 if (!adapter->npars[i].active)
644 continue;
645
646 if (!adapter->npars[i].eswitch_status)
647 continue;
648
637 esw_cfg[pci_func].pci_func = pci_func; 649 esw_cfg[pci_func].pci_func = pci_func;
638 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) 650 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
639 return QL_STATUS_INVALID_PARAM; 651 return QL_STATUS_INVALID_PARAM;
@@ -732,6 +744,9 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
732 if (ret) 744 if (ret)
733 return ret; 745 return ret;
734 746
747 if (!adapter->npars[i].eswitch_status)
748 continue;
749
735 np_cfg[i].pci_func = i; 750 np_cfg[i].pci_func = i;
736 np_cfg[i].op_mode = (u8)nic_info.op_mode; 751 np_cfg[i].op_mode = (u8)nic_info.op_mode;
737 np_cfg[i].port_num = nic_info.phys_port; 752 np_cfg[i].port_num = nic_info.phys_port;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 7e8d68263963..899433778466 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2149,7 +2149,7 @@ struct ql_adapter {
2149 struct timer_list timer; 2149 struct timer_list timer;
2150 atomic_t lb_count; 2150 atomic_t lb_count;
2151 /* Keep local copy of current mac address. */ 2151 /* Keep local copy of current mac address. */
2152 char current_mac_addr[6]; 2152 char current_mac_addr[ETH_ALEN];
2153}; 2153};
2154 2154
2155/* 2155/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 85e5c97191dd..6f87f2cde647 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1897,12 +1897,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1897 void *p) 1897 void *p)
1898{ 1898{
1899 struct rtl8169_private *tp = netdev_priv(dev); 1899 struct rtl8169_private *tp = netdev_priv(dev);
1900 1900 u32 __iomem *data = tp->mmio_addr;
1901 if (regs->len > R8169_REGS_SIZE) 1901 u32 *dw = p;
1902 regs->len = R8169_REGS_SIZE; 1902 int i;
1903 1903
1904 rtl_lock_work(tp); 1904 rtl_lock_work(tp);
1905 memcpy_fromio(p, tp->mmio_addr, regs->len); 1905 for (i = 0; i < R8169_REGS_SIZE; i += 4)
1906 memcpy_fromio(dw++, data++, 4);
1906 rtl_unlock_work(tp); 1907 rtl_unlock_work(tp);
1907} 1908}
1908 1909
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 19a8a045e077..a30c4395b232 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
13 Renesas SuperH Ethernet device driver. 13 Renesas SuperH Ethernet device driver.
14 The CPUs supported by this driver are: 14 The CPUs supported by this driver are:
15 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, 15 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
16 R8A7740 and R8A7779. 16 R8A7740, R8A777x and R8A7790.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a753928bab9c..5cd831ebfa83 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
189 [RMCR] = 0x0258, 189 [RMCR] = 0x0258,
190 [TFUCR] = 0x0264, 190 [TFUCR] = 0x0264,
191 [RFOCR] = 0x0268, 191 [RFOCR] = 0x0268,
192 [RMIIMODE] = 0x026c,
192 [FCFTR] = 0x0270, 193 [FCFTR] = 0x0270,
193 [TRIMD] = 0x027c, 194 [TRIMD] = 0x027c,
194}; 195};
@@ -377,6 +378,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
377 .set_duplex = sh_eth_set_duplex, 378 .set_duplex = sh_eth_set_duplex,
378 .set_rate = sh_eth_set_rate_r8a777x, 379 .set_rate = sh_eth_set_rate_r8a777x,
379 380
381 .register_type = SH_ETH_REG_FAST_RCAR,
382
380 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 383 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
381 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 384 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
382 .eesipr_value = 0x01ff009f, 385 .eesipr_value = 0x01ff009f,
@@ -392,6 +395,30 @@ static struct sh_eth_cpu_data r8a777x_data = {
392 .hw_swap = 1, 395 .hw_swap = 1,
393}; 396};
394 397
398/* R8A7790 */
399static struct sh_eth_cpu_data r8a7790_data = {
400 .set_duplex = sh_eth_set_duplex,
401 .set_rate = sh_eth_set_rate_r8a777x,
402
403 .register_type = SH_ETH_REG_FAST_RCAR,
404
405 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
406 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
407 .eesipr_value = 0x01ff009f,
408
409 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
410 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
411 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
412 EESR_ECI,
413
414 .apr = 1,
415 .mpr = 1,
416 .tpauser = 1,
417 .hw_swap = 1,
418 .rmiimode = 1,
419 .shift_rd0 = 1,
420};
421
395static void sh_eth_set_rate_sh7724(struct net_device *ndev) 422static void sh_eth_set_rate_sh7724(struct net_device *ndev)
396{ 423{
397 struct sh_eth_private *mdp = netdev_priv(ndev); 424 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -413,6 +440,8 @@ static struct sh_eth_cpu_data sh7724_data = {
413 .set_duplex = sh_eth_set_duplex, 440 .set_duplex = sh_eth_set_duplex,
414 .set_rate = sh_eth_set_rate_sh7724, 441 .set_rate = sh_eth_set_rate_sh7724,
415 442
443 .register_type = SH_ETH_REG_FAST_SH4,
444
416 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 445 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
417 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 446 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
418 .eesipr_value = 0x01ff009f, 447 .eesipr_value = 0x01ff009f,
@@ -451,6 +480,8 @@ static struct sh_eth_cpu_data sh7757_data = {
451 .set_duplex = sh_eth_set_duplex, 480 .set_duplex = sh_eth_set_duplex,
452 .set_rate = sh_eth_set_rate_sh7757, 481 .set_rate = sh_eth_set_rate_sh7757,
453 482
483 .register_type = SH_ETH_REG_FAST_SH4,
484
454 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 485 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
455 .rmcr_value = 0x00000001, 486 .rmcr_value = 0x00000001,
456 487
@@ -519,6 +550,8 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
519 .set_duplex = sh_eth_set_duplex, 550 .set_duplex = sh_eth_set_duplex,
520 .set_rate = sh_eth_set_rate_giga, 551 .set_rate = sh_eth_set_rate_giga,
521 552
553 .register_type = SH_ETH_REG_GIGABIT,
554
522 .ecsr_value = ECSR_ICD | ECSR_MPD, 555 .ecsr_value = ECSR_ICD | ECSR_MPD,
523 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 556 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
524 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 557 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -577,6 +610,8 @@ static struct sh_eth_cpu_data sh7734_data = {
577 .set_duplex = sh_eth_set_duplex, 610 .set_duplex = sh_eth_set_duplex,
578 .set_rate = sh_eth_set_rate_gether, 611 .set_rate = sh_eth_set_rate_gether,
579 612
613 .register_type = SH_ETH_REG_GIGABIT,
614
580 .ecsr_value = ECSR_ICD | ECSR_MPD, 615 .ecsr_value = ECSR_ICD | ECSR_MPD,
581 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 616 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
582 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 617 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -604,6 +639,8 @@ static struct sh_eth_cpu_data sh7763_data = {
604 .set_duplex = sh_eth_set_duplex, 639 .set_duplex = sh_eth_set_duplex,
605 .set_rate = sh_eth_set_rate_gether, 640 .set_rate = sh_eth_set_rate_gether,
606 641
642 .register_type = SH_ETH_REG_GIGABIT,
643
607 .ecsr_value = ECSR_ICD | ECSR_MPD, 644 .ecsr_value = ECSR_ICD | ECSR_MPD,
608 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 645 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
609 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 646 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -641,6 +678,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
641 .set_duplex = sh_eth_set_duplex, 678 .set_duplex = sh_eth_set_duplex,
642 .set_rate = sh_eth_set_rate_gether, 679 .set_rate = sh_eth_set_rate_gether,
643 680
681 .register_type = SH_ETH_REG_GIGABIT,
682
644 .ecsr_value = ECSR_ICD | ECSR_MPD, 683 .ecsr_value = ECSR_ICD | ECSR_MPD,
645 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 684 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
646 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 685 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -663,6 +702,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
663}; 702};
664 703
665static struct sh_eth_cpu_data sh7619_data = { 704static struct sh_eth_cpu_data sh7619_data = {
705 .register_type = SH_ETH_REG_FAST_SH3_SH2,
706
666 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 707 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
667 708
668 .apr = 1, 709 .apr = 1,
@@ -672,6 +713,8 @@ static struct sh_eth_cpu_data sh7619_data = {
672}; 713};
673 714
674static struct sh_eth_cpu_data sh771x_data = { 715static struct sh_eth_cpu_data sh771x_data = {
716 .register_type = SH_ETH_REG_FAST_SH3_SH2,
717
675 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 718 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
676 .tsu = 1, 719 .tsu = 1,
677}; 720};
@@ -1124,6 +1167,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1124 if (ret) 1167 if (ret)
1125 goto out; 1168 goto out;
1126 1169
1170 if (mdp->cd->rmiimode)
1171 sh_eth_write(ndev, 0x1, RMIIMODE);
1172
1127 /* Descriptor format */ 1173 /* Descriptor format */
1128 sh_eth_ring_format(ndev); 1174 sh_eth_ring_format(ndev);
1129 if (mdp->cd->rpadir) 1175 if (mdp->cd->rpadir)
@@ -1297,9 +1343,12 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1297 mdp->rx_skbuff[entry] = NULL; 1343 mdp->rx_skbuff[entry] = NULL;
1298 if (mdp->cd->rpadir) 1344 if (mdp->cd->rpadir)
1299 skb_reserve(skb, NET_IP_ALIGN); 1345 skb_reserve(skb, NET_IP_ALIGN);
1346 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1347 mdp->rx_buf_sz,
1348 DMA_FROM_DEVICE);
1300 skb_put(skb, pkt_len); 1349 skb_put(skb, pkt_len);
1301 skb->protocol = eth_type_trans(skb, ndev); 1350 skb->protocol = eth_type_trans(skb, ndev);
1302 netif_rx(skb); 1351 netif_receive_skb(skb);
1303 ndev->stats.rx_packets++; 1352 ndev->stats.rx_packets++;
1304 ndev->stats.rx_bytes += pkt_len; 1353 ndev->stats.rx_bytes += pkt_len;
1305 } 1354 }
@@ -1857,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
1857 1906
1858 pm_runtime_get_sync(&mdp->pdev->dev); 1907 pm_runtime_get_sync(&mdp->pdev->dev);
1859 1908
1909 napi_enable(&mdp->napi);
1910
1860 ret = request_irq(ndev->irq, sh_eth_interrupt, 1911 ret = request_irq(ndev->irq, sh_eth_interrupt,
1861 mdp->cd->irq_flags, ndev->name, ndev); 1912 mdp->cd->irq_flags, ndev->name, ndev);
1862 if (ret) { 1913 if (ret) {
1863 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 1914 dev_err(&ndev->dev, "Can not assign IRQ number\n");
1864 return ret; 1915 goto out_napi_off;
1865 } 1916 }
1866 1917
1867 /* Descriptor set */ 1918 /* Descriptor set */
@@ -1879,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
1879 if (ret) 1930 if (ret)
1880 goto out_free_irq; 1931 goto out_free_irq;
1881 1932
1882 napi_enable(&mdp->napi);
1883
1884 return ret; 1933 return ret;
1885 1934
1886out_free_irq: 1935out_free_irq:
1887 free_irq(ndev->irq, ndev); 1936 free_irq(ndev->irq, ndev);
1937out_napi_off:
1938 napi_disable(&mdp->napi);
1888 pm_runtime_put_sync(&mdp->pdev->dev); 1939 pm_runtime_put_sync(&mdp->pdev->dev);
1889 return ret; 1940 return ret;
1890} 1941}
@@ -1976,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
1976{ 2027{
1977 struct sh_eth_private *mdp = netdev_priv(ndev); 2028 struct sh_eth_private *mdp = netdev_priv(ndev);
1978 2029
1979 napi_disable(&mdp->napi);
1980
1981 netif_stop_queue(ndev); 2030 netif_stop_queue(ndev);
1982 2031
1983 /* Disable interrupts by clearing the interrupt mask. */ 2032 /* Disable interrupts by clearing the interrupt mask. */
@@ -1995,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
1995 2044
1996 free_irq(ndev->irq, ndev); 2045 free_irq(ndev->irq, ndev);
1997 2046
2047 napi_disable(&mdp->napi);
2048
1998 /* Free all the skbuffs in the Rx queue. */ 2049 /* Free all the skbuffs in the Rx queue. */
1999 sh_eth_ring_free(ndev); 2050 sh_eth_ring_free(ndev);
2000 2051
@@ -2561,7 +2612,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2561 struct resource *res; 2612 struct resource *res;
2562 struct net_device *ndev = NULL; 2613 struct net_device *ndev = NULL;
2563 struct sh_eth_private *mdp = NULL; 2614 struct sh_eth_private *mdp = NULL;
2564 struct sh_eth_plat_data *pd = pdev->dev.platform_data; 2615 struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2565 const struct platform_device_id *id = platform_get_device_id(pdev); 2616 const struct platform_device_id *id = platform_get_device_id(pdev);
2566 2617
2567 /* get base addr */ 2618 /* get base addr */
@@ -2594,9 +2645,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2594 2645
2595 SET_NETDEV_DEV(ndev, &pdev->dev); 2646 SET_NETDEV_DEV(ndev, &pdev->dev);
2596 2647
2597 /* Fill in the fields of the device structure with ethernet values. */
2598 ether_setup(ndev);
2599
2600 mdp = netdev_priv(ndev); 2648 mdp = netdev_priv(ndev);
2601 mdp->num_tx_ring = TX_RING_SIZE; 2649 mdp->num_tx_ring = TX_RING_SIZE;
2602 mdp->num_rx_ring = RX_RING_SIZE; 2650 mdp->num_rx_ring = RX_RING_SIZE;
@@ -2618,10 +2666,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2618 mdp->edmac_endian = pd->edmac_endian; 2666 mdp->edmac_endian = pd->edmac_endian;
2619 mdp->no_ether_link = pd->no_ether_link; 2667 mdp->no_ether_link = pd->no_ether_link;
2620 mdp->ether_link_active_low = pd->ether_link_active_low; 2668 mdp->ether_link_active_low = pd->ether_link_active_low;
2621 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
2622 2669
2623 /* set cpu data */ 2670 /* set cpu data */
2624 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; 2671 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2672 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2625 sh_eth_set_default_cpu_data(mdp->cd); 2673 sh_eth_set_default_cpu_data(mdp->cd);
2626 2674
2627 /* set function */ 2675 /* set function */
@@ -2749,6 +2797,7 @@ static struct platform_device_id sh_eth_id_table[] = {
2749 { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, 2797 { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2750 { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, 2798 { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2751 { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, 2799 { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2800 { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
2752 { } 2801 { }
2753}; 2802};
2754MODULE_DEVICE_TABLE(platform, sh_eth_id_table); 2803MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 99995bf38c40..a0db02c63b11 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -60,6 +60,7 @@ enum {
60 EDOCR, 60 EDOCR,
61 TFUCR, 61 TFUCR,
62 RFOCR, 62 RFOCR,
63 RMIIMODE,
63 FCFTR, 64 FCFTR,
64 RPADIR, 65 RPADIR,
65 TRIMD, 66 TRIMD,
@@ -156,6 +157,13 @@ enum {
156 SH_ETH_MAX_REGISTER_OFFSET, 157 SH_ETH_MAX_REGISTER_OFFSET,
157}; 158};
158 159
160enum {
161 SH_ETH_REG_GIGABIT,
162 SH_ETH_REG_FAST_RCAR,
163 SH_ETH_REG_FAST_SH4,
164 SH_ETH_REG_FAST_SH3_SH2
165};
166
159/* Driver's parameters */ 167/* Driver's parameters */
160#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 168#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
161#define SH4_SKB_RX_ALIGN 32 169#define SH4_SKB_RX_ALIGN 32
@@ -453,6 +461,7 @@ struct sh_eth_cpu_data {
453 void (*set_rate)(struct net_device *ndev); 461 void (*set_rate)(struct net_device *ndev);
454 462
455 /* mandatory initialize value */ 463 /* mandatory initialize value */
464 int register_type;
456 unsigned long eesipr_value; 465 unsigned long eesipr_value;
457 466
458 /* optional initialize value */ 467 /* optional initialize value */
@@ -482,6 +491,7 @@ struct sh_eth_cpu_data {
482 unsigned hw_crc:1; /* E-DMAC have CSMR */ 491 unsigned hw_crc:1; /* E-DMAC have CSMR */
483 unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ 492 unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
484 unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ 493 unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */
494 unsigned rmiimode:1; /* EtherC has RMIIMODE register */
485}; 495};
486 496
487struct sh_eth_private { 497struct sh_eth_private {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 856e523ac936..c76571886011 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
721 721
722static int sgiseeq_probe(struct platform_device *pdev) 722static int sgiseeq_probe(struct platform_device *pdev)
723{ 723{
724 struct sgiseeq_platform_data *pd = pdev->dev.platform_data; 724 struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
725 struct hpc3_regs *hpcregs = pd->hpc; 725 struct hpc3_regs *hpcregs = pd->hpc;
726 struct sgiseeq_init_block *sr; 726 struct sgiseeq_init_block *sr;
727 unsigned int irq = pd->irq; 727 unsigned int irq = pd->irq;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4136ccc4a954..8b7152565c5e 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,5 +1,5 @@
1config SFC 1config SFC
2 tristate "Solarflare SFC4000/SFC9000-family support" 2 tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
3 depends on PCI 3 depends on PCI
4 select MDIO 4 select MDIO
5 select CRC32 5 select CRC32
@@ -8,12 +8,13 @@ config SFC
8 select PTP_1588_CLOCK 8 select PTP_1588_CLOCK
9 ---help--- 9 ---help---
10 This driver supports 10-gigabit Ethernet cards based on 10 This driver supports 10-gigabit Ethernet cards based on
11 the Solarflare SFC4000 and SFC9000-family controllers. 11 the Solarflare SFC4000, SFC9000-family and SFC9100-family
12 controllers.
12 13
13 To compile this driver as a module, choose M here. The module 14 To compile this driver as a module, choose M here. The module
14 will be called sfc. 15 will be called sfc.
15config SFC_MTD 16config SFC_MTD
16 bool "Solarflare SFC4000/SFC9000-family MTD support" 17 bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
17 depends on SFC && MTD && !(SFC=y && MTD=m) 18 depends on SFC && MTD && !(SFC=y && MTD=m)
18 default y 19 default y
19 ---help--- 20 ---help---
@@ -21,7 +22,7 @@ config SFC_MTD
21 (e.g. /dev/mtd1). This is required to update the firmware or 22 (e.g. /dev/mtd1). This is required to update the firmware or
22 the boot configuration under Linux. 23 the boot configuration under Linux.
23config SFC_MCDI_MON 24config SFC_MCDI_MON
24 bool "Solarflare SFC9000-family hwmon support" 25 bool "Solarflare SFC9000/SFC9100-family hwmon support"
25 depends on SFC && HWMON && !(SFC=y && HWMON=m) 26 depends on SFC && HWMON && !(SFC=y && HWMON=m)
26 default y 27 default y
27 ---help--- 28 ---help---
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 945bf06e69ef..3a83c0dca8e6 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,8 +1,7 @@
1sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \ 1sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
2 falcon_xmac.o mcdi_mac.o \ 2 rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o txc43128_phy.o falcon_boards.o \ 3 tenxpress.o txc43128_phy.o falcon_boards.o \
5 mcdi.o mcdi_phy.o mcdi_mon.o ptp.o 4 mcdi.o mcdi_port.o mcdi_mon.o ptp.o
6sfc-$(CONFIG_SFC_MTD) += mtd.o 5sfc-$(CONFIG_SFC_MTD) += mtd.o
7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o 6sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
8 7
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 5400a33f254f..17d83f37fbf2 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,10 @@
29/* Lowest bit numbers and widths */ 29/* Lowest bit numbers and widths */
30#define EFX_DUMMY_FIELD_LBN 0 30#define EFX_DUMMY_FIELD_LBN 0
31#define EFX_DUMMY_FIELD_WIDTH 0 31#define EFX_DUMMY_FIELD_WIDTH 0
32#define EFX_WORD_0_LBN 0
33#define EFX_WORD_0_WIDTH 16
34#define EFX_WORD_1_LBN 16
35#define EFX_WORD_1_WIDTH 16
32#define EFX_DWORD_0_LBN 0 36#define EFX_DWORD_0_LBN 0
33#define EFX_DWORD_0_WIDTH 32 37#define EFX_DWORD_0_WIDTH 32
34#define EFX_DWORD_1_LBN 32 38#define EFX_DWORD_1_LBN 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 000000000000..5f42313b4965
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3043 @@
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "ef10_regs.h"
12#include "io.h"
13#include "mcdi.h"
14#include "mcdi_pcol.h"
15#include "nic.h"
16#include "workarounds.h"
17#include <linux/in.h>
18#include <linux/jhash.h>
19#include <linux/wait.h>
20#include <linux/workqueue.h>
21
22/* Hardware control for EF10 architecture including 'Huntington'. */
23
24#define EFX_EF10_DRVGEN_EV 7
25enum {
26 EFX_EF10_TEST = 1,
27 EFX_EF10_REFILL,
28};
29
30/* The reserved RSS context value */
31#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
32
33/* The filter table(s) are managed by firmware and we have write-only
34 * access. When removing filters we must identify them to the
35 * firmware by a 64-bit handle, but this is too wide for Linux kernel
36 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
37 * be able to tell in advance whether a requested insertion will
38 * replace an existing filter. Therefore we maintain a software hash
39 * table, which should be at least as large as the hardware hash
40 * table.
41 *
42 * Huntington has a single 8K filter table shared between all filter
43 * types and both ports.
44 */
45#define HUNT_FILTER_TBL_ROWS 8192
46
47struct efx_ef10_filter_table {
48/* The RX match field masks supported by this fw & hw, in order of priority */
49 enum efx_filter_match_flags rx_match_flags[
50 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
51 unsigned int rx_match_count;
52
53 struct {
54 unsigned long spec; /* pointer to spec plus flag bits */
55/* BUSY flag indicates that an update is in progress. STACK_OLD is
56 * used to mark and sweep stack-owned MAC filters.
57 */
58#define EFX_EF10_FILTER_FLAG_BUSY 1UL
59#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
60#define EFX_EF10_FILTER_FLAGS 3UL
61 u64 handle; /* firmware handle */
62 } *entry;
63 wait_queue_head_t waitq;
64/* Shadow of net_device address lists, guarded by mac_lock */
65#define EFX_EF10_FILTER_STACK_UC_MAX 32
66#define EFX_EF10_FILTER_STACK_MC_MAX 256
67 struct {
68 u8 addr[ETH_ALEN];
69 u16 id;
70 } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
71 stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
72 int stack_uc_count; /* negative for PROMISC */
73 int stack_mc_count; /* negative for PROMISC/ALLMULTI */
74};
75
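The `unsigned long spec` field above packs a pointer and the two flag bits into one word; that works because the spec allocations are at least 4-byte aligned, so the low two bits of the pointer are always zero. A standalone sketch of the tagging trick (illustrative names, not the sfc API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_BUSY	1UL
#define FLAG_STACK_OLD	2UL
#define FLAG_MASK	3UL

static unsigned long pack(void *spec, unsigned long flags)
{
	/* Alignment guarantees the two low bits are free for flags. */
	assert(((uintptr_t)spec & FLAG_MASK) == 0);
	return (uintptr_t)spec | flags;
}

static void *spec_of(unsigned long e) { return (void *)(e & ~FLAG_MASK); }
static unsigned long flags_of(unsigned long e) { return e & FLAG_MASK; }

int main(void)
{
	static int spec_storage;	/* stands in for a filter spec */
	unsigned long e = pack(&spec_storage, FLAG_BUSY);

	printf("busy=%lu spec=%p\n", flags_of(e) & FLAG_BUSY, spec_of(e));
	return 0;
}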
76/* An arbitrary search limit for the software hash table */
77#define EFX_EF10_FILTER_SEARCH_LIMIT 200
78
79static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
80static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
81static void efx_ef10_filter_table_remove(struct efx_nic *efx);
82
83static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
84{
85 efx_dword_t reg;
86
87 efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
88 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
89 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
90}
91
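Given the EFX_WORD_0/EFX_WORD_1 fields added to bitfield.h in this patch, the warm-boot read above treats the status register as two 16-bit halves: the upper half must hold the 0xb007 magic (presumably "boot") and the lower half carries the count. A plain-C sketch of the decode; -5 is the numeric value of -EIO on Linux:

#include <stdint.h>
#include <stdio.h>

static int decode_warm_boot(uint32_t reg)
{
	/* upper 16 bits: magic; lower 16 bits: boot count */
	return (reg >> 16) == 0xb007 ? (int)(reg & 0xffff) : -5 /* -EIO */;
}

int main(void)
{
	printf("%d\n", decode_warm_boot(0xb0070003));	/* 3 */
	printf("%d\n", decode_warm_boot(0xdead0003));	/* -5: bad magic */
	return 0;
}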
92static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
93{
94 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
95}
96
97static int efx_ef10_init_capabilities(struct efx_nic *efx)
98{
99 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
100 struct efx_ef10_nic_data *nic_data = efx->nic_data;
101 size_t outlen;
102 int rc;
103
104 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
105
106 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
107 outbuf, sizeof(outbuf), &outlen);
108 if (rc)
109 return rc;
110
111 if (outlen >= sizeof(outbuf)) {
112 nic_data->datapath_caps =
113 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
114 if (!(nic_data->datapath_caps &
115 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
116 netif_err(efx, drv, efx->net_dev,
117 "Capabilities don't indicate TSO support.\n");
118 return -ENODEV;
119 }
120 }
121
122 return 0;
123}
124
125static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
126{
127 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
128 int rc;
129
130 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
131 outbuf, sizeof(outbuf), NULL);
132 if (rc)
133 return rc;
134 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
135 return rc > 0 ? rc : -ERANGE;
136}
137
138static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
139{
140 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
141 size_t outlen;
142 int rc;
143
144 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
145
146 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
147 outbuf, sizeof(outbuf), &outlen);
148 if (rc)
149 return rc;
150 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
151 return -EIO;
152
153 memcpy(mac_address,
154 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
155 return 0;
156}
157
158static int efx_ef10_probe(struct efx_nic *efx)
159{
160 struct efx_ef10_nic_data *nic_data;
161 int i, rc;
162
163 /* We can have one VI for each 8K region. However we need
164 * multiple TX queues per channel.
165 */
	efx->max_channels =
		min_t(unsigned int,
		      EFX_MAX_CHANNELS,
		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	BUG_ON(efx->max_channels == 0);

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = efx_ef10_init_capabilities(efx);
	if (rc < 0)
		goto fail3;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		rc = -ENODEV;
		goto fail3;
	}

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail3;
	efx->port_num = rc;

	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail3;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail3;
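	/* GET_CLOCK reports the system clock in MHz (per the MCDI
	 * definition), so this works out as the duration of 1536
	 * sysclk cycles in nanoseconds.
	 */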
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */

	/* Check whether firmware supports bug 35388 workaround */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
	if (rc == 0)
		nic_data->workaround_35388 = true;
	else if (rc != -ENOSYS && rc != -ENOENT)
		goto fail3;
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail3;

	efx_ptp_probe(efx);

	return 0;

fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	return rc;
}

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	efx_mcdi_mon_remove(efx);

	/* This needs to be after efx_ptp_remove_channel() with no filters */
	efx_ef10_rx_free_indir_table(efx);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	unsigned int n_vis =
		max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

	return efx_ef10_alloc_vis(efx, n_vis, n_vis);
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	efx_ef10_rx_push_indir_table(efx);
	return 0;
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
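
/* As an illustration, EF10_DMA_STAT(tx_bytes, TX_BYTES) expands to
 *   [EF10_STAT_tx_bytes] = { "tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES }
 * i.e. a named 64-bit counter whose offset in the DMA statistics
 * buffer is derived from the MCDI statistic index.
 */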

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(tx_bytes, TX_BYTES),
	EF10_DMA_STAT(tx_packets, TX_PKTS),
	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(tx_64, TX_64_PKTS),
	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(rx_good_bytes),
	EF10_OTHER_STAT(rx_bad_bytes),
	EF10_DMA_STAT(rx_packets, RX_PKTS),
	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(rx_64, RX_64_PKTS),
	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
			       (1ULL << EF10_STAT_tx_packets) |		\
			       (1ULL << EF10_STAT_tx_pause) |		\
			       (1ULL << EF10_STAT_tx_unicast) |		\
			       (1ULL << EF10_STAT_tx_multicast) |	\
			       (1ULL << EF10_STAT_tx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_bytes) |		\
			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_rx_packets) |		\
			       (1ULL << EF10_STAT_rx_good) |		\
			       (1ULL << EF10_STAT_rx_bad) |		\
			       (1ULL << EF10_STAT_rx_pause) |		\
			       (1ULL << EF10_STAT_rx_control) |		\
			       (1ULL << EF10_STAT_rx_unicast) |		\
			       (1ULL << EF10_STAT_rx_multicast) |	\
			       (1ULL << EF10_STAT_rx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_lt64) |		\
			       (1ULL << EF10_STAT_rx_64) |		\
			       (1ULL << EF10_STAT_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
			       (1ULL << EF10_STAT_rx_overflow) |	\
			       (1ULL << EF10_STAT_rx_nodesc_drops))

/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
				 (1ULL << EF10_STAT_tx_lt64) |		\
				 (1ULL << EF10_STAT_tx_64) |		\
				 (1ULL << EF10_STAT_tx_65_to_127) |	\
				 (1ULL << EF10_STAT_tx_128_to_255) |	\
				 (1ULL << EF10_STAT_tx_256_to_511) |	\
				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * be silent.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
				  (1ULL << EF10_STAT_rx_length_error))

#if BITS_PER_LONG == 64
#define STAT_MASK_BITMAP(bits) (bits)
#else
#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
#endif
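
/* A minimal sketch of the 32-bit case: STAT_MASK_BITMAP(0x5ULL)
 * expands to "0x5ULL & 0xffffffff, 0x5ULL >> 32", i.e. the two-word
 * initialiser { 0x5, 0x0 }, matching the unsigned long bitmap layout
 * that for_each_set_bit() expects.
 */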

static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
{
	static const unsigned long hunt_40g_stat_mask[] = {
		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
				 HUNT_40G_EXTRA_STAT_MASK)
	};
	static const unsigned long hunt_10g_only_stat_mask[] = {
		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
				 HUNT_10G_ONLY_STAT_MASK)
	};
	u32 port_caps = efx_mcdi_phy_get_caps(efx);

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		return hunt_40g_stat_mask;
	else
		return hunt_10g_only_stat_mask;
}

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      efx_ef10_stat_mask(efx), names);
}

static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	dma_stats = efx->stats_buffer.addr;

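	/* The MC updates GENERATION_START before and GENERATION_END
	 * after rewriting the statistics buffer, so re-reading the two
	 * generation counts around the copy (ordered by rmb()) acts as
	 * a seqlock-style consistency check.
	 */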
	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
			     stats, efx->stats_buffer.addr, false);
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	/* Update derived statistics */
	stats[EF10_STAT_rx_good_bytes] =
		stats[EF10_STAT_rx_bytes] -
		stats[EF10_STAT_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);

	return 0;
}

static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
				    struct rtnl_link_stats64 *core_stats)
{
	const unsigned long *mask = efx_ef10_stat_mask(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats(efx) == 0)
			break;
		udelay(100);
	}

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (core_stats) {
		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_rx_gtjumbo] +
			stats[EF10_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}

static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, value;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation) {
		mode = 3;
		value = channel->irq_moderation - 1;
	} else {
		mode = 0;
		value = 0;
	}

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
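	/* Note: the write to the 'high' register is what appears to
	 * trigger the MC (cf. the kexec cancel in efx_ef10_probe()),
	 * so it must be the second of the two writes below.
	 */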
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}

static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}

static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	return -EIO;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}

static void efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}

static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}

static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;
	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}

static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

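	/* Either push the first new descriptor directly through the
	 * doorbell write (typically only possible when the queue was
	 * previously empty) or just notify the new write pointer.
	 */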
	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}

static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
		       EFX_MAX_CHANNELS);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	return 0;
}

static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	WARN_ON(rc != 0);
}

static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) efx->rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
			efx->rx_hash_key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
		efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}

static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");

	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
		if (rc != 0)
			goto fail;
	}

	rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = rx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_2(*rxd,
			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int write_count;
	efx_dword_t reg;

	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
	write_count = rx_queue->added_count & ~7;
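	/* e.g. added_count == 29 rounds down to write_count == 24; the
	 * remaining five descriptors stay unpublished until more are
	 * added.
	 */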
	if (rx_queue->notified_count == write_count)
		return;

	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);

	wmb();
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
			     write_count & rx_queue->ptr_mask);
	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
			efx_rx_queue_index(rx_queue));
}

static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;

static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
			   inbuf, sizeof(inbuf), 0,
			   efx_ef10_rx_defer_refill_complete, 0);
}

static void
efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	/* nothing to do */
}

static int efx_ef10_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static int efx_ef10_ev_init(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
	bool supports_rx_merge;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	nic_data = efx->nic_data;
	supports_rx_merge =
		!!(nic_data->datapath_caps &
		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
			      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* IRQ return is ignored */

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static void efx_ef10_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "rx event arrived on queue %d labeled as queue %u\n",
		   efx_rx_queue_index(rx_queue), rx_queue_label);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

static void
efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
			     unsigned int actual, unsigned int expected)
{
	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, actual, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

/* A partially-received RX packet was aborted; clean up. */
static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
	unsigned int rx_desc_ptr;

	WARN_ON(rx_queue->scatter_n == 0);

	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
		  "scattered RX aborted (dropping %u buffers)\n",
		  rx_queue->scatter_n);

	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;

	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
		      0, EFX_RX_PKT_DISCARD);

	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}

static int efx_ef10_handle_rx_event(struct efx_channel *channel,
				    const efx_qword_t *event)
{
	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
	unsigned int n_descs, n_packets, i;
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue;
	bool rx_cont;
	u16 flags = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */
	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);

	WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);

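	/* The event carries only the low bits of the next descriptor
	 * pointer, so the number of descriptors consumed has to be
	 * recovered modulo 2^ESF_DZ_RX_DSC_PTR_LBITS_WIDTH.
	 */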
	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));

	if (n_descs != rx_queue->scatter_n + 1) {
		/* detect rx abort */
		if (unlikely(n_descs == rx_queue->scatter_n)) {
			WARN_ON(rx_bytes != 0);
			efx_ef10_handle_rx_abort(rx_queue);
			return 0;
		}

		if (unlikely(rx_queue->scatter_n != 0)) {
			/* Scattered packet completions cannot be
			 * merged, so something has gone wrong.
			 */
			efx_ef10_handle_rx_bad_lbits(
				rx_queue, next_ptr_lbits,
				(rx_queue->removed_count +
				 rx_queue->scatter_n + 1) &
				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
			return 0;
		}

		/* Merged completion for multiple non-scattered packets */
		rx_queue->scatter_n = 1;
		rx_queue->scatter_len = 0;
		n_packets = n_descs;
		++channel->n_rx_merge_events;
		channel->n_rx_merge_packets += n_packets;
		flags |= EFX_RX_PKT_PREFIX_LEN;
	} else {
		++rx_queue->scatter_n;
		rx_queue->scatter_len += rx_bytes;
		if (rx_cont)
			return 0;
		n_packets = 1;
	}

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
		flags |= EFX_RX_PKT_DISCARD;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
		channel->n_rx_ip_hdr_chksum_err += n_packets;
	} else if (unlikely(EFX_QWORD_FIELD(*event,
					    ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
		channel->n_rx_tcp_udp_chksum_err += n_packets;
	} else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
		   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
		flags |= EFX_RX_PKT_CSUMMED;
	}

	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
		flags |= EFX_RX_PKT_TCP;

	channel->irq_mod_score += 2 * n_packets;

	/* Handle received packet(s) */
	for (i = 0; i < n_packets; i++) {
		efx_rx_packet(rx_queue,
			      rx_queue->removed_count & rx_queue->ptr_mask,
			      rx_queue->scatter_n, rx_queue->scatter_len,
			      flags);
		rx_queue->removed_count += rx_queue->scatter_n;
	}

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	return n_packets;
}

static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue;
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	int tx_descs = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
		return 0;

	/* Transmit completion */
	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
	tx_queue = efx_channel_get_tx_queue(channel,
					    tx_ev_q_label % EFX_TXQ_TYPES);
	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
		    tx_queue->ptr_mask);
	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);

	return tx_descs;
}

static void
efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int subcode;

	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);

	switch (subcode) {
	case ESE_DZ_DRV_TIMER_EV:
	case ESE_DZ_DRV_WAKE_UP_EV:
		break;
	case ESE_DZ_DRV_START_UP_EV:
		/* event queue init complete. ok. */
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %d"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
						   efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	u32 subcode;

	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);

	switch (subcode) {
	case EFX_EF10_TEST:
		channel->event_test_cpu = raw_smp_processor_id();
		break;
	case EFX_EF10_REFILL:
		/* The queue must be empty, so we won't receive any RX
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here.
		 */
		efx_fast_push_rx_descriptors(&channel->rx_queue);
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %u"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, (unsigned) subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	efx_qword_t event, *p_event;
	unsigned int read_ptr;
	int ev_code;
	int tx_descs = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			break;

		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(event));

		switch (ev_code) {
		case ESE_DZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case ESE_DZ_EV_CODE_RX_EV:
			spent += efx_ef10_handle_rx_event(channel, &event);
			if (spent >= quota) {
				/* XXX can we split a merged event to
				 * avoid going over-quota?
				 */
				spent = quota;
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_TX_EV:
			tx_descs += efx_ef10_handle_tx_event(channel, &event);
			if (tx_descs > efx->txq_entries) {
				spent = quota;
				goto out;
			} else if (++spent == quota) {
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_DRIVER_EV:
			efx_ef10_handle_driver_event(channel, &event);
			if (++spent == quota)
				goto out;
			break;
		case EFX_EF10_DRVGEN_EV:
			efx_ef10_handle_driver_generated_event(channel, &event);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "channel %d unknown event type %d"
				  " (data " EFX_QWORD_FMT ")\n",
				  channel->channel, ev_code,
				  EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

static void efx_ef10_ev_read_ack(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	efx_dword_t rptr;

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

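		/* The indirect RPTR field is narrower than the read
		 * pointer itself, so it is written in two halves,
		 * high bits first.
		 */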
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
				     ERF_DD_EVQ_IND_RPTR,
				     (channel->eventq_read_ptr &
				      channel->eventq_mask) >>
				     ERF_DD_EVQ_IND_RPTR_WIDTH);
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
				     ERF_DD_EVQ_IND_RPTR,
				     channel->eventq_read_ptr &
				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
				     channel->eventq_read_ptr &
				     channel->eventq_mask);
		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
	}
}

static void efx_ef10_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc != 0)
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

void efx_ef10_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	WARN_ON(atomic_read(&efx->active_queues) < 0);
}

static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (nic_data->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_ef10_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_ef10_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
				  const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

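	/* Everything from outer_vid to the end of the spec is matched
	 * byte-for-byte, so this tail can be treated as an opaque key
	 * (the hash below uses the same region).
	 */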
	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
	/* XXX should we randomise the initval? */
}

/* Decide whether a filter should be exclusive or else should allow
 * delivery to additional recipients.  Currently we decide that
 * filters for specific local unicast MAC and IP addresses are
 * exclusive.
 */
static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
{
	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
	    !is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    !ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] != 0xff)
			return true;
	}

	return false;
}

static struct efx_filter_spec *
efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
			   unsigned int filter_idx)
{
	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
					  ~EFX_EF10_FILTER_FLAGS);
}

static unsigned int
efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
			    unsigned int filter_idx)
{
	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
}

static void
efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
			  unsigned int filter_idx,
			  const struct efx_filter_spec *spec,
			  unsigned int flags)
{
	table->entry[filter_idx].spec = (unsigned long)spec | flags;
}

static void efx_ef10_filter_push_prep(struct efx_nic *efx,
				      const struct efx_filter_spec *spec,
				      efx_dword_t *inbuf, u64 handle,
				      bool replacing)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);

	if (replacing) {
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
	} else {
		u32 match_fields = 0;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_INSERT :
			       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);

		/* Convert match flags and values.  Unlike almost
		 * everything else in MCDI, these fields are in
		 * network byte order.
		 */
		if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
			match_fields |=
				is_multicast_ether_addr(spec->loc_mac) ?
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
#define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
		if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
			match_fields |=					     \
				1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
				mcdi_field ## _LBN;			     \
			BUILD_BUG_ON(					     \
				MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
				sizeof(spec->gen_field));		     \
			memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
			       &spec->gen_field, sizeof(spec->gen_field));   \
		}
		COPY_FIELD(REM_HOST, rem_host, SRC_IP);
		COPY_FIELD(LOC_HOST, loc_host, DST_IP);
		COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
		COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
		COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
		COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
		COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
		COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
		COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
		COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
#undef COPY_FIELD
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
			       match_fields);
	}

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
			       spec->rss_context !=
			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
			       spec->rss_context : nic_data->rx_rss_context);
}

static int efx_ef10_filter_push(struct efx_nic *efx,
				const struct efx_filter_spec *spec,
				u64 *handle, bool replacing)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
	int rc;

	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc == 0)
		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
	return rc;
}

static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
					enum efx_filter_match_flags match_flags)
{
	unsigned int match_pri;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++)
		if (table->rx_match_flags[match_pri] == match_flags)
			return match_pri;

	return -EPROTONOSUPPORT;
}

static s32 efx_ef10_filter_insert(struct efx_nic *efx,
				  struct efx_filter_spec *spec,
				  bool replace_equal)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
	struct efx_filter_spec *saved_spec;
	unsigned int match_pri, hash;
	unsigned int priv_flags;
	bool replacing = false;
	int ins_index = -1;
	DEFINE_WAIT(wait);
	bool is_mc_recip;
	s32 rc;

	/* For now, only support RX filters */
	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
	    EFX_FILTER_FLAG_RX)
		return -EINVAL;

	rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
	if (rc < 0)
		return rc;
	match_pri = rc;

	hash = efx_ef10_filter_hash(spec);
	is_mc_recip = efx_filter_is_mc_recipient(spec);
	if (is_mc_recip)
		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);

	/* Find any existing filters with the same match tuple or
	 * else a free slot to insert at.  If any of them are busy,
	 * we have to wait and retry.
	 */
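	/* The table is probed linearly from the hash: slot
	 * (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1) for
	 * depth = 1..EFX_EF10_FILTER_SEARCH_LIMIT.
	 */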
	for (;;) {
		unsigned int depth = 1;
		unsigned int i;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);

			if (!saved_spec) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_ef10_filter_equal(spec, saved_spec)) {
				if (table->entry[i].spec &
				    EFX_EF10_FILTER_FLAG_BUSY)
					break;
				if (spec->priority < saved_spec->priority &&
				    !(saved_spec->priority ==
				      EFX_FILTER_PRI_REQUIRED &&
				      saved_spec->flags &
				      EFX_FILTER_FLAG_RX_STACK)) {
					rc = -EPERM;
					goto out_unlock;
				}
				if (!is_mc_recip) {
					/* This is the only one */
					if (spec->priority ==
					    saved_spec->priority &&
					    !replace_equal) {
						rc = -EEXIST;
						goto out_unlock;
					}
					ins_index = i;
					goto found;
				} else if (spec->priority >
					   saved_spec->priority ||
					   (spec->priority ==
					    saved_spec->priority &&
					    replace_equal)) {
					if (ins_index < 0)
						ins_index = i;
					else
						__set_bit(depth, mc_rem_map);
				}
			}

			/* Once we reach the maximum search depth, use
			 * the first suitable slot or return -EBUSY if
			 * there was none
			 */
			if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out_unlock;
				}
				goto found;
			}

			++depth;
		}

		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&efx->filter_lock);
		schedule();
	}

found:
	/* Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
			/* Just make sure it won't be removed */
			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
			table->entry[ins_index].spec &=
				~EFX_EF10_FILTER_FLAG_STACK_OLD;
			rc = ins_index;
			goto out_unlock;
		}
		replacing = true;
		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		*saved_spec = *spec;
		priv_flags = 0;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
				  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);

	/* Mark lower-priority multicast recipients busy prior to removal */
	if (is_mc_recip) {
		unsigned int depth, i;

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			if (test_bit(depth, mc_rem_map))
				table->entry[i].spec |=
					EFX_EF10_FILTER_FLAG_BUSY;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
				  replacing);

	/* Finalise the software table entry */
	spin_lock_bh(&efx->filter_lock);
	if (rc == 0) {
		if (replacing) {
			/* Update the fields that may differ */
			saved_spec->priority = spec->priority;
			saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
			saved_spec->flags |= spec->flags;
			saved_spec->rss_context = spec->rss_context;
			saved_spec->dmaq_id = spec->dmaq_id;
		}
	} else if (!replacing) {
		kfree(saved_spec);
		saved_spec = NULL;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);

	/* Remove and finalise entries for lower-priority multicast
	 * recipients
	 */
	if (is_mc_recip) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
		unsigned int depth, i;

		memset(inbuf, 0, sizeof(inbuf));

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			if (!test_bit(depth, mc_rem_map))
				continue;

			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);
			priv_flags = efx_ef10_filter_entry_flags(table, i);

			if (rc == 0) {
				spin_unlock_bh(&efx->filter_lock);
				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
					       table->entry[i].handle);
				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
						  inbuf, sizeof(inbuf),
						  NULL, 0, NULL);
				spin_lock_bh(&efx->filter_lock);
			}

			if (rc == 0) {
				kfree(saved_spec);
				saved_spec = NULL;
				priv_flags = 0;
			} else {
				priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
			}
			efx_ef10_filter_set_entry(table, i, saved_spec,
2109 priv_flags);
2110 }
2111 }
2112
2113 /* If successful, return the inserted filter ID */
2114 if (rc == 0)
2115 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
2116
2117 wake_up_all(&table->waitq);
2118out_unlock:
2119 spin_unlock_bh(&efx->filter_lock);
2120 finish_wait(&table->waitq, &wait);
2121 return rc;
2122}
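
/* [Editor's note — illustrative sketch, not part of this commit.]
 * The s32 returned by efx_ef10_filter_insert() packs the match-priority
 * band together with the table row found by the linear probe
 * (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); the remove/get paths
 * below decode it with modulus and division.  The helper names here are
 * hypothetical; only HUNT_FILTER_TBL_ROWS comes from the driver.
 */
static inline u32 ef10_sketch_filter_id(unsigned int match_pri,
					unsigned int index)
{
	return match_pri * HUNT_FILTER_TBL_ROWS + index;
}

static inline unsigned int ef10_sketch_filter_idx(u32 filter_id)
{
	return filter_id % HUNT_FILTER_TBL_ROWS;	/* table row */
}

static inline unsigned int ef10_sketch_filter_pri(u32 filter_id)
{
	return filter_id / HUNT_FILTER_TBL_ROWS;	/* match-pri band */
}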
2123
2124void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
2125{
2126 /* no need to do anything here on EF10 */
2127}
2128
2129/* Remove a filter.
2130 * If !stack_requested, remove by filter ID;
2131 * if stack_requested, remove by raw table index (internal callers only).
2132 * Filter IDs may come from userland and must be range-checked.
2133 */
2134static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
2135 enum efx_filter_priority priority,
2136 u32 filter_id, bool stack_requested)
2137{
2138 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2139 struct efx_ef10_filter_table *table = efx->filter_state;
2140 MCDI_DECLARE_BUF(inbuf,
2141 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2142 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2143 struct efx_filter_spec *spec;
2144 DEFINE_WAIT(wait);
2145 int rc;
2146
2147 /* Find the software table entry and mark it busy. Don't
2148 * remove it yet; any attempt to update while we're waiting
2149 * for the firmware must find the busy entry.
2150 */
2151 for (;;) {
2152 spin_lock_bh(&efx->filter_lock);
2153 if (!(table->entry[filter_idx].spec &
2154 EFX_EF10_FILTER_FLAG_BUSY))
2155 break;
2156 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2157 spin_unlock_bh(&efx->filter_lock);
2158 schedule();
2159 }
2160 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2161 if (!spec || spec->priority > priority ||
2162 (!stack_requested &&
2163 efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
2164 filter_id / HUNT_FILTER_TBL_ROWS)) {
2165 rc = -ENOENT;
2166 goto out_unlock;
2167 }
2168 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2169 spin_unlock_bh(&efx->filter_lock);
2170
2171 if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
2172 /* Reset steering of a stack-owned filter */
2173
2174 struct efx_filter_spec new_spec = *spec;
2175
2176 new_spec.priority = EFX_FILTER_PRI_REQUIRED;
2177 new_spec.flags = (EFX_FILTER_FLAG_RX |
2178 EFX_FILTER_FLAG_RX_RSS |
2179 EFX_FILTER_FLAG_RX_STACK);
2180 new_spec.dmaq_id = 0;
2181 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
2182 rc = efx_ef10_filter_push(efx, &new_spec,
2183 &table->entry[filter_idx].handle,
2184 true);
2185
2186 spin_lock_bh(&efx->filter_lock);
2187 if (rc == 0)
2188 *spec = new_spec;
2189 } else {
2190 /* Really remove the filter */
2191
2192 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2193 efx_ef10_filter_is_exclusive(spec) ?
2194 MC_CMD_FILTER_OP_IN_OP_REMOVE :
2195 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2196 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2197 table->entry[filter_idx].handle);
2198 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2199 inbuf, sizeof(inbuf), NULL, 0, NULL);
2200
2201 spin_lock_bh(&efx->filter_lock);
2202 if (rc == 0) {
2203 kfree(spec);
2204 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2205 }
2206 }
2207 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2208 wake_up_all(&table->waitq);
2209out_unlock:
2210 spin_unlock_bh(&efx->filter_lock);
2211 finish_wait(&table->waitq, &wait);
2212 return rc;
2213}
2214
2215static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
2216 enum efx_filter_priority priority,
2217 u32 filter_id)
2218{
2219 return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
2220}
2221
2222static int efx_ef10_filter_get_safe(struct efx_nic *efx,
2223 enum efx_filter_priority priority,
2224 u32 filter_id, struct efx_filter_spec *spec)
2225{
2226 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2227 struct efx_ef10_filter_table *table = efx->filter_state;
2228 const struct efx_filter_spec *saved_spec;
2229 int rc;
2230
2231 spin_lock_bh(&efx->filter_lock);
2232 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
2233 if (saved_spec && saved_spec->priority == priority &&
2234 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
2235 filter_id / HUNT_FILTER_TBL_ROWS) {
2236 *spec = *saved_spec;
2237 rc = 0;
2238 } else {
2239 rc = -ENOENT;
2240 }
2241 spin_unlock_bh(&efx->filter_lock);
2242 return rc;
2243}
2244
2245static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
2246 enum efx_filter_priority priority)
2247{
2248 /* TODO */
2249}
2250
2251static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
2252 enum efx_filter_priority priority)
2253{
2254 struct efx_ef10_filter_table *table = efx->filter_state;
2255 unsigned int filter_idx;
2256 s32 count = 0;
2257
2258 spin_lock_bh(&efx->filter_lock);
2259 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2260 if (table->entry[filter_idx].spec &&
2261 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
2262 priority)
2263 ++count;
2264 }
2265 spin_unlock_bh(&efx->filter_lock);
2266 return count;
2267}
2268
2269static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
2270{
2271 struct efx_ef10_filter_table *table = efx->filter_state;
2272
2273 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
2274}
2275
2276static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
2277 enum efx_filter_priority priority,
2278 u32 *buf, u32 size)
2279{
2280 struct efx_ef10_filter_table *table = efx->filter_state;
2281 struct efx_filter_spec *spec;
2282 unsigned int filter_idx;
2283 s32 count = 0;
2284
2285 spin_lock_bh(&efx->filter_lock);
2286 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2287 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2288 if (spec && spec->priority == priority) {
2289 if (count == size) {
2290 count = -EMSGSIZE;
2291 break;
2292 }
2293 buf[count++] = (efx_ef10_filter_rx_match_pri(
2294 table, spec->match_flags) *
2295 HUNT_FILTER_TBL_ROWS +
2296 filter_idx);
2297 }
2298 }
2299 spin_unlock_bh(&efx->filter_lock);
2300 return count;
2301}
2302
2303#ifdef CONFIG_RFS_ACCEL
2304
2305static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
2306
2307static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
2308 struct efx_filter_spec *spec)
2309{
2310 struct efx_ef10_filter_table *table = efx->filter_state;
2311 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2312 struct efx_filter_spec *saved_spec;
2313 unsigned int hash, i, depth = 1;
2314 bool replacing = false;
2315 int ins_index = -1;
2316 u64 cookie;
2317 s32 rc;
2318
2319 /* Must be an RX filter without RSS and not for a multicast
2320 * destination address (RFS only works for connected sockets).
2321 * These restrictions allow us to pass only a tiny amount of
2322 * data through to the completion function.
2323 */
2324 EFX_WARN_ON_PARANOID(spec->flags !=
2325 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
2326 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
2327 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
2328
2329 hash = efx_ef10_filter_hash(spec);
2330
2331 spin_lock_bh(&efx->filter_lock);
2332
2333 /* Find any existing filter with the same match tuple or else
2334 * a free slot to insert at. If an existing filter is busy,
2335 * we have to give up.
2336 */
2337 for (;;) {
2338 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2339 saved_spec = efx_ef10_filter_entry_spec(table, i);
2340
2341 if (!saved_spec) {
2342 if (ins_index < 0)
2343 ins_index = i;
2344 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
2345 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
2346 rc = -EBUSY;
2347 goto fail_unlock;
2348 }
2349 EFX_WARN_ON_PARANOID(saved_spec->flags &
2350 EFX_FILTER_FLAG_RX_STACK);
2351 if (spec->priority < saved_spec->priority) {
2352 rc = -EPERM;
2353 goto fail_unlock;
2354 }
2355 ins_index = i;
2356 break;
2357 }
2358
2359 /* Once we reach the maximum search depth, use the
2360 * first suitable slot or return -EBUSY if there was
2361 * none
2362 */
2363 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2364 if (ins_index < 0) {
2365 rc = -EBUSY;
2366 goto fail_unlock;
2367 }
2368 break;
2369 }
2370
2371 ++depth;
2372 }
2373
2374 /* Create a software table entry if necessary, and mark it
2375 * busy. We might yet fail to insert, but any attempt to
2376 * insert a conflicting filter while we're waiting for the
2377 * firmware must find the busy entry.
2378 */
2379 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2380 if (saved_spec) {
2381 replacing = true;
2382 } else {
2383 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2384 if (!saved_spec) {
2385 rc = -ENOMEM;
2386 goto fail_unlock;
2387 }
2388 *saved_spec = *spec;
2389 }
2390 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2391 EFX_EF10_FILTER_FLAG_BUSY);
2392
2393 spin_unlock_bh(&efx->filter_lock);
2394
2395 /* Pack up the variables needed on completion */
2396 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
2397
2398 efx_ef10_filter_push_prep(efx, spec, inbuf,
2399 table->entry[ins_index].handle, replacing);
2400 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2401 MC_CMD_FILTER_OP_OUT_LEN,
2402 efx_ef10_filter_rfs_insert_complete, cookie);
2403
2404 return ins_index;
2405
2406fail_unlock:
2407 spin_unlock_bh(&efx->filter_lock);
2408 return rc;
2409}
2410
2411static void
2412efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
2413 int rc, efx_dword_t *outbuf,
2414 size_t outlen_actual)
2415{
2416 struct efx_ef10_filter_table *table = efx->filter_state;
2417 unsigned int ins_index, dmaq_id;
2418 struct efx_filter_spec *spec;
2419 bool replacing;
2420
2421 /* Unpack the cookie */
2422 replacing = cookie >> 31;
2423 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
2424 dmaq_id = cookie & 0xffff;
2425
2426 spin_lock_bh(&efx->filter_lock);
2427 spec = efx_ef10_filter_entry_spec(table, ins_index);
2428 if (rc == 0) {
2429 table->entry[ins_index].handle =
2430 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2431 if (replacing)
2432 spec->dmaq_id = dmaq_id;
2433 } else if (!replacing) {
2434 kfree(spec);
2435 spec = NULL;
2436 }
2437 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
2438 spin_unlock_bh(&efx->filter_lock);
2439
2440 wake_up_all(&table->waitq);
2441}
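
/* [Editor's note — illustrative sketch, not part of this commit.]
 * Layout of the cookie shared by efx_ef10_filter_rfs_insert() and the
 * completion above: bit 31 = replacing, bits 30:16 = table index,
 * bits 15:0 = RX queue.  The helper name is hypothetical; the bit
 * positions are taken from the two functions above.
 */
static inline unsigned long ef10_sketch_rfs_cookie(bool replacing,
						   unsigned int ins_index,
						   unsigned int dmaq_id)
{
	return (unsigned long)replacing << 31 | ins_index << 16 | dmaq_id;
}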
2442
2443static void
2444efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2445 unsigned long filter_idx,
2446 int rc, efx_dword_t *outbuf,
2447 size_t outlen_actual);
2448
2449static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2450 unsigned int filter_idx)
2451{
2452 struct efx_ef10_filter_table *table = efx->filter_state;
2453 struct efx_filter_spec *spec =
2454 efx_ef10_filter_entry_spec(table, filter_idx);
2455 MCDI_DECLARE_BUF(inbuf,
2456 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2457 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2458
2459 if (!spec ||
2460 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
2461 spec->priority != EFX_FILTER_PRI_HINT ||
2462 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
2463 flow_id, filter_idx))
2464 return false;
2465
2466 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2467 MC_CMD_FILTER_OP_IN_OP_REMOVE);
2468 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2469 table->entry[filter_idx].handle);
2470 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
2471 efx_ef10_filter_rfs_expire_complete, filter_idx))
2472 return false;
2473
2474 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2475 return true;
2476}
2477
2478static void
2479efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2480 unsigned long filter_idx,
2481 int rc, efx_dword_t *outbuf,
2482 size_t outlen_actual)
2483{
2484 struct efx_ef10_filter_table *table = efx->filter_state;
2485 struct efx_filter_spec *spec =
2486 efx_ef10_filter_entry_spec(table, filter_idx);
2487
2488 spin_lock_bh(&efx->filter_lock);
2489 if (rc == 0) {
2490 kfree(spec);
2491 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2492 }
2493 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2494 wake_up_all(&table->waitq);
2495 spin_unlock_bh(&efx->filter_lock);
2496}
2497
2498#endif /* CONFIG_RFS_ACCEL */
2499
2500static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
2501{
2502 int match_flags = 0;
2503
2504#define MAP_FLAG(gen_flag, mcdi_field) { \
2505 u32 old_mcdi_flags = mcdi_flags; \
2506 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2507 mcdi_field ## _LBN); \
2508 if (mcdi_flags != old_mcdi_flags) \
2509 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
2510 }
2511 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
2512 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
2513 MAP_FLAG(REM_HOST, SRC_IP);
2514 MAP_FLAG(LOC_HOST, DST_IP);
2515 MAP_FLAG(REM_MAC, SRC_MAC);
2516 MAP_FLAG(REM_PORT, SRC_PORT);
2517 MAP_FLAG(LOC_MAC, DST_MAC);
2518 MAP_FLAG(LOC_PORT, DST_PORT);
2519 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
2520 MAP_FLAG(INNER_VID, INNER_VLAN);
2521 MAP_FLAG(OUTER_VID, OUTER_VLAN);
2522 MAP_FLAG(IP_PROTO, IP_PROTO);
2523#undef MAP_FLAG
2524
2525 /* Did we map them all? */
2526 if (mcdi_flags)
2527 return -EINVAL;
2528
2529 return match_flags;
2530}
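
/* [Editor's note] For clarity, the expansion of one invocation above,
 * MAP_FLAG(REM_HOST, SRC_IP), is:
 *
 *	{
 *		u32 old_mcdi_flags = mcdi_flags;
 *		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN);
 *		if (mcdi_flags != old_mcdi_flags)
 *			match_flags |= EFX_FILTER_MATCH_REM_HOST;
 *	}
 *
 * Each known MCDI match bit is cleared and its generic equivalent set
 * only if the bit was actually present; any bit still set at the end
 * means the firmware reported a match type the driver cannot express,
 * hence the -EINVAL return.
 */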
2531
2532static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2533{
2534 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
2535 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
2536 unsigned int pd_match_pri, pd_match_count;
2537 struct efx_ef10_filter_table *table;
2538 size_t outlen;
2539 int rc;
2540
2541 table = kzalloc(sizeof(*table), GFP_KERNEL);
2542 if (!table)
2543 return -ENOMEM;
2544
2545 /* Find out which RX filter types are supported, and their priorities */
2546 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
2547 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
2548 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
2549 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
2550 &outlen);
2551 if (rc)
2552 goto fail;
2553 pd_match_count = MCDI_VAR_ARRAY_LEN(
2554 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
2555 table->rx_match_count = 0;
2556
2557 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
2558 u32 mcdi_flags =
2559 MCDI_ARRAY_DWORD(
2560 outbuf,
2561 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
2562 pd_match_pri);
2563 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
2564 if (rc < 0) {
2565 netif_dbg(efx, probe, efx->net_dev,
2566 "%s: fw flags %#x pri %u not supported in driver\n",
2567 __func__, mcdi_flags, pd_match_pri);
2568 } else {
2569 netif_dbg(efx, probe, efx->net_dev,
2570 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
2571 __func__, mcdi_flags, pd_match_pri,
2572 rc, table->rx_match_count);
2573 table->rx_match_flags[table->rx_match_count++] = rc;
2574 }
2575 }
2576
2577 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
2578 if (!table->entry) {
2579 rc = -ENOMEM;
2580 goto fail;
2581 }
2582
2583 efx->filter_state = table;
2584 init_waitqueue_head(&table->waitq);
2585 return 0;
2586
2587fail:
2588 kfree(table);
2589 return rc;
2590}
2591
2592static void efx_ef10_filter_table_restore(struct efx_nic *efx)
2593{
2594 struct efx_ef10_filter_table *table = efx->filter_state;
2595 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2596 struct efx_filter_spec *spec;
2597 unsigned int filter_idx;
2598 bool failed = false;
2599 int rc;
2600
2601 if (!nic_data->must_restore_filters)
2602 return;
2603
2604 spin_lock_bh(&efx->filter_lock);
2605
2606 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2607 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2608 if (!spec)
2609 continue;
2610
2611 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2612 spin_unlock_bh(&efx->filter_lock);
2613
2614 rc = efx_ef10_filter_push(efx, spec,
2615 &table->entry[filter_idx].handle,
2616 false);
2617 if (rc)
2618 failed = true;
2619
2620 spin_lock_bh(&efx->filter_lock);
2621 if (rc) {
2622 kfree(spec);
2623 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2624 } else {
2625 table->entry[filter_idx].spec &=
2626 ~EFX_EF10_FILTER_FLAG_BUSY;
2627 }
2628 }
2629
2630 spin_unlock_bh(&efx->filter_lock);
2631
2632 if (failed)
2633 netif_err(efx, hw, efx->net_dev,
2634 "unable to restore all filters\n");
2635 else
2636 nic_data->must_restore_filters = false;
2637}
2638
2639static void efx_ef10_filter_table_remove(struct efx_nic *efx)
2640{
2641 struct efx_ef10_filter_table *table = efx->filter_state;
2642 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2643 struct efx_filter_spec *spec;
2644 unsigned int filter_idx;
2645 int rc;
2646
2647 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2648 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2649 if (!spec)
2650 continue;
2651
2652 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2653 efx_ef10_filter_is_exclusive(spec) ?
2654 MC_CMD_FILTER_OP_IN_OP_REMOVE :
2655 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2656 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2657 table->entry[filter_idx].handle);
2658 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2659 NULL, 0, NULL);
2660
2661 WARN_ON(rc != 0);
2662 kfree(spec);
2663 }
2664
2665 vfree(table->entry);
2666 kfree(table);
2667}
2668
2669static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
2670{
2671 struct efx_ef10_filter_table *table = efx->filter_state;
2672 struct net_device *net_dev = efx->net_dev;
2673 struct efx_filter_spec spec;
2674 bool remove_failed = false;
2675 struct netdev_hw_addr *uc;
2676 struct netdev_hw_addr *mc;
2677 unsigned int filter_idx;
2678 int i, n, rc;
2679
2680 if (!efx_dev_registered(efx))
2681 return;
2682
2683 /* Mark old filters that may need to be removed */
2684 spin_lock_bh(&efx->filter_lock);
2685 n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
2686 for (i = 0; i < n; i++) {
2687 filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
2688 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2689 }
2690 n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
2691 for (i = 0; i < n; i++) {
2692 filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
2693 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2694 }
2695 spin_unlock_bh(&efx->filter_lock);
2696
2697 /* Copy/convert the address lists; add the primary station
2698 * address and broadcast address
2699 */
2700 netif_addr_lock_bh(net_dev);
2701 if (net_dev->flags & IFF_PROMISC ||
2702 netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
2703 table->stack_uc_count = -1;
2704 } else {
2705 table->stack_uc_count = 1 + netdev_uc_count(net_dev);
2706 memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
2707 ETH_ALEN);
2708 i = 1;
2709 netdev_for_each_uc_addr(uc, net_dev) {
2710 memcpy(table->stack_uc_list[i].addr,
2711 uc->addr, ETH_ALEN);
2712 i++;
2713 }
2714 }
2715 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
2716 netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
2717 table->stack_mc_count = -1;
2718 } else {
2719 table->stack_mc_count = 1 + netdev_mc_count(net_dev);
2720 eth_broadcast_addr(table->stack_mc_list[0].addr);
2721 i = 1;
2722 netdev_for_each_mc_addr(mc, net_dev) {
2723 memcpy(table->stack_mc_list[i].addr,
2724 mc->addr, ETH_ALEN);
2725 i++;
2726 }
2727 }
2728 netif_addr_unlock_bh(net_dev);
2729
2730 /* Insert/renew unicast filters */
2731 if (table->stack_uc_count >= 0) {
2732 for (i = 0; i < table->stack_uc_count; i++) {
2733 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2734 EFX_FILTER_FLAG_RX_RSS |
2735 EFX_FILTER_FLAG_RX_STACK,
2736 0);
2737 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2738 table->stack_uc_list[i].addr);
2739 rc = efx_ef10_filter_insert(efx, &spec, true);
2740 if (rc < 0) {
2741 /* Fall back to unicast-promisc */
2742 while (i--)
2743 efx_ef10_filter_remove_safe(
2744 efx, EFX_FILTER_PRI_REQUIRED,
2745 table->stack_uc_list[i].id);
2746 table->stack_uc_count = -1;
2747 break;
2748 }
2749 table->stack_uc_list[i].id = rc;
2750 }
2751 }
2752 if (table->stack_uc_count < 0) {
2753 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2754 EFX_FILTER_FLAG_RX_RSS |
2755 EFX_FILTER_FLAG_RX_STACK,
2756 0);
2757 efx_filter_set_uc_def(&spec);
2758 rc = efx_ef10_filter_insert(efx, &spec, true);
2759 if (rc < 0) {
2760 WARN_ON(1);
2761 table->stack_uc_count = 0;
2762 } else {
2763 table->stack_uc_list[0].id = rc;
2764 }
2765 }
2766
2767 /* Insert/renew multicast filters */
2768 if (table->stack_mc_count >= 0) {
2769 for (i = 0; i < table->stack_mc_count; i++) {
2770 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2771 EFX_FILTER_FLAG_RX_RSS |
2772 EFX_FILTER_FLAG_RX_STACK,
2773 0);
2774 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2775 table->stack_mc_list[i].addr);
2776 rc = efx_ef10_filter_insert(efx, &spec, true);
2777 if (rc < 0) {
2778 /* Fall back to multicast-promisc */
2779 while (i--)
2780 efx_ef10_filter_remove_safe(
2781 efx, EFX_FILTER_PRI_REQUIRED,
2782 table->stack_mc_list[i].id);
2783 table->stack_mc_count = -1;
2784 break;
2785 }
2786 table->stack_mc_list[i].id = rc;
2787 }
2788 }
2789 if (table->stack_mc_count < 0) {
2790 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2791 EFX_FILTER_FLAG_RX_RSS |
2792 EFX_FILTER_FLAG_RX_STACK,
2793 0);
2794 efx_filter_set_mc_def(&spec);
2795 rc = efx_ef10_filter_insert(efx, &spec, true);
2796 if (rc < 0) {
2797 WARN_ON(1);
2798 table->stack_mc_count = 0;
2799 } else {
2800 table->stack_mc_list[0].id = rc;
2801 }
2802 }
2803
2804 /* Remove filters that weren't renewed. Since nothing else
2805 * changes the STACK_OLD flag or removes these filters, we
2806 * don't need to hold the filter_lock while scanning for
2807 * these filters.
2808 */
2809 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
2810 if (ACCESS_ONCE(table->entry[i].spec) &
2811 EFX_EF10_FILTER_FLAG_STACK_OLD) {
2812 if (efx_ef10_filter_remove_internal(efx,
2813 EFX_FILTER_PRI_REQUIRED,
2814 i, true) < 0)
2815 remove_failed = true;
2816 }
2817 }
2818 WARN_ON(remove_failed);
2819}
2820
2821static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
2822{
2823 efx_ef10_filter_sync_rx_mode(efx);
2824
2825 return efx_mcdi_set_mac(efx);
2826}
2827
2828#ifdef CONFIG_SFC_MTD
2829
2830struct efx_ef10_nvram_type_info {
2831 u16 type, type_mask;
2832 u8 port;
2833 const char *name;
2834};
2835
2836static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
2837 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
2838 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
2839 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
2840 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
2841 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
2842 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
2843 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
2844 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
2845 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
2846 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
2847};
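
/* [Editor's note] The type_mask column lets one row of this table cover
 * a range of partition types: the probe below matches with
 * (type & ~info->type_mask) == info->type, so the PHY row with mask
 * 0xff accepts NVRAM_PARTITION_TYPE_PHY_MIN through PHY_MIN + 0xff,
 * while rows with a zero mask match exactly one type.
 */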
2848
2849static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
2850 struct efx_mcdi_mtd_partition *part,
2851 unsigned int type)
2852{
2853 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
2854 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
2855 const struct efx_ef10_nvram_type_info *info;
2856 size_t size, erase_size, outlen;
2857 bool protected;
2858 int rc;
2859
2860 for (info = efx_ef10_nvram_types; ; info++) {
2861 if (info ==
2862 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
2863 return -ENODEV;
2864 if ((type & ~info->type_mask) == info->type)
2865 break;
2866 }
2867 if (info->port != efx_port_num(efx))
2868 return -ENODEV;
2869
2870 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
2871 if (rc)
2872 return rc;
2873 if (protected)
2874 return -ENODEV; /* hide it */
2875
2876 part->nvram_type = type;
2877
2878 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
2879 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
2880 outbuf, sizeof(outbuf), &outlen);
2881 if (rc)
2882 return rc;
2883 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
2884 return -EIO;
2885 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
2886 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
2887 part->fw_subtype = MCDI_DWORD(outbuf,
2888 NVRAM_METADATA_OUT_SUBTYPE);
2889
2890 part->common.dev_type_name = "EF10 NVRAM manager";
2891 part->common.type_name = info->name;
2892
2893 part->common.mtd.type = MTD_NORFLASH;
2894 part->common.mtd.flags = MTD_CAP_NORFLASH;
2895 part->common.mtd.size = size;
2896 part->common.mtd.erasesize = erase_size;
2897
2898 return 0;
2899}
2900
2901static int efx_ef10_mtd_probe(struct efx_nic *efx)
2902{
2903 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
2904 struct efx_mcdi_mtd_partition *parts;
2905 size_t outlen, n_parts_total, i, n_parts;
2906 unsigned int type;
2907 int rc;
2908
2909 ASSERT_RTNL();
2910
2911 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
2912 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
2913 outbuf, sizeof(outbuf), &outlen);
2914 if (rc)
2915 return rc;
2916 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
2917 return -EIO;
2918
2919 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
2920 if (n_parts_total >
2921 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
2922 return -EIO;
2923
2924 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
2925 if (!parts)
2926 return -ENOMEM;
2927
2928 n_parts = 0;
2929 for (i = 0; i < n_parts_total; i++) {
2930 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
2931 i);
2932 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
2933 if (rc == 0)
2934 n_parts++;
2935 else if (rc != -ENODEV)
2936 goto fail;
2937 }
2938
2939 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
2940fail:
2941 if (rc)
2942 kfree(parts);
2943 return rc;
2944}
2945
2946#endif /* CONFIG_SFC_MTD */
2947
2948static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
2949{
2950 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
2951}
2952
2953const struct efx_nic_type efx_hunt_a0_nic_type = {
2954 .mem_map_size = efx_ef10_mem_map_size,
2955 .probe = efx_ef10_probe,
2956 .remove = efx_ef10_remove,
2957 .dimension_resources = efx_ef10_dimension_resources,
2958 .init = efx_ef10_init_nic,
2959 .fini = efx_port_dummy_op_void,
2960 .map_reset_reason = efx_mcdi_map_reset_reason,
2961 .map_reset_flags = efx_ef10_map_reset_flags,
2962 .reset = efx_mcdi_reset,
2963 .probe_port = efx_mcdi_port_probe,
2964 .remove_port = efx_mcdi_port_remove,
2965 .fini_dmaq = efx_ef10_fini_dmaq,
2966 .describe_stats = efx_ef10_describe_stats,
2967 .update_stats = efx_ef10_update_stats,
2968 .start_stats = efx_mcdi_mac_start_stats,
2969 .stop_stats = efx_mcdi_mac_stop_stats,
2970 .set_id_led = efx_mcdi_set_id_led,
2971 .push_irq_moderation = efx_ef10_push_irq_moderation,
2972 .reconfigure_mac = efx_ef10_mac_reconfigure,
2973 .check_mac_fault = efx_mcdi_mac_check_fault,
2974 .reconfigure_port = efx_mcdi_port_reconfigure,
2975 .get_wol = efx_ef10_get_wol,
2976 .set_wol = efx_ef10_set_wol,
2977 .resume_wol = efx_port_dummy_op_void,
2978 /* TODO: test_chip */
2979 .test_nvram = efx_mcdi_nvram_test_all,
2980 .mcdi_request = efx_ef10_mcdi_request,
2981 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
2982 .mcdi_read_response = efx_ef10_mcdi_read_response,
2983 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
2984 .irq_enable_master = efx_port_dummy_op_void,
2985 .irq_test_generate = efx_ef10_irq_test_generate,
2986 .irq_disable_non_ev = efx_port_dummy_op_void,
2987 .irq_handle_msi = efx_ef10_msi_interrupt,
2988 .irq_handle_legacy = efx_ef10_legacy_interrupt,
2989 .tx_probe = efx_ef10_tx_probe,
2990 .tx_init = efx_ef10_tx_init,
2991 .tx_remove = efx_ef10_tx_remove,
2992 .tx_write = efx_ef10_tx_write,
2993 .rx_push_indir_table = efx_ef10_rx_push_indir_table,
2994 .rx_probe = efx_ef10_rx_probe,
2995 .rx_init = efx_ef10_rx_init,
2996 .rx_remove = efx_ef10_rx_remove,
2997 .rx_write = efx_ef10_rx_write,
2998 .rx_defer_refill = efx_ef10_rx_defer_refill,
2999 .ev_probe = efx_ef10_ev_probe,
3000 .ev_init = efx_ef10_ev_init,
3001 .ev_fini = efx_ef10_ev_fini,
3002 .ev_remove = efx_ef10_ev_remove,
3003 .ev_process = efx_ef10_ev_process,
3004 .ev_read_ack = efx_ef10_ev_read_ack,
3005 .ev_test_generate = efx_ef10_ev_test_generate,
3006 .filter_table_probe = efx_ef10_filter_table_probe,
3007 .filter_table_restore = efx_ef10_filter_table_restore,
3008 .filter_table_remove = efx_ef10_filter_table_remove,
3009 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
3010 .filter_insert = efx_ef10_filter_insert,
3011 .filter_remove_safe = efx_ef10_filter_remove_safe,
3012 .filter_get_safe = efx_ef10_filter_get_safe,
3013 .filter_clear_rx = efx_ef10_filter_clear_rx,
3014 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
3015 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
3016 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
3017#ifdef CONFIG_RFS_ACCEL
3018 .filter_rfs_insert = efx_ef10_filter_rfs_insert,
3019 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
3020#endif
3021#ifdef CONFIG_SFC_MTD
3022 .mtd_probe = efx_ef10_mtd_probe,
3023 .mtd_rename = efx_mcdi_mtd_rename,
3024 .mtd_read = efx_mcdi_mtd_read,
3025 .mtd_erase = efx_mcdi_mtd_erase,
3026 .mtd_write = efx_mcdi_mtd_write,
3027 .mtd_sync = efx_mcdi_mtd_sync,
3028#endif
3029 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
3030
3031 .revision = EFX_REV_HUNT_A0,
3032 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
3033 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
3034 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
3035 .can_rx_scatter = true,
3036 .always_rx_scatter = true,
3037 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3038 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
3039 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3040 NETIF_F_RXHASH | NETIF_F_NTUPLE),
3041 .mcdi_max_ver = 2,
3042 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
3043};
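
/* [Editor's note — illustrative sketch, not part of this commit.]
 * Generic sfc code consumes this method table through efx->type rather
 * than calling ef10 functions directly; once a Huntington device is
 * bound, a filter insertion from the core would look like:
 *
 *	rc = efx->type->filter_insert(efx, &spec, false);
 *
 * (the efx.c hunk at the end of this diff shows the same pattern for
 * filter_update_rx_scatter).
 */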
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 000000000000..b3f4e3755fd9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,415 @@
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_EF10_REGS_H
11#define EFX_EF10_REGS_H
12
13/* EF10 hardware architecture definitions have a name prefix following
14 * the format:
15 *
16 * E<type>_<min-rev><max-rev>_
17 *
18 * The following <type> strings are used:
19 *
20 * MMIO register Host memory structure
21 * -------------------------------------------------------------
22 * Address R
23 * Bitfield RF SF
24 * Enumerator FE SE
25 *
26 * <min-rev> is the first revision to which the definition applies:
27 *
28 * D: Huntington A0
29 *
30 * If the definition has been changed or removed in later revisions
31 * then <max-rev> is the last revision to which the definition applies;
32 * otherwise it is "Z".
33 */
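
/* [Editor's note] Worked example of the convention above:
 * ERF_DZ_EVQ_RPTR_VLD_LBN decodes as E (EF10), RF (bitfield of an MMIO
 * register), D (introduced at Huntington A0), Z (not removed in any
 * later revision).  The _LBN and _WIDTH suffixes give a field's lowest
 * bit number and width, as consumed by the EFX bitfield macros
 * (EFX_POPULATE_DWORD_2() and friends).
 */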
34
35/**************************************************************************
36 *
37 * EF10 registers and descriptors
38 *
39 **************************************************************************
40 */
41
42/* BIU_HW_REV_ID_REG: */
43#define ER_DZ_BIU_HW_REV_ID 0x00000000
44#define ERF_DZ_HW_REV_ID_LBN 0
45#define ERF_DZ_HW_REV_ID_WIDTH 32
46
47/* BIU_MC_SFT_STATUS_REG: */
48#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
49#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
50#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
51#define ERF_DZ_MC_SFT_STATUS_LBN 0
52#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
53
54/* BIU_INT_ISR_REG: */
55#define ER_DZ_BIU_INT_ISR 0x00000090
56#define ERF_DZ_ISR_REG_LBN 0
57#define ERF_DZ_ISR_REG_WIDTH 32
58
59/* MC_DB_LWRD_REG: */
60#define ER_DZ_MC_DB_LWRD 0x00000200
61#define ERF_DZ_MC_DOORBELL_L_LBN 0
62#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
63
64/* MC_DB_HWRD_REG: */
65#define ER_DZ_MC_DB_HWRD 0x00000204
66#define ERF_DZ_MC_DOORBELL_H_LBN 0
67#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
68
69/* EVQ_RPTR_REG: */
70#define ER_DZ_EVQ_RPTR 0x00000400
71#define ER_DZ_EVQ_RPTR_STEP 8192
72#define ER_DZ_EVQ_RPTR_ROWS 2048
73#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
74#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
75#define ERF_DZ_EVQ_RPTR_LBN 0
76#define ERF_DZ_EVQ_RPTR_WIDTH 15
77
78/* EVQ_TMR_REG: */
79#define ER_DZ_EVQ_TMR 0x00000420
80#define ER_DZ_EVQ_TMR_STEP 8192
81#define ER_DZ_EVQ_TMR_ROWS 2048
82#define ERF_DZ_TC_TIMER_MODE_LBN 14
83#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
84#define ERF_DZ_TC_TIMER_VAL_LBN 0
85#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
86
87/* RX_DESC_UPD_REG: */
88#define ER_DZ_RX_DESC_UPD 0x00000830
89#define ER_DZ_RX_DESC_UPD_STEP 8192
90#define ER_DZ_RX_DESC_UPD_ROWS 2048
91#define ERF_DZ_RX_DESC_WPTR_LBN 0
92#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
93
94/* TX_DESC_UPD_REG: */
95#define ER_DZ_TX_DESC_UPD 0x00000a10
96#define ER_DZ_TX_DESC_UPD_STEP 8192
97#define ER_DZ_TX_DESC_UPD_ROWS 2048
98#define ERF_DZ_RSVD_LBN 76
99#define ERF_DZ_RSVD_WIDTH 20
100#define ERF_DZ_TX_DESC_WPTR_LBN 64
101#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
102#define ERF_DZ_TX_DESC_HWORD_LBN 32
103#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
104#define ERF_DZ_TX_DESC_LWORD_LBN 0
105#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
106
107/* DRIVER_EV */
108#define ESF_DZ_DRV_CODE_LBN 60
109#define ESF_DZ_DRV_CODE_WIDTH 4
110#define ESF_DZ_DRV_SUB_CODE_LBN 56
111#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
112#define ESE_DZ_DRV_TIMER_EV 3
113#define ESE_DZ_DRV_START_UP_EV 2
114#define ESE_DZ_DRV_WAKE_UP_EV 1
115#define ESF_DZ_DRV_SUB_DATA_LBN 0
116#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
117#define ESF_DZ_DRV_EVQ_ID_LBN 0
118#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
119#define ESF_DZ_DRV_TMR_ID_LBN 0
120#define ESF_DZ_DRV_TMR_ID_WIDTH 14
121
122/* EVENT_ENTRY */
123#define ESF_DZ_EV_CODE_LBN 60
124#define ESF_DZ_EV_CODE_WIDTH 4
125#define ESE_DZ_EV_CODE_MCDI_EV 12
126#define ESE_DZ_EV_CODE_DRIVER_EV 5
127#define ESE_DZ_EV_CODE_TX_EV 2
128#define ESE_DZ_EV_CODE_RX_EV 0
129#define ESE_DZ_OTHER other
130#define ESF_DZ_EV_DATA_LBN 0
131#define ESF_DZ_EV_DATA_WIDTH 60
132
133/* MC_EVENT */
134#define ESF_DZ_MC_CODE_LBN 60
135#define ESF_DZ_MC_CODE_WIDTH 4
136#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
137#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
138#define ESF_DZ_MC_DROP_EVENT_LBN 58
139#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
140#define ESF_DZ_MC_SOFT_LBN 0
141#define ESF_DZ_MC_SOFT_WIDTH 58
142
143/* RX_EVENT */
144#define ESF_DZ_RX_CODE_LBN 60
145#define ESF_DZ_RX_CODE_WIDTH 4
146#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
147#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
148#define ESF_DZ_RX_DROP_EVENT_LBN 58
149#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
150#define ESF_DZ_RX_EV_RSVD2_LBN 54
151#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
152#define ESF_DZ_RX_EV_SOFT2_LBN 52
153#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
154#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
155#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
156#define ESF_DZ_RX_L4_CLASS_LBN 45
157#define ESF_DZ_RX_L4_CLASS_WIDTH 3
158#define ESE_DZ_L4_CLASS_RSVD7 7
159#define ESE_DZ_L4_CLASS_RSVD6 6
160#define ESE_DZ_L4_CLASS_RSVD5 5
161#define ESE_DZ_L4_CLASS_RSVD4 4
162#define ESE_DZ_L4_CLASS_RSVD3 3
163#define ESE_DZ_L4_CLASS_UDP 2
164#define ESE_DZ_L4_CLASS_TCP 1
165#define ESE_DZ_L4_CLASS_UNKNOWN 0
166#define ESF_DZ_RX_L3_CLASS_LBN 42
167#define ESF_DZ_RX_L3_CLASS_WIDTH 3
168#define ESE_DZ_L3_CLASS_RSVD7 7
169#define ESE_DZ_L3_CLASS_IP6_FRAG 6
170#define ESE_DZ_L3_CLASS_ARP 5
171#define ESE_DZ_L3_CLASS_IP4_FRAG 4
172#define ESE_DZ_L3_CLASS_FCOE 3
173#define ESE_DZ_L3_CLASS_IP6 2
174#define ESE_DZ_L3_CLASS_IP4 1
175#define ESE_DZ_L3_CLASS_UNKNOWN 0
176#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
177#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
178#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
179#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
180#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
181#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
182#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
183#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
184#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
185#define ESE_DZ_ETH_TAG_CLASS_NONE 0
186#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
187#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
188#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
189#define ESE_DZ_ETH_BASE_CLASS_LLC 1
190#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
191#define ESF_DZ_RX_MAC_CLASS_LBN 35
192#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
193#define ESE_DZ_MAC_CLASS_MCAST 1
194#define ESE_DZ_MAC_CLASS_UCAST 0
195#define ESF_DZ_RX_EV_SOFT1_LBN 32
196#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
197#define ESF_DZ_RX_EV_RSVD1_LBN 31
198#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
199#define ESF_DZ_RX_ABORT_LBN 30
200#define ESF_DZ_RX_ABORT_WIDTH 1
201#define ESF_DZ_RX_ECC_ERR_LBN 29
202#define ESF_DZ_RX_ECC_ERR_WIDTH 1
203#define ESF_DZ_RX_CRC1_ERR_LBN 28
204#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
205#define ESF_DZ_RX_CRC0_ERR_LBN 27
206#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
207#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
208#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
209#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
210#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
211#define ESF_DZ_RX_ECRC_ERR_LBN 24
212#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
213#define ESF_DZ_RX_QLABEL_LBN 16
214#define ESF_DZ_RX_QLABEL_WIDTH 5
215#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
216#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
217#define ESF_DZ_RX_CONT_LBN 14
218#define ESF_DZ_RX_CONT_WIDTH 1
219#define ESF_DZ_RX_BYTES_LBN 0
220#define ESF_DZ_RX_BYTES_WIDTH 14
221
222/* RX_KER_DESC */
223#define ESF_DZ_RX_KER_RESERVED_LBN 62
224#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
225#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
226#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
227#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
228#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
229
230/* RX_USER_DESC */
231#define ESF_DZ_RX_USR_RESERVED_LBN 62
232#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
233#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
234#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
235#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
236#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
237#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
238#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
239#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
240#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
241#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
242#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
243#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
244#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
245#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
246#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
247#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
248#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
249#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
250#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
251#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
252#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
253#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
254#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
255#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
256#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
257#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
258#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
259
260/* TX_CSUM_TSTAMP_DESC */
261#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
262#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
263#define ESF_DZ_TX_OPTION_TYPE_LBN 60
264#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
265#define ESE_DZ_TX_OPTION_DESC_TSO 7
266#define ESE_DZ_TX_OPTION_DESC_VLAN 6
267#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
268#define ESF_DZ_TX_TIMESTAMP_LBN 5
269#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
270#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
271#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
272#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
273#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
274#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
275#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
276#define ESE_DZ_TX_OPTION_CRC_FCOE 1
277#define ESE_DZ_TX_OPTION_CRC_OFF 0
278#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
279#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
280#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
281#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
282
283/* TX_EVENT */
284#define ESF_DZ_TX_CODE_LBN 60
285#define ESF_DZ_TX_CODE_WIDTH 4
286#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
287#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
288#define ESF_DZ_TX_DROP_EVENT_LBN 58
289#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
290#define ESF_DZ_TX_EV_RSVD_LBN 48
291#define ESF_DZ_TX_EV_RSVD_WIDTH 10
292#define ESF_DZ_TX_SOFT2_LBN 32
293#define ESF_DZ_TX_SOFT2_WIDTH 16
294#define ESF_DZ_TX_CAN_MERGE_LBN 31
295#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
296#define ESF_DZ_TX_SOFT1_LBN 24
297#define ESF_DZ_TX_SOFT1_WIDTH 7
298#define ESF_DZ_TX_QLABEL_LBN 16
299#define ESF_DZ_TX_QLABEL_WIDTH 5
300#define ESF_DZ_TX_DESCR_INDX_LBN 0
301#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
302
303/* TX_KER_DESC */
304#define ESF_DZ_TX_KER_TYPE_LBN 63
305#define ESF_DZ_TX_KER_TYPE_WIDTH 1
306#define ESF_DZ_TX_KER_CONT_LBN 62
307#define ESF_DZ_TX_KER_CONT_WIDTH 1
308#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
309#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
310#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
311#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
312
313/* TX_PIO_DESC */
314#define ESF_DZ_TX_PIO_TYPE_LBN 63
315#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
316#define ESF_DZ_TX_PIO_OPT_LBN 60
317#define ESF_DZ_TX_PIO_OPT_WIDTH 3
318#define ESF_DZ_TX_PIO_CONT_LBN 59
319#define ESF_DZ_TX_PIO_CONT_WIDTH 1
320#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
321#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
322#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
323#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
324
325/* TX_TSO_DESC */
326#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
327#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
328#define ESF_DZ_TX_OPTION_TYPE_LBN 60
329#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
330#define ESE_DZ_TX_OPTION_DESC_TSO 7
331#define ESE_DZ_TX_OPTION_DESC_VLAN 6
332#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
333#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
334#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
335#define ESF_DZ_TX_TSO_IP_ID_LBN 32
336#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
337#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
338#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
339
340/* TX_USER_DESC */
341#define ESF_DZ_TX_USR_TYPE_LBN 63
342#define ESF_DZ_TX_USR_TYPE_WIDTH 1
343#define ESF_DZ_TX_USR_CONT_LBN 62
344#define ESF_DZ_TX_USR_CONT_WIDTH 1
345#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
346#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
347#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
348#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
349#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
350#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
351#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
352#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
353#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
354#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
355#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
356#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
357#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
358#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
359#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
360#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
361#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
362#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
363#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
364#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
365#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
366#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
367#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
368#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
369#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
370#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
371/*************************************************************************/
372
373/* TX_DESC_UPD_REG: Transmit descriptor update register.
374 * We may write just one dword of these registers.
375 */
376#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
377#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
378#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
379
380/* The workaround for bug 35388 requires multiplexing writes through
381 * the TX_DESC_UPD_DWORD address.
382 * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
383 * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
384 * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
385 */
386#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
387#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
388#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
389#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
390#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
391#define ERF_DD_EVQ_IND_RPTR_LBN 0
392#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
393#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
394#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
395#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
396#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
397#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
398#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
399#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
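
/* [Editor's note — hedged sketch, not part of this commit.]  One way
 * the bug 35388 workaround above might be driven: an EVQ_RPTR update is
 * split into two dword writes through the indirect address, high byte
 * first, each tagged in bits 11:8.  EFX_POPULATE_DWORD_2() and
 * efx_writed_page() are existing sfc helpers; their use here for the
 * workaround is an assumption.
 */
static void ef10_sketch_evq_rptr_ind(struct efx_nic *efx,
				     unsigned int evq, unsigned int rptr)
{
	efx_dword_t reg;

	/* Write the high 8 bits of the read pointer, flags = HIGH */
	EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
			     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
			     ERF_DD_EVQ_IND_RPTR,
			     rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH);
	efx_writed_page(efx, &reg, ER_DD_EVQ_INDIRECT, evq);

	/* Then the low 8 bits, flags = LOW */
	EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
			     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
			     ERF_DD_EVQ_IND_RPTR,
			     rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
	efx_writed_page(efx, &reg, ER_DD_EVQ_INDIRECT, evq);
}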
400
401/* TX_PIOBUF
402 * PIO buffer aperture (paged)
403 */
404#define ER_DZ_TX_PIOBUF 4096
405#define ER_DZ_TX_PIOBUF_SIZE 2048
406
407/* RX packet prefix */
408#define ES_DZ_RX_PREFIX_HASH_OFST 0
409#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
410#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
411#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
412#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
413#define ES_DZ_RX_PREFIX_SIZE 14
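
/* [Editor's note — hedged sketch, not part of this commit.]  Consuming
 * the 14-byte prefix that precedes each received packet using the
 * offsets above; 'prefix' points at the start of the RX buffer.
 * Little-endian field layout is an assumption, consistent with the
 * driver using ES_DZ_RX_PREFIX_HASH_OFST as its rx_hash_offset.
 */
static inline u32 ef10_sketch_rx_hash(const u8 *prefix)
{
	return le32_to_cpup((const __le32 *)
			    (prefix + ES_DZ_RX_PREFIX_HASH_OFST));
}

static inline u16 ef10_sketch_rx_pktlen(const u8 *prefix)
{
	return le16_to_cpup((const __le16 *)
			    (prefix + ES_DZ_RX_PREFIX_PKTLEN_OFST));
}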
414
415#endif /* EFX_EF10_REGS_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c72968840f1a..07c9bc4c61bc 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
17#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/tcp.h> 18#include <linux/tcp.h>
19#include <linux/in.h> 19#include <linux/in.h>
20#include <linux/crc32.h>
21#include <linux/ethtool.h> 20#include <linux/ethtool.h>
22#include <linux/topology.h> 21#include <linux/topology.h>
23#include <linux/gfp.h> 22#include <linux/gfp.h>
@@ -81,8 +80,7 @@ const char *const efx_reset_type_names[] = {
81 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 80 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
82 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 81 [RESET_TYPE_INT_ERROR] = "INT_ERROR",
83 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 82 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
84 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", 83 [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
85 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
86 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 84 [RESET_TYPE_TX_SKIP] = "TX_SKIP",
87 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 85 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
88}; 86};
@@ -191,8 +189,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
191 * 189 *
192 *************************************************************************/ 190 *************************************************************************/
193 191
194static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq); 192static int efx_soft_enable_interrupts(struct efx_nic *efx);
195static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq); 193static void efx_soft_disable_interrupts(struct efx_nic *efx);
196static void efx_remove_channel(struct efx_channel *channel); 194static void efx_remove_channel(struct efx_channel *channel);
197static void efx_remove_channels(struct efx_nic *efx); 195static void efx_remove_channels(struct efx_nic *efx);
198static const struct efx_channel_type efx_default_channel_type; 196static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +246,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
248 efx_channel_get_rx_queue(channel); 246 efx_channel_get_rx_queue(channel);
249 247
250 efx_rx_flush_packet(channel); 248 efx_rx_flush_packet(channel);
251 if (rx_queue->enabled) 249 efx_fast_push_rx_descriptors(rx_queue);
252 efx_fast_push_rx_descriptors(rx_queue);
253 } 250 }
254 251
255 return spent; 252 return spent;
256} 253}
257 254
258/* Mark channel as finished processing
259 *
260 * Note that since we will not receive further interrupts for this
261 * channel before we finish processing and call the eventq_read_ack()
262 * method, there is no need to use the interrupt hold-off timers.
263 */
264static inline void efx_channel_processed(struct efx_channel *channel)
265{
266 /* The interrupt handler for this channel may set work_pending
267 * as soon as we acknowledge the events we've seen. Make sure
268 * it's cleared before then. */
269 channel->work_pending = false;
270 smp_wmb();
271
272 efx_nic_eventq_read_ack(channel);
273}
274
275/* NAPI poll handler 255/* NAPI poll handler
276 * 256 *
277 * NAPI guarantees serialisation of polls of the same device, which 257 * NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +296,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
316 296
317 /* There is no race here; although napi_disable() will 297 /* There is no race here; although napi_disable() will
318 * only wait for napi_complete(), this isn't a problem 298 * only wait for napi_complete(), this isn't a problem
319 * since efx_channel_processed() will have no effect if 299 * since efx_nic_eventq_read_ack() will have no effect if
320 * interrupts have already been disabled. 300 * interrupts have already been disabled.
321 */ 301 */
322 napi_complete(napi); 302 napi_complete(napi);
323 efx_channel_processed(channel); 303 efx_nic_eventq_read_ack(channel);
324 } 304 }
325 305
326 return spent; 306 return spent;
327} 307}
328 308
329/* Process the eventq of the specified channel immediately on this CPU
330 *
331 * Disable hardware generated interrupts, wait for any existing
332 * processing to finish, then directly poll (and ack ) the eventq.
333 * Finally reenable NAPI and interrupts.
334 *
335 * This is for use only during a loopback self-test. It must not
336 * deliver any packets up the stack as this can result in deadlock.
337 */
338void efx_process_channel_now(struct efx_channel *channel)
339{
340 struct efx_nic *efx = channel->efx;
341
342 BUG_ON(channel->channel >= efx->n_channels);
343 BUG_ON(!channel->enabled);
344 BUG_ON(!efx->loopback_selftest);
345
346 /* Disable interrupts and wait for ISRs to complete */
347 efx_nic_disable_interrupts(efx);
348 if (efx->legacy_irq) {
349 synchronize_irq(efx->legacy_irq);
350 efx->legacy_irq_enabled = false;
351 }
352 if (channel->irq)
353 synchronize_irq(channel->irq);
354
355 /* Wait for any NAPI processing to complete */
356 napi_disable(&channel->napi_str);
357
358 /* Poll the channel */
359 efx_process_channel(channel, channel->eventq_mask + 1);
360
361 /* Ack the eventq. This may cause an interrupt to be generated
362 * when they are reenabled */
363 efx_channel_processed(channel);
364
365 napi_enable(&channel->napi_str);
366 if (efx->legacy_irq)
367 efx->legacy_irq_enabled = true;
368 efx_nic_enable_interrupts(efx);
369}
370
371/* Create event queue 309/* Create event queue
372 * Event queue memory allocations are done only once. If the channel 310 * Event queue memory allocations are done only once. If the channel
373 * is reset, the memory buffer will be reused; this guards against 311 * is reset, the memory buffer will be reused; this guards against
@@ -391,14 +329,23 @@ static int efx_probe_eventq(struct efx_channel *channel)
391} 329}
392 330
393/* Prepare channel's event queue */ 331/* Prepare channel's event queue */
394static void efx_init_eventq(struct efx_channel *channel) 332static int efx_init_eventq(struct efx_channel *channel)
395{ 333{
396 netif_dbg(channel->efx, drv, channel->efx->net_dev, 334 struct efx_nic *efx = channel->efx;
397 "chan %d init event queue\n", channel->channel); 335 int rc;
398 336
399 channel->eventq_read_ptr = 0; 337 EFX_WARN_ON_PARANOID(channel->eventq_init);
400 338
401 efx_nic_init_eventq(channel); 339 netif_dbg(efx, drv, efx->net_dev,
340 "chan %d init event queue\n", channel->channel);
341
342 rc = efx_nic_init_eventq(channel);
343 if (rc == 0) {
344 efx->type->push_irq_moderation(channel);
345 channel->eventq_read_ptr = 0;
346 channel->eventq_init = true;
347 }
348 return rc;
402} 349}
403 350
404/* Enable event queue processing and NAPI */ 351/* Enable event queue processing and NAPI */
@@ -407,11 +354,7 @@ static void efx_start_eventq(struct efx_channel *channel)
407 netif_dbg(channel->efx, ifup, channel->efx->net_dev, 354 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
408 "chan %d start event queue\n", channel->channel); 355 "chan %d start event queue\n", channel->channel);
409 356
410 /* The interrupt handler for this channel may set work_pending 357 /* Make sure the NAPI handler sees the enabled flag set */
411 * as soon as we enable it. Make sure it's cleared before
412 * then. Similarly, make sure it sees the enabled flag set.
413 */
414 channel->work_pending = false;
415 channel->enabled = true; 358 channel->enabled = true;
416 smp_wmb(); 359 smp_wmb();
417 360
@@ -431,10 +374,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
431 374
432static void efx_fini_eventq(struct efx_channel *channel) 375static void efx_fini_eventq(struct efx_channel *channel)
433{ 376{
377 if (!channel->eventq_init)
378 return;
379
434 netif_dbg(channel->efx, drv, channel->efx->net_dev, 380 netif_dbg(channel->efx, drv, channel->efx->net_dev,
435 "chan %d fini event queue\n", channel->channel); 381 "chan %d fini event queue\n", channel->channel);
436 382
437 efx_nic_fini_eventq(channel); 383 efx_nic_fini_eventq(channel);
384 channel->eventq_init = false;
438} 385}
439 386
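The new eventq_init flag makes the init/fini pair idempotent: efx_fini_eventq() returns early unless a matching init actually succeeded, so unwind paths may call it unconditionally. A sketch of the resulting property (illustrative):

static void example_unconditional_unwind(struct efx_channel *channel)
{
        /* Safe whether or not efx_init_eventq() ever ran or succeeded */
        efx_fini_eventq(channel);
        efx_fini_eventq(channel);       /* second call: guarded no-op */
}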
440static void efx_remove_eventq(struct efx_channel *channel) 387static void efx_remove_eventq(struct efx_channel *channel)
@@ -583,8 +530,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
583 530
584 efx_for_each_channel(channel, efx) 531 efx_for_each_channel(channel, efx)
585 channel->type->get_name(channel, 532 channel->type->get_name(channel,
586 efx->channel_name[channel->channel], 533 efx->msi_context[channel->channel].name,
587 sizeof(efx->channel_name[0])); 534 sizeof(efx->msi_context[0].name));
588} 535}
589 536
590static int efx_probe_channels(struct efx_nic *efx) 537static int efx_probe_channels(struct efx_nic *efx)
@@ -634,13 +581,13 @@ static void efx_start_datapath(struct efx_nic *efx)
634 * support the current MTU, including padding for header 581 * support the current MTU, including padding for header
635 * alignment and overruns. 582 * alignment and overruns.
636 */ 583 */
637 efx->rx_dma_len = (efx->type->rx_buffer_hash_size + 584 efx->rx_dma_len = (efx->rx_prefix_size +
638 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 585 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
639 efx->type->rx_buffer_padding); 586 efx->type->rx_buffer_padding);
640 rx_buf_len = (sizeof(struct efx_rx_page_state) + 587 rx_buf_len = (sizeof(struct efx_rx_page_state) +
641 NET_IP_ALIGN + efx->rx_dma_len); 588 NET_IP_ALIGN + efx->rx_dma_len);
642 if (rx_buf_len <= PAGE_SIZE) { 589 if (rx_buf_len <= PAGE_SIZE) {
643 efx->rx_scatter = false; 590 efx->rx_scatter = efx->type->always_rx_scatter;
644 efx->rx_buffer_order = 0; 591 efx->rx_buffer_order = 0;
645 } else if (efx->type->can_rx_scatter) { 592 } else if (efx->type->can_rx_scatter) {
646 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); 593 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
@@ -668,9 +615,9 @@ static void efx_start_datapath(struct efx_nic *efx)
668 efx->rx_dma_len, efx->rx_page_buf_step, 615 efx->rx_dma_len, efx->rx_page_buf_step,
669 efx->rx_bufs_per_page, efx->rx_pages_per_batch); 616 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
670 617
671 /* RX filters also have scatter-enabled flags */ 618 /* RX filters may also have scatter-enabled flags */
672 if (efx->rx_scatter != old_rx_scatter) 619 if (efx->rx_scatter != old_rx_scatter)
673 efx_filter_update_rx_scatter(efx); 620 efx->type->filter_update_rx_scatter(efx);
674 621
675 /* We must keep at least one descriptor in a TX ring empty. 622 /* We must keep at least one descriptor in a TX ring empty.
676 * We could avoid this when the queue size does not exactly 623 * We could avoid this when the queue size does not exactly
@@ -684,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx)
684 631
685 /* Initialise the channels */ 632 /* Initialise the channels */
686 efx_for_each_channel(channel, efx) { 633 efx_for_each_channel(channel, efx) {
687 efx_for_each_channel_tx_queue(tx_queue, channel) 634 efx_for_each_channel_tx_queue(tx_queue, channel) {
688 efx_init_tx_queue(tx_queue); 635 efx_init_tx_queue(tx_queue);
636 atomic_inc(&efx->active_queues);
637 }
689 638
690 efx_for_each_channel_rx_queue(rx_queue, channel) { 639 efx_for_each_channel_rx_queue(rx_queue, channel) {
691 efx_init_rx_queue(rx_queue); 640 efx_init_rx_queue(rx_queue);
641 atomic_inc(&efx->active_queues);
692 efx_nic_generate_fill_event(rx_queue); 642 efx_nic_generate_fill_event(rx_queue);
693 } 643 }
694 644
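Every TX and RX queue started in this loop bumps active_queues; the assumption here (hedged, inferred from this hunk) is that each active queue later produces one flush-done event and teardown waits for the counter to drain. Illustrative fragment:

/* Hedged sketch: teardown is complete once every queue counted
 * during efx_start_datapath() has reported its flush. */
static bool example_flushes_done(struct efx_nic *efx)
{
        return atomic_read(&efx->active_queues) == 0;
}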
@@ -704,30 +654,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
704 struct efx_channel *channel; 654 struct efx_channel *channel;
705 struct efx_tx_queue *tx_queue; 655 struct efx_tx_queue *tx_queue;
706 struct efx_rx_queue *rx_queue; 656 struct efx_rx_queue *rx_queue;
707 struct pci_dev *dev = efx->pci_dev;
708 int rc; 657 int rc;
709 658
710 EFX_ASSERT_RESET_SERIALISED(efx); 659 EFX_ASSERT_RESET_SERIALISED(efx);
711 BUG_ON(efx->port_enabled); 660 BUG_ON(efx->port_enabled);
712 661
713 /* Only perform flush if dma is enabled */ 662 /* Stop RX refill */
714 if (dev->is_busmaster && efx->state != STATE_RECOVERY) { 663 efx_for_each_channel(channel, efx) {
715 rc = efx_nic_flush_queues(efx); 664 efx_for_each_channel_rx_queue(rx_queue, channel)
716 665 rx_queue->refill_enabled = false;
717 if (rc && EFX_WORKAROUND_7803(efx)) {
718 /* Schedule a reset to recover from the flush failure. The
719 * descriptor caches reference memory we're about to free,
720 * but falcon_reconfigure_mac_wrapper() won't reconnect
721 * the MACs because of the pending reset. */
722 netif_err(efx, drv, efx->net_dev,
723 "Resetting to recover from flush failure\n");
724 efx_schedule_reset(efx, RESET_TYPE_ALL);
725 } else if (rc) {
726 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
727 } else {
728 netif_dbg(efx, drv, efx->net_dev,
729 "successfully flushed all queues\n");
730 }
731 } 666 }
732 667
733 efx_for_each_channel(channel, efx) { 668 efx_for_each_channel(channel, efx) {
@@ -741,7 +676,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
741 efx_stop_eventq(channel); 676 efx_stop_eventq(channel);
742 efx_start_eventq(channel); 677 efx_start_eventq(channel);
743 } 678 }
679 }
680
681 rc = efx->type->fini_dmaq(efx);
682 if (rc && EFX_WORKAROUND_7803(efx)) {
683 /* Schedule a reset to recover from the flush failure. The
684 * descriptor caches reference memory we're about to free,
685 * but falcon_reconfigure_mac_wrapper() won't reconnect
686 * the MACs because of the pending reset.
687 */
688 netif_err(efx, drv, efx->net_dev,
689 "Resetting to recover from flush failure\n");
690 efx_schedule_reset(efx, RESET_TYPE_ALL);
691 } else if (rc) {
692 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
693 } else {
694 netif_dbg(efx, drv, efx->net_dev,
695 "successfully flushed all queues\n");
696 }
744 697
698 efx_for_each_channel(channel, efx) {
745 efx_for_each_channel_rx_queue(rx_queue, channel) 699 efx_for_each_channel_rx_queue(rx_queue, channel)
746 efx_fini_rx_queue(rx_queue); 700 efx_fini_rx_queue(rx_queue);
747 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 701 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -779,7 +733,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
779 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 733 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
780 u32 old_rxq_entries, old_txq_entries; 734 u32 old_rxq_entries, old_txq_entries;
781 unsigned i, next_buffer_table = 0; 735 unsigned i, next_buffer_table = 0;
782 int rc; 736 int rc, rc2;
783 737
784 rc = efx_check_disabled(efx); 738 rc = efx_check_disabled(efx);
785 if (rc) 739 if (rc)
@@ -809,7 +763,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
809 763
810 efx_device_detach_sync(efx); 764 efx_device_detach_sync(efx);
811 efx_stop_all(efx); 765 efx_stop_all(efx);
812 efx_stop_interrupts(efx, true); 766 efx_soft_disable_interrupts(efx);
813 767
814 /* Clone channels (where possible) */ 768 /* Clone channels (where possible) */
815 memset(other_channel, 0, sizeof(other_channel)); 769 memset(other_channel, 0, sizeof(other_channel));
@@ -859,9 +813,16 @@ out:
859 } 813 }
860 } 814 }
861 815
862 efx_start_interrupts(efx, true); 816 rc2 = efx_soft_enable_interrupts(efx);
863 efx_start_all(efx); 817 if (rc2) {
864 netif_device_attach(efx->net_dev); 818 rc = rc ? rc : rc2;
819 netif_err(efx, drv, efx->net_dev,
820 "unable to restart interrupts on channel reallocation\n");
821 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
822 } else {
823 efx_start_all(efx);
824 netif_device_attach(efx->net_dev);
825 }
865 return rc; 826 return rc;
866 827
867rollback: 828rollback:
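The rc/rc2 idiom above keeps the first failure as the return value while still reacting to a secondary failure during recovery. The shape in isolation, with do_work(), restart_irqs() and schedule_disable_reset() as placeholders rather than driver functions:

        int rc = do_work();                     /* primary step, may fail */
        int rc2 = restart_irqs();               /* recovery, may also fail */

        if (rc2) {
                rc = rc ? rc : rc2;             /* report the earliest error */
                schedule_disable_reset();       /* never run half-enabled */
        }
        return rc;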
@@ -931,10 +892,9 @@ void efx_link_status_changed(struct efx_nic *efx)
931 /* Status message for kernel log */ 892 /* Status message for kernel log */
932 if (link_state->up) 893 if (link_state->up)
933 netif_info(efx, link, efx->net_dev, 894 netif_info(efx, link, efx->net_dev,
934 "link up at %uMbps %s-duplex (MTU %d)%s\n", 895 "link up at %uMbps %s-duplex (MTU %d)\n",
935 link_state->speed, link_state->fd ? "full" : "half", 896 link_state->speed, link_state->fd ? "full" : "half",
936 efx->net_dev->mtu, 897 efx->net_dev->mtu);
937 (efx->promiscuous ? " [PROMISC]" : ""));
938 else 898 else
939 netif_info(efx, link, efx->net_dev, "link down\n"); 899 netif_info(efx, link, efx->net_dev, "link down\n");
940} 900}
@@ -983,10 +943,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
983 943
984 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 944 WARN_ON(!mutex_is_locked(&efx->mac_lock));
985 945
986 /* Serialise the promiscuous flag with efx_set_rx_mode. */
987 netif_addr_lock_bh(efx->net_dev);
988 netif_addr_unlock_bh(efx->net_dev);
989
990 /* Disable PHY transmit in mac level loopbacks */ 946 /* Disable PHY transmit in mac level loopbacks */
991 phy_mode = efx->phy_mode; 947 phy_mode = efx->phy_mode;
992 if (LOOPBACK_INTERNAL(efx)) 948 if (LOOPBACK_INTERNAL(efx))
@@ -1144,6 +1100,7 @@ static int efx_init_io(struct efx_nic *efx)
1144{ 1100{
1145 struct pci_dev *pci_dev = efx->pci_dev; 1101 struct pci_dev *pci_dev = efx->pci_dev;
1146 dma_addr_t dma_mask = efx->type->max_dma_mask; 1102 dma_addr_t dma_mask = efx->type->max_dma_mask;
1103 unsigned int mem_map_size = efx->type->mem_map_size(efx);
1147 int rc; 1104 int rc;
1148 1105
1149 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1106 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1196,20 +1153,18 @@ static int efx_init_io(struct efx_nic *efx)
1196 rc = -EIO; 1153 rc = -EIO;
1197 goto fail3; 1154 goto fail3;
1198 } 1155 }
1199 efx->membase = ioremap_nocache(efx->membase_phys, 1156 efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
1200 efx->type->mem_map_size);
1201 if (!efx->membase) { 1157 if (!efx->membase) {
1202 netif_err(efx, probe, efx->net_dev, 1158 netif_err(efx, probe, efx->net_dev,
1203 "could not map memory BAR at %llx+%x\n", 1159 "could not map memory BAR at %llx+%x\n",
1204 (unsigned long long)efx->membase_phys, 1160 (unsigned long long)efx->membase_phys, mem_map_size);
1205 efx->type->mem_map_size);
1206 rc = -ENOMEM; 1161 rc = -ENOMEM;
1207 goto fail4; 1162 goto fail4;
1208 } 1163 }
1209 netif_dbg(efx, probe, efx->net_dev, 1164 netif_dbg(efx, probe, efx->net_dev,
1210 "memory BAR at %llx+%x (virtual %p)\n", 1165 "memory BAR at %llx+%x (virtual %p)\n",
1211 (unsigned long long)efx->membase_phys, 1166 (unsigned long long)efx->membase_phys, mem_map_size,
1212 efx->type->mem_map_size, efx->membase); 1167 efx->membase);
1213 1168
1214 return 0; 1169 return 0;
1215 1170
@@ -1288,8 +1243,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1288 */ 1243 */
1289static int efx_probe_interrupts(struct efx_nic *efx) 1244static int efx_probe_interrupts(struct efx_nic *efx)
1290{ 1245{
1291 unsigned int max_channels =
1292 min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
1293 unsigned int extra_channels = 0; 1246 unsigned int extra_channels = 0;
1294 unsigned int i, j; 1247 unsigned int i, j;
1295 int rc; 1248 int rc;
@@ -1306,7 +1259,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1306 if (separate_tx_channels) 1259 if (separate_tx_channels)
1307 n_channels *= 2; 1260 n_channels *= 2;
1308 n_channels += extra_channels; 1261 n_channels += extra_channels;
1309 n_channels = min(n_channels, max_channels); 1262 n_channels = min(n_channels, efx->max_channels);
1310 1263
1311 for (i = 0; i < n_channels; i++) 1264 for (i = 0; i < n_channels; i++)
1312 xentries[i].entry = i; 1265 xentries[i].entry = i;
@@ -1392,31 +1345,42 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1392 return 0; 1345 return 0;
1393} 1346}
1394 1347
1395/* Enable interrupts, then probe and start the event queues */ 1348static int efx_soft_enable_interrupts(struct efx_nic *efx)
1396static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1397{ 1349{
1398 struct efx_channel *channel; 1350 struct efx_channel *channel, *end_channel;
1351 int rc;
1399 1352
1400 BUG_ON(efx->state == STATE_DISABLED); 1353 BUG_ON(efx->state == STATE_DISABLED);
1401 1354
1402 if (efx->eeh_disabled_legacy_irq) { 1355 efx->irq_soft_enabled = true;
1403 enable_irq(efx->legacy_irq); 1356 smp_wmb();
1404 efx->eeh_disabled_legacy_irq = false;
1405 }
1406 if (efx->legacy_irq)
1407 efx->legacy_irq_enabled = true;
1408 efx_nic_enable_interrupts(efx);
1409 1357
1410 efx_for_each_channel(channel, efx) { 1358 efx_for_each_channel(channel, efx) {
1411 if (!channel->type->keep_eventq || !may_keep_eventq) 1359 if (!channel->type->keep_eventq) {
1412 efx_init_eventq(channel); 1360 rc = efx_init_eventq(channel);
1361 if (rc)
1362 goto fail;
1363 }
1413 efx_start_eventq(channel); 1364 efx_start_eventq(channel);
1414 } 1365 }
1415 1366
1416 efx_mcdi_mode_event(efx); 1367 efx_mcdi_mode_event(efx);
1368
1369 return 0;
1370fail:
1371 end_channel = channel;
1372 efx_for_each_channel(channel, efx) {
1373 if (channel == end_channel)
1374 break;
1375 efx_stop_eventq(channel);
1376 if (!channel->type->keep_eventq)
1377 efx_fini_eventq(channel);
1378 }
1379
1380 return rc;
1417} 1381}
1418 1382
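The irq_soft_enabled store is ordered by smp_wmb() before the event queues start, and efx_soft_disable_interrupts() relies on synchronize_irq() after clearing it. The presumed reader side (hedged; this is not the driver's actual ISR) checks the flag once per interrupt and bails out when clear:

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct efx_nic *efx = dev_id;

        /* Pairs with the smp_wmb() after the irq_soft_enabled store */
        if (!ACCESS_ONCE(efx->irq_soft_enabled))
                return IRQ_HANDLED;     /* soft-disabled: swallow the IRQ */

        /* ...otherwise schedule NAPI for the signalled channel(s)... */
        return IRQ_HANDLED;
}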
1419static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) 1383static void efx_soft_disable_interrupts(struct efx_nic *efx)
1420{ 1384{
1421 struct efx_channel *channel; 1385 struct efx_channel *channel;
1422 1386
@@ -1425,20 +1389,79 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1425 1389
1426 efx_mcdi_mode_poll(efx); 1390 efx_mcdi_mode_poll(efx);
1427 1391
1428 efx_nic_disable_interrupts(efx); 1392 efx->irq_soft_enabled = false;
1429 if (efx->legacy_irq) { 1393 smp_wmb();
1394
1395 if (efx->legacy_irq)
1430 synchronize_irq(efx->legacy_irq); 1396 synchronize_irq(efx->legacy_irq);
1431 efx->legacy_irq_enabled = false;
1432 }
1433 1397
1434 efx_for_each_channel(channel, efx) { 1398 efx_for_each_channel(channel, efx) {
1435 if (channel->irq) 1399 if (channel->irq)
1436 synchronize_irq(channel->irq); 1400 synchronize_irq(channel->irq);
1437 1401
1438 efx_stop_eventq(channel); 1402 efx_stop_eventq(channel);
1439 if (!channel->type->keep_eventq || !may_keep_eventq) 1403 if (!channel->type->keep_eventq)
1404 efx_fini_eventq(channel);
1405 }
1406
1407 /* Flush the asynchronous MCDI request queue */
1408 efx_mcdi_flush_async(efx);
1409}
1410
1411static int efx_enable_interrupts(struct efx_nic *efx)
1412{
1413 struct efx_channel *channel, *end_channel;
1414 int rc;
1415
1416 BUG_ON(efx->state == STATE_DISABLED);
1417
1418 if (efx->eeh_disabled_legacy_irq) {
1419 enable_irq(efx->legacy_irq);
1420 efx->eeh_disabled_legacy_irq = false;
1421 }
1422
1423 efx->type->irq_enable_master(efx);
1424
1425 efx_for_each_channel(channel, efx) {
1426 if (channel->type->keep_eventq) {
1427 rc = efx_init_eventq(channel);
1428 if (rc)
1429 goto fail;
1430 }
1431 }
1432
1433 rc = efx_soft_enable_interrupts(efx);
1434 if (rc)
1435 goto fail;
1436
1437 return 0;
1438
1439fail:
1440 end_channel = channel;
1441 efx_for_each_channel(channel, efx) {
1442 if (channel == end_channel)
1443 break;
1444 if (channel->type->keep_eventq)
1440 efx_fini_eventq(channel); 1445 efx_fini_eventq(channel);
1441 } 1446 }
1447
1448 efx->type->irq_disable_non_ev(efx);
1449
1450 return rc;
1451}
1452
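Both enable paths unwind with the same end_channel pattern: on failure, re-walk the channel list from the beginning and stop just before the channel that failed, undoing only work already done. The generic shape, with undo() as a placeholder:

fail:
        end_channel = channel;                  /* first channel NOT set up */
        efx_for_each_channel(channel, efx) {
                if (channel == end_channel)
                        break;                  /* stop before the failure */
                undo(channel);                  /* e.g. efx_fini_eventq() */
        }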
1453static void efx_disable_interrupts(struct efx_nic *efx)
1454{
1455 struct efx_channel *channel;
1456
1457 efx_soft_disable_interrupts(efx);
1458
1459 efx_for_each_channel(channel, efx) {
1460 if (channel->type->keep_eventq)
1461 efx_fini_eventq(channel);
1462 }
1463
1464 efx->type->irq_disable_non_ev(efx);
1442} 1465}
1443 1466
1444static void efx_remove_interrupts(struct efx_nic *efx) 1467static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1495,9 +1518,11 @@ static int efx_probe_nic(struct efx_nic *efx)
1495 * in MSI-X interrupts. */ 1518 * in MSI-X interrupts. */
1496 rc = efx_probe_interrupts(efx); 1519 rc = efx_probe_interrupts(efx);
1497 if (rc) 1520 if (rc)
1498 goto fail; 1521 goto fail1;
1499 1522
1500 efx->type->dimension_resources(efx); 1523 rc = efx->type->dimension_resources(efx);
1524 if (rc)
1525 goto fail2;
1501 1526
1502 if (efx->n_channels > 1) 1527 if (efx->n_channels > 1)
1503 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1528 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1515,7 +1540,9 @@ static int efx_probe_nic(struct efx_nic *efx)
1515 1540
1516 return 0; 1541 return 0;
1517 1542
1518fail: 1543fail2:
1544 efx_remove_interrupts(efx);
1545fail1:
1519 efx->type->remove(efx); 1546 efx->type->remove(efx);
1520 return rc; 1547 return rc;
1521} 1548}
@@ -1528,6 +1555,44 @@ static void efx_remove_nic(struct efx_nic *efx)
1528 efx->type->remove(efx); 1555 efx->type->remove(efx);
1529} 1556}
1530 1557
1558static int efx_probe_filters(struct efx_nic *efx)
1559{
1560 int rc;
1561
1562 spin_lock_init(&efx->filter_lock);
1563
1564 rc = efx->type->filter_table_probe(efx);
1565 if (rc)
1566 return rc;
1567
1568#ifdef CONFIG_RFS_ACCEL
1569 if (efx->type->offload_features & NETIF_F_NTUPLE) {
1570 efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
1571 sizeof(*efx->rps_flow_id),
1572 GFP_KERNEL);
1573 if (!efx->rps_flow_id) {
1574 efx->type->filter_table_remove(efx);
1575 return -ENOMEM;
1576 }
1577 }
1578#endif
1579
1580 return 0;
1581}
1582
1583static void efx_remove_filters(struct efx_nic *efx)
1584{
1585#ifdef CONFIG_RFS_ACCEL
1586 kfree(efx->rps_flow_id);
1587#endif
1588 efx->type->filter_table_remove(efx);
1589}
1590
1591static void efx_restore_filters(struct efx_nic *efx)
1592{
1593 efx->type->filter_table_restore(efx);
1594}
1595
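Under CONFIG_RFS_ACCEL, rps_flow_id is sized to max_rx_ip_filters; the assumed use (hedged, not shown in this hunk) is that accelerated-RFS insertion records the kernel's flow ID under the returned filter index so expiry can later consult rps_may_expire_flow(). Illustrative fragment:

static int example_record_arfs(struct efx_nic *efx,
                               struct efx_filter_spec *spec, u32 flow_id)
{
        s32 idx = efx->type->filter_insert(efx, spec, true);

        if (idx < 0)
                return idx;
        efx->rps_flow_id[idx] = flow_id;        /* consulted at expiry time */
        return idx;
}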
1531/************************************************************************** 1596/**************************************************************************
1532 * 1597 *
1533 * NIC startup/shutdown 1598 * NIC startup/shutdown
@@ -1917,34 +1982,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
1917 struct rtnl_link_stats64 *stats) 1982 struct rtnl_link_stats64 *stats)
1918{ 1983{
1919 struct efx_nic *efx = netdev_priv(net_dev); 1984 struct efx_nic *efx = netdev_priv(net_dev);
1920 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1921 1985
1922 spin_lock_bh(&efx->stats_lock); 1986 spin_lock_bh(&efx->stats_lock);
1923 1987 efx->type->update_stats(efx, NULL, stats);
1924 efx->type->update_stats(efx);
1925
1926 stats->rx_packets = mac_stats->rx_packets;
1927 stats->tx_packets = mac_stats->tx_packets;
1928 stats->rx_bytes = mac_stats->rx_bytes;
1929 stats->tx_bytes = mac_stats->tx_bytes;
1930 stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
1931 stats->multicast = mac_stats->rx_multicast;
1932 stats->collisions = mac_stats->tx_collision;
1933 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1934 mac_stats->rx_length_error);
1935 stats->rx_crc_errors = mac_stats->rx_bad;
1936 stats->rx_frame_errors = mac_stats->rx_align_error;
1937 stats->rx_fifo_errors = mac_stats->rx_overflow;
1938 stats->rx_missed_errors = mac_stats->rx_missed;
1939 stats->tx_window_errors = mac_stats->tx_late_collision;
1940
1941 stats->rx_errors = (stats->rx_length_errors +
1942 stats->rx_crc_errors +
1943 stats->rx_frame_errors +
1944 mac_stats->rx_symbol_error);
1945 stats->tx_errors = (stats->tx_window_errors +
1946 mac_stats->tx_bad);
1947
1948 spin_unlock_bh(&efx->stats_lock); 1988 spin_unlock_bh(&efx->stats_lock);
1949 1989
1950 return stats; 1990 return stats;
@@ -2018,30 +2058,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
2018static void efx_set_rx_mode(struct net_device *net_dev) 2058static void efx_set_rx_mode(struct net_device *net_dev)
2019{ 2059{
2020 struct efx_nic *efx = netdev_priv(net_dev); 2060 struct efx_nic *efx = netdev_priv(net_dev);
2021 struct netdev_hw_addr *ha;
2022 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2023 u32 crc;
2024 int bit;
2025
2026 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
2027
2028 /* Build multicast hash table */
2029 if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
2030 memset(mc_hash, 0xff, sizeof(*mc_hash));
2031 } else {
2032 memset(mc_hash, 0x00, sizeof(*mc_hash));
2033 netdev_for_each_mc_addr(ha, net_dev) {
2034 crc = ether_crc_le(ETH_ALEN, ha->addr);
2035 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2036 __set_bit_le(bit, mc_hash);
2037 }
2038
2039 /* Broadcast packets go through the multicast hash filter.
2040 * ether_crc_le() of the broadcast address is 0xbe2612ff
2041 * so we always add bit 0xff to the mask.
2042 */
2043 __set_bit_le(0xff, mc_hash);
2044 }
2045 2061
2046 if (efx->port_enabled) 2062 if (efx->port_enabled)
2047 queue_work(efx->workqueue, &efx->mac_work); 2063 queue_work(efx->workqueue, &efx->mac_work);
@@ -2059,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2059 return 0; 2075 return 0;
2060} 2076}
2061 2077
2062static const struct net_device_ops efx_netdev_ops = { 2078static const struct net_device_ops efx_farch_netdev_ops = {
2063 .ndo_open = efx_net_open, 2079 .ndo_open = efx_net_open,
2064 .ndo_stop = efx_net_stop, 2080 .ndo_stop = efx_net_stop,
2065 .ndo_get_stats64 = efx_net_stats, 2081 .ndo_get_stats64 = efx_net_stats,
@@ -2086,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = {
2086#endif 2102#endif
2087}; 2103};
2088 2104
2105static const struct net_device_ops efx_ef10_netdev_ops = {
2106 .ndo_open = efx_net_open,
2107 .ndo_stop = efx_net_stop,
2108 .ndo_get_stats64 = efx_net_stats,
2109 .ndo_tx_timeout = efx_watchdog,
2110 .ndo_start_xmit = efx_hard_start_xmit,
2111 .ndo_validate_addr = eth_validate_addr,
2112 .ndo_do_ioctl = efx_ioctl,
2113 .ndo_change_mtu = efx_change_mtu,
2114 .ndo_set_mac_address = efx_set_mac_address,
2115 .ndo_set_rx_mode = efx_set_rx_mode,
2116 .ndo_set_features = efx_set_features,
2117#ifdef CONFIG_NET_POLL_CONTROLLER
2118 .ndo_poll_controller = efx_netpoll,
2119#endif
2120#ifdef CONFIG_RFS_ACCEL
2121 .ndo_rx_flow_steer = efx_filter_rfs,
2122#endif
2123};
2124
2089static void efx_update_name(struct efx_nic *efx) 2125static void efx_update_name(struct efx_nic *efx)
2090{ 2126{
2091 strcpy(efx->name, efx->net_dev->name); 2127 strcpy(efx->name, efx->net_dev->name);
@@ -2098,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this,
2098{ 2134{
2099 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 2135 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2100 2136
2101 if (net_dev->netdev_ops == &efx_netdev_ops && 2137 if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
2138 net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
2102 event == NETDEV_CHANGENAME) 2139 event == NETDEV_CHANGENAME)
2103 efx_update_name(netdev_priv(net_dev)); 2140 efx_update_name(netdev_priv(net_dev));
2104 2141
@@ -2125,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx)
2125 2162
2126 net_dev->watchdog_timeo = 5 * HZ; 2163 net_dev->watchdog_timeo = 5 * HZ;
2127 net_dev->irq = efx->pci_dev->irq; 2164 net_dev->irq = efx->pci_dev->irq;
2128 net_dev->netdev_ops = &efx_netdev_ops; 2165 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
2166 net_dev->netdev_ops = &efx_ef10_netdev_ops;
2167 net_dev->priv_flags |= IFF_UNICAST_FLT;
2168 } else {
2169 net_dev->netdev_ops = &efx_farch_netdev_ops;
2170 }
2129 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2171 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
2130 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2172 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2131 2173
@@ -2185,22 +2227,11 @@ fail_locked:
2185 2227
2186static void efx_unregister_netdev(struct efx_nic *efx) 2228static void efx_unregister_netdev(struct efx_nic *efx)
2187{ 2229{
2188 struct efx_channel *channel;
2189 struct efx_tx_queue *tx_queue;
2190
2191 if (!efx->net_dev) 2230 if (!efx->net_dev)
2192 return; 2231 return;
2193 2232
2194 BUG_ON(netdev_priv(efx->net_dev) != efx); 2233 BUG_ON(netdev_priv(efx->net_dev) != efx);
2195 2234
2196 /* Free up any skbs still remaining. This has to happen before
2197 * we try to unregister the netdev as running their destructors
2198 * may be needed to get the device ref. count to 0. */
2199 efx_for_each_channel(channel, efx) {
2200 efx_for_each_channel_tx_queue(tx_queue, channel)
2201 efx_release_tx_buffers(tx_queue);
2202 }
2203
2204 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2235 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2205 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2236 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2206 2237
@@ -2223,7 +2254,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2223 EFX_ASSERT_RESET_SERIALISED(efx); 2254 EFX_ASSERT_RESET_SERIALISED(efx);
2224 2255
2225 efx_stop_all(efx); 2256 efx_stop_all(efx);
2226 efx_stop_interrupts(efx, false); 2257 efx_disable_interrupts(efx);
2227 2258
2228 mutex_lock(&efx->mac_lock); 2259 mutex_lock(&efx->mac_lock);
2229 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 2260 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2260,9 +2291,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2260 "could not restore PHY settings\n"); 2291 "could not restore PHY settings\n");
2261 } 2292 }
2262 2293
2263 efx->type->reconfigure_mac(efx); 2294 rc = efx_enable_interrupts(efx);
2264 2295 if (rc)
2265 efx_start_interrupts(efx, false); 2296 goto fail;
2266 efx_restore_filters(efx); 2297 efx_restore_filters(efx);
2267 efx_sriov_reset(efx); 2298 efx_sriov_reset(efx);
2268 2299
@@ -2458,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
2458 .driver_data = (unsigned long) &siena_a0_nic_type}, 2489 .driver_data = (unsigned long) &siena_a0_nic_type},
2459 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ 2490 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
2460 .driver_data = (unsigned long) &siena_a0_nic_type}, 2491 .driver_data = (unsigned long) &siena_a0_nic_type},
2492 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
2493 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2461 {0} /* end of list */ 2494 {0} /* end of list */
2462}; 2495};
2463 2496
@@ -2516,6 +2549,9 @@ static int efx_init_struct(struct efx_nic *efx,
2516 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2549 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2517 2550
2518 efx->net_dev = net_dev; 2551 efx->net_dev = net_dev;
2552 efx->rx_prefix_size = efx->type->rx_prefix_size;
2553 efx->rx_packet_hash_offset =
2554 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2519 spin_lock_init(&efx->stats_lock); 2555 spin_lock_init(&efx->stats_lock);
2520 mutex_init(&efx->mac_lock); 2556 mutex_init(&efx->mac_lock);
2521 efx->phy_op = &efx_dummy_phy_operations; 2557 efx->phy_op = &efx_dummy_phy_operations;
@@ -2527,10 +2563,10 @@ static int efx_init_struct(struct efx_nic *efx,
2527 efx->channel[i] = efx_alloc_channel(efx, i, NULL); 2563 efx->channel[i] = efx_alloc_channel(efx, i, NULL);
2528 if (!efx->channel[i]) 2564 if (!efx->channel[i])
2529 goto fail; 2565 goto fail;
2566 efx->msi_context[i].efx = efx;
2567 efx->msi_context[i].index = i;
2530 } 2568 }
2531 2569
2532 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
2533
2534 /* Higher numbered interrupt modes are less capable! */ 2570 /* Higher numbered interrupt modes are less capable! */
2535 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 2571 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2536 interrupt_mode); 2572 interrupt_mode);
@@ -2579,7 +2615,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
2579 BUG_ON(efx->state == STATE_READY); 2615 BUG_ON(efx->state == STATE_READY);
2580 cancel_work_sync(&efx->reset_work); 2616 cancel_work_sync(&efx->reset_work);
2581 2617
2582 efx_stop_interrupts(efx, false); 2618 efx_disable_interrupts(efx);
2583 efx_nic_fini_interrupt(efx); 2619 efx_nic_fini_interrupt(efx);
2584 efx_fini_port(efx); 2620 efx_fini_port(efx);
2585 efx->type->fini(efx); 2621 efx->type->fini(efx);
@@ -2601,7 +2637,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2601 /* Mark the NIC as fini, then stop the interface */ 2637 /* Mark the NIC as fini, then stop the interface */
2602 rtnl_lock(); 2638 rtnl_lock();
2603 dev_close(efx->net_dev); 2639 dev_close(efx->net_dev);
2604 efx_stop_interrupts(efx, false); 2640 efx_disable_interrupts(efx);
2605 rtnl_unlock(); 2641 rtnl_unlock();
2606 2642
2607 efx_sriov_fini(efx); 2643 efx_sriov_fini(efx);
@@ -2703,10 +2739,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2703 rc = efx_nic_init_interrupt(efx); 2739 rc = efx_nic_init_interrupt(efx);
2704 if (rc) 2740 if (rc)
2705 goto fail5; 2741 goto fail5;
2706 efx_start_interrupts(efx, false); 2742 rc = efx_enable_interrupts(efx);
2743 if (rc)
2744 goto fail6;
2707 2745
2708 return 0; 2746 return 0;
2709 2747
2748 fail6:
2749 efx_nic_fini_interrupt(efx);
2710 fail5: 2750 fail5:
2711 efx_fini_port(efx); 2751 efx_fini_port(efx);
2712 fail4: 2752 fail4:
@@ -2824,7 +2864,7 @@ static int efx_pm_freeze(struct device *dev)
2824 efx_device_detach_sync(efx); 2864 efx_device_detach_sync(efx);
2825 2865
2826 efx_stop_all(efx); 2866 efx_stop_all(efx);
2827 efx_stop_interrupts(efx, false); 2867 efx_disable_interrupts(efx);
2828 } 2868 }
2829 2869
2830 rtnl_unlock(); 2870 rtnl_unlock();
@@ -2834,12 +2874,15 @@ static int efx_pm_freeze(struct device *dev)
2834 2874
2835static int efx_pm_thaw(struct device *dev) 2875static int efx_pm_thaw(struct device *dev)
2836{ 2876{
2877 int rc;
2837 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2878 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2838 2879
2839 rtnl_lock(); 2880 rtnl_lock();
2840 2881
2841 if (efx->state != STATE_DISABLED) { 2882 if (efx->state != STATE_DISABLED) {
2842 efx_start_interrupts(efx, false); 2883 rc = efx_enable_interrupts(efx);
2884 if (rc)
2885 goto fail;
2843 2886
2844 mutex_lock(&efx->mac_lock); 2887 mutex_lock(&efx->mac_lock);
2845 efx->phy_op->reconfigure(efx); 2888 efx->phy_op->reconfigure(efx);
@@ -2860,6 +2903,11 @@ static int efx_pm_thaw(struct device *dev)
2860 queue_work(reset_workqueue, &efx->reset_work); 2903 queue_work(reset_workqueue, &efx->reset_work);
2861 2904
2862 return 0; 2905 return 0;
2906
2907fail:
2908 rtnl_unlock();
2909
2910 return rc;
2863} 2911}
2864 2912
2865static int efx_pm_poweroff(struct device *dev) 2913static int efx_pm_poweroff(struct device *dev)
@@ -2896,8 +2944,8 @@ static int efx_pm_resume(struct device *dev)
2896 rc = efx->type->init(efx); 2944 rc = efx->type->init(efx);
2897 if (rc) 2945 if (rc)
2898 return rc; 2946 return rc;
2899 efx_pm_thaw(dev); 2947 rc = efx_pm_thaw(dev);
2900 return 0; 2948 return rc;
2901} 2949}
2902 2950
2903static int efx_pm_suspend(struct device *dev) 2951static int efx_pm_suspend(struct device *dev)
@@ -2942,7 +2990,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
2942 efx_device_detach_sync(efx); 2990 efx_device_detach_sync(efx);
2943 2991
2944 efx_stop_all(efx); 2992 efx_stop_all(efx);
2945 efx_stop_interrupts(efx, false); 2993 efx_disable_interrupts(efx);
2946 2994
2947 status = PCI_ERS_RESULT_NEED_RESET; 2995 status = PCI_ERS_RESULT_NEED_RESET;
2948 } else { 2996 } else {
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index bdb30bbb0c97..34d00f5771fe 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
23extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 23extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
24extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); 24extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
25extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 25extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
26extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
27extern netdev_tx_t 26extern netdev_tx_t
28efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); 27efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
29extern netdev_tx_t 28extern netdev_tx_t
@@ -69,27 +68,99 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
69#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) 68#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
70 69
71/* Filters */ 70/* Filters */
72extern int efx_probe_filters(struct efx_nic *efx); 71
73extern void efx_restore_filters(struct efx_nic *efx); 72/**
74extern void efx_remove_filters(struct efx_nic *efx); 73 * efx_filter_insert_filter - add or replace a filter
75extern void efx_filter_update_rx_scatter(struct efx_nic *efx); 74 * @efx: NIC in which to insert the filter
76extern s32 efx_filter_insert_filter(struct efx_nic *efx, 75 * @spec: Specification for the filter
77 struct efx_filter_spec *spec, 76 * @replace_equal: Flag for whether the specified filter may replace an
78 bool replace); 77 * existing filter with equal priority
79extern int efx_filter_remove_id_safe(struct efx_nic *efx, 78 *
80 enum efx_filter_priority priority, 79 * On success, return the filter ID.
81 u32 filter_id); 80 * On failure, return a negative error code.
82extern int efx_filter_get_filter_safe(struct efx_nic *efx, 81 *
83 enum efx_filter_priority priority, 82 * If existing filters have equal match values to the new filter spec,
84 u32 filter_id, struct efx_filter_spec *); 83 * then the new filter might replace them or the function might fail,
85extern void efx_filter_clear_rx(struct efx_nic *efx, 84 * as follows.
86 enum efx_filter_priority priority); 85 *
87extern u32 efx_filter_count_rx_used(struct efx_nic *efx, 86 * 1. If the existing filters have lower priority, or @replace_equal
88 enum efx_filter_priority priority); 87 * is set and they have equal priority, replace them.
89extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx); 88 *
90extern s32 efx_filter_get_rx_ids(struct efx_nic *efx, 89 * 2. If the existing filters have higher priority, return -%EPERM.
91 enum efx_filter_priority priority, 90 *
92 u32 *buf, u32 size); 91 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
92 * support delivery to multiple recipients, return -%EEXIST.
93 *
94 * This implies that filters for multiple multicast recipients must
95 * all be inserted with the same priority and @replace_equal = %false.
96 */
97static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
98 struct efx_filter_spec *spec,
99 bool replace_equal)
100{
101 return efx->type->filter_insert(efx, spec, replace_equal);
102}
103
104/**
105 * efx_filter_remove_id_safe - remove a filter by ID, carefully
106 * @efx: NIC from which to remove the filter
107 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
108 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
109 *
110 * This function will range-check @filter_id, so it is safe to call
111 * with a value passed from userland.
112 */
113static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
114 enum efx_filter_priority priority,
115 u32 filter_id)
116{
117 return efx->type->filter_remove_safe(efx, priority, filter_id);
118}
119
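A hedged usage sketch for the insert/remove pair documented above. The spec helpers efx_filter_init_rx() and efx_filter_set_ipv4_local() are assumed from the driver's filter API, and the address, port and queue values are purely illustrative:

static int example_steer_http(struct efx_nic *efx)
{
        struct efx_filter_spec spec;
        s32 id;

        /* Match local TCP 192.168.0.1:80 and deliver to RX queue 1 */
        efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 1);
        efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
                                  htonl(0xc0a80001), htons(80));

        id = efx_filter_insert_filter(efx, &spec, false);
        if (id < 0)
                return id;      /* e.g. -EPERM or -EEXIST, as documented */

        return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, id);
}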
120/**
121 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 122 * @efx: NIC from which to retrieve the filter
123 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
124 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
125 * @spec: Buffer in which to store filter specification
126 *
127 * This function will range-check @filter_id, so it is safe to call
128 * with a value passed from userland.
129 */
130static inline int
131efx_filter_get_filter_safe(struct efx_nic *efx,
132 enum efx_filter_priority priority,
133 u32 filter_id, struct efx_filter_spec *spec)
134{
135 return efx->type->filter_get_safe(efx, priority, filter_id, spec);
136}
137
138/**
 139 * efx_filter_clear_rx - remove RX filters by priority
140 * @efx: NIC from which to remove the filters
141 * @priority: Maximum priority to remove
142 */
143static inline void efx_filter_clear_rx(struct efx_nic *efx,
144 enum efx_filter_priority priority)
145{
146 return efx->type->filter_clear_rx(efx, priority);
147}
148
149static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
150 enum efx_filter_priority priority)
151{
152 return efx->type->filter_count_rx_used(efx, priority);
153}
154static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
155{
156 return efx->type->filter_get_rx_id_limit(efx);
157}
158static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
159 enum efx_filter_priority priority,
160 u32 *buf, u32 size)
161{
162 return efx->type->filter_get_rx_ids(efx, priority, buf, size);
163}
93#ifdef CONFIG_RFS_ACCEL 164#ifdef CONFIG_RFS_ACCEL
94extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 165extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
95 u16 rxq_index, u32 flow_id); 166 u16 rxq_index, u32 flow_id);
@@ -105,11 +176,11 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
105static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} 176static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
106#define efx_filter_rfs_enabled() 0 177#define efx_filter_rfs_enabled() 0
107#endif 178#endif
179extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
108 180
109/* Channels */ 181/* Channels */
110extern int efx_channel_dummy_op_int(struct efx_channel *channel); 182extern int efx_channel_dummy_op_int(struct efx_channel *channel);
111extern void efx_channel_dummy_op_void(struct efx_channel *channel); 183extern void efx_channel_dummy_op_void(struct efx_channel *channel);
112extern void efx_process_channel_now(struct efx_channel *channel);
113extern int 184extern int
114efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); 185efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
115 186
@@ -141,7 +212,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx);
141 212
142/* MTD */ 213/* MTD */
143#ifdef CONFIG_SFC_MTD 214#ifdef CONFIG_SFC_MTD
144extern int efx_mtd_probe(struct efx_nic *efx); 215extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
216 size_t n_parts, size_t sizeof_part);
217static inline int efx_mtd_probe(struct efx_nic *efx)
218{
219 return efx->type->mtd_probe(efx);
220}
145extern void efx_mtd_rename(struct efx_nic *efx); 221extern void efx_mtd_rename(struct efx_nic *efx);
146extern void efx_mtd_remove(struct efx_nic *efx); 222extern void efx_mtd_remove(struct efx_nic *efx);
147#else 223#else
@@ -155,7 +231,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
155 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 231 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
156 "channel %d scheduling NAPI poll on CPU%d\n", 232 "channel %d scheduling NAPI poll on CPU%d\n",
157 channel->channel, raw_smp_processor_id()); 233 channel->channel, raw_smp_processor_id());
158 channel->work_pending = true;
159 234
160 napi_schedule(&channel->napi_str); 235 napi_schedule(&channel->napi_str);
161} 236}
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index ab8fb5889e55..7fdfee019092 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,7 @@ enum efx_loopback_mode {
147 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog 147 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
148 * @RESET_TYPE_INT_ERROR: reset due to internal error 148 * @RESET_TYPE_INT_ERROR: reset due to internal error
149 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors 149 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
150 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch 150 * @RESET_TYPE_DMA_ERROR: DMA error
151 * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
152 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors 151 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
153 * @RESET_TYPE_MC_FAILURE: MC reboot/assertion 152 * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
154 */ 153 */
@@ -163,8 +162,7 @@ enum reset_type {
163 RESET_TYPE_TX_WATCHDOG, 162 RESET_TYPE_TX_WATCHDOG,
164 RESET_TYPE_INT_ERROR, 163 RESET_TYPE_INT_ERROR,
165 RESET_TYPE_RX_RECOVERY, 164 RESET_TYPE_RX_RECOVERY,
166 RESET_TYPE_RX_DESC_FETCH, 165 RESET_TYPE_DMA_ERROR,
167 RESET_TYPE_TX_DESC_FETCH,
168 RESET_TYPE_TX_SKIP, 166 RESET_TYPE_TX_SKIP,
169 RESET_TYPE_MC_FAILURE, 167 RESET_TYPE_MC_FAILURE,
170 RESET_TYPE_MAX, 168 RESET_TYPE_MAX,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1fc21458413d..5b471cf5c323 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -19,14 +19,9 @@
19#include "filter.h" 19#include "filter.h"
20#include "nic.h" 20#include "nic.h"
21 21
22struct ethtool_string { 22struct efx_sw_stat_desc {
23 char name[ETH_GSTRING_LEN];
24};
25
26struct efx_ethtool_stat {
27 const char *name; 23 const char *name;
28 enum { 24 enum {
29 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
30 EFX_ETHTOOL_STAT_SOURCE_nic, 25 EFX_ETHTOOL_STAT_SOURCE_nic,
31 EFX_ETHTOOL_STAT_SOURCE_channel, 26 EFX_ETHTOOL_STAT_SOURCE_channel,
32 EFX_ETHTOOL_STAT_SOURCE_tx_queue 27 EFX_ETHTOOL_STAT_SOURCE_tx_queue
@@ -35,7 +30,7 @@ struct efx_ethtool_stat {
35 u64(*get_stat) (void *field); /* Reader function */ 30 u64(*get_stat) (void *field); /* Reader function */
36}; 31};
37 32
38/* Initialiser for a struct #efx_ethtool_stat with type-checking */ 33/* Initialiser for a struct efx_sw_stat_desc with type-checking */
39#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ 34#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
40 get_stat_function) { \ 35 get_stat_function) { \
41 .name = #stat_name, \ 36 .name = #stat_name, \
@@ -52,24 +47,11 @@ static u64 efx_get_uint_stat(void *field)
52 return *(unsigned int *)field; 47 return *(unsigned int *)field;
53} 48}
54 49
55static u64 efx_get_u64_stat(void *field)
56{
57 return *(u64 *) field;
58}
59
60static u64 efx_get_atomic_stat(void *field) 50static u64 efx_get_atomic_stat(void *field)
61{ 51{
62 return atomic_read((atomic_t *) field); 52 return atomic_read((atomic_t *) field);
63} 53}
64 54
65#define EFX_ETHTOOL_U64_MAC_STAT(field) \
66 EFX_ETHTOOL_STAT(field, mac_stats, field, \
67 u64, efx_get_u64_stat)
68
69#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
70 EFX_ETHTOOL_STAT(name, nic, n_##name, \
71 unsigned int, efx_get_uint_stat)
72
73#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ 55#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
74 EFX_ETHTOOL_STAT(field, nic, field, \ 56 EFX_ETHTOOL_STAT(field, nic, field, \
75 atomic_t, efx_get_atomic_stat) 57 atomic_t, efx_get_atomic_stat)
@@ -82,72 +64,12 @@ static u64 efx_get_atomic_stat(void *field)
82 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ 64 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
83 unsigned int, efx_get_uint_stat) 65 unsigned int, efx_get_uint_stat)
84 66
85static const struct efx_ethtool_stat efx_ethtool_stats[] = { 67static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
86 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 68 EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
87 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
88 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
89 EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
90 EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
91 EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
92 EFX_ETHTOOL_U64_MAC_STAT(tx_control),
93 EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
94 EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
95 EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
96 EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
97 EFX_ETHTOOL_U64_MAC_STAT(tx_64),
98 EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
99 EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
100 EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
101 EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
102 EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
103 EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
104 EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
105 EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
106 EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
107 EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
108 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
109 EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
110 EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
111 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
112 EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
113 EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
114 EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
115 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), 69 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
116 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), 70 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
117 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), 71 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
118 EFX_ETHTOOL_UINT_TXQ_STAT(pushes), 72 EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
122 EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
123 EFX_ETHTOOL_U64_MAC_STAT(rx_good),
124 EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
125 EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
126 EFX_ETHTOOL_U64_MAC_STAT(rx_control),
127 EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
128 EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
129 EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
130 EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
131 EFX_ETHTOOL_U64_MAC_STAT(rx_64),
132 EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
133 EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
134 EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
135 EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
136 EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
137 EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
138 EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
139 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
140 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
141 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
142 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
143 EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
144 EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
145 EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
146 EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
147 EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
148 EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
149 EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
150 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
151 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), 73 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
152 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 74 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
153 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), 75 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -155,10 +77,11 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), 77 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 78 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
157 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), 79 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
80 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
81 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
158}; 82};
159 83
160/* Number of ethtool statistics */ 84#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
161#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
162 85
163#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB 86#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
164 87
@@ -205,8 +128,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
205 efx->phy_op->get_settings(efx, ecmd); 128 efx->phy_op->get_settings(efx, ecmd);
206 mutex_unlock(&efx->mac_lock); 129 mutex_unlock(&efx->mac_lock);
207 130
208 /* GMAC does not support 1000Mbps HD */
209 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
210 /* Both MACs support pause frames (bidirectional and respond-only) */ 131 /* Both MACs support pause frames (bidirectional and respond-only) */
211 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 132 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
212 133
@@ -291,12 +212,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
291 * 212 *
292 * Fill in an individual self-test entry. 213 * Fill in an individual self-test entry.
293 */ 214 */
294static void efx_fill_test(unsigned int test_index, 215static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
295 struct ethtool_string *strings, u64 *data,
296 int *test, const char *unit_format, int unit_id, 216 int *test, const char *unit_format, int unit_id,
297 const char *test_format, const char *test_id) 217 const char *test_format, const char *test_id)
298{ 218{
299 struct ethtool_string unit_str, test_str; 219 char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
300 220
301 /* Fill data value, if applicable */ 221 /* Fill data value, if applicable */
302 if (data) 222 if (data)
@@ -305,15 +225,14 @@ static void efx_fill_test(unsigned int test_index,
305 /* Fill string, if applicable */ 225 /* Fill string, if applicable */
306 if (strings) { 226 if (strings) {
307 if (strchr(unit_format, '%')) 227 if (strchr(unit_format, '%'))
308 snprintf(unit_str.name, sizeof(unit_str.name), 228 snprintf(unit_str, sizeof(unit_str),
309 unit_format, unit_id); 229 unit_format, unit_id);
310 else 230 else
311 strcpy(unit_str.name, unit_format); 231 strcpy(unit_str, unit_format);
312 snprintf(test_str.name, sizeof(test_str.name), 232 snprintf(test_str, sizeof(test_str), test_format, test_id);
313 test_format, test_id); 233 snprintf(strings + test_index * ETH_GSTRING_LEN,
314 snprintf(strings[test_index].name, 234 ETH_GSTRING_LEN,
315 sizeof(strings[test_index].name), 235 "%-6s %-24s", unit_str, test_str);
316 "%-6s %-24s", unit_str.name, test_str.name);
317 } 236 }
318} 237}
319 238
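The rewrite drops the struct wrapper: an ethtool string table is a flat u8 buffer in which entry i starts at offset i * ETH_GSTRING_LEN. A one-line illustration of the stride:

        snprintf(strings + i * ETH_GSTRING_LEN, ETH_GSTRING_LEN,
                 "entry_%u", i);        /* illustrative entry name */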
@@ -336,7 +255,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
336 struct efx_loopback_self_tests *lb_tests, 255 struct efx_loopback_self_tests *lb_tests,
337 enum efx_loopback_mode mode, 256 enum efx_loopback_mode mode,
338 unsigned int test_index, 257 unsigned int test_index,
339 struct ethtool_string *strings, u64 *data) 258 u8 *strings, u64 *data)
340{ 259{
341 struct efx_channel *channel = 260 struct efx_channel *channel =
342 efx_get_channel(efx, efx->tx_channel_offset); 261 efx_get_channel(efx, efx->tx_channel_offset);
@@ -373,8 +292,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
373 */ 292 */
374static int efx_ethtool_fill_self_tests(struct efx_nic *efx, 293static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
375 struct efx_self_tests *tests, 294 struct efx_self_tests *tests,
376 struct ethtool_string *strings, 295 u8 *strings, u64 *data)
377 u64 *data)
378{ 296{
379 struct efx_channel *channel; 297 struct efx_channel *channel;
380 unsigned int n = 0, i; 298 unsigned int n = 0, i;
@@ -433,12 +351,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
433static int efx_ethtool_get_sset_count(struct net_device *net_dev, 351static int efx_ethtool_get_sset_count(struct net_device *net_dev,
434 int string_set) 352 int string_set)
435{ 353{
354 struct efx_nic *efx = netdev_priv(net_dev);
355
436 switch (string_set) { 356 switch (string_set) {
437 case ETH_SS_STATS: 357 case ETH_SS_STATS:
438 return EFX_ETHTOOL_NUM_STATS; 358 return efx->type->describe_stats(efx, NULL) +
359 EFX_ETHTOOL_SW_STAT_COUNT;
439 case ETH_SS_TEST: 360 case ETH_SS_TEST:
440 return efx_ethtool_fill_self_tests(netdev_priv(net_dev), 361 return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
441 NULL, NULL, NULL);
442 default: 362 default:
443 return -EINVAL; 363 return -EINVAL;
444 } 364 }
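Both new hooks follow a count-when-NULL convention, an assumption drawn from these call sites: describe_stats(efx, NULL) returns the entry count without writing names, and update_stats(efx, buf, NULL) fills the full statistics array without touching core netdev stats. A sketch:

static void example_dump_hw_stats(struct efx_nic *efx)
{
        size_t n = efx->type->describe_stats(efx, NULL);  /* count only */
        u64 *buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);

        if (!buf)
                return;
        efx->type->update_stats(efx, buf, NULL);  /* fill, skip core stats */
        /* ...consume buf[0..n-1]... */
        kfree(buf);
}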
@@ -448,20 +368,18 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
448 u32 string_set, u8 *strings) 368 u32 string_set, u8 *strings)
449{ 369{
450 struct efx_nic *efx = netdev_priv(net_dev); 370 struct efx_nic *efx = netdev_priv(net_dev);
451 struct ethtool_string *ethtool_strings =
452 (struct ethtool_string *)strings;
453 int i; 371 int i;
454 372
455 switch (string_set) { 373 switch (string_set) {
456 case ETH_SS_STATS: 374 case ETH_SS_STATS:
457 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 375 strings += (efx->type->describe_stats(efx, strings) *
458 strlcpy(ethtool_strings[i].name, 376 ETH_GSTRING_LEN);
459 efx_ethtool_stats[i].name, 377 for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
460 sizeof(ethtool_strings[i].name)); 378 strlcpy(strings + i * ETH_GSTRING_LEN,
379 efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
461 break; 380 break;
462 case ETH_SS_TEST: 381 case ETH_SS_TEST:
463 efx_ethtool_fill_self_tests(efx, NULL, 382 efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
464 ethtool_strings, NULL);
465 break; 383 break;
466 default: 384 default:
467 /* No other string sets */ 385 /* No other string sets */
@@ -474,27 +392,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
474 u64 *data) 392 u64 *data)
475{ 393{
476 struct efx_nic *efx = netdev_priv(net_dev); 394 struct efx_nic *efx = netdev_priv(net_dev);
477 struct efx_mac_stats *mac_stats = &efx->mac_stats; 395 const struct efx_sw_stat_desc *stat;
478 const struct efx_ethtool_stat *stat;
479 struct efx_channel *channel; 396 struct efx_channel *channel;
480 struct efx_tx_queue *tx_queue; 397 struct efx_tx_queue *tx_queue;
481 int i; 398 int i;
482 399
483 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
484
485 spin_lock_bh(&efx->stats_lock); 400 spin_lock_bh(&efx->stats_lock);
486 401
487 /* Update MAC and NIC statistics */ 402 /* Get NIC statistics */
488 efx->type->update_stats(efx); 403 data += efx->type->update_stats(efx, data, NULL);
489 404
490 /* Fill detailed statistics buffer */ 405 /* Get software statistics */
491 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { 406 for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
492 stat = &efx_ethtool_stats[i]; 407 stat = &efx_sw_stat_desc[i];
493 switch (stat->source) { 408 switch (stat->source) {
494 case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
495 data[i] = stat->get_stat((void *)mac_stats +
496 stat->offset);
497 break;
498 case EFX_ETHTOOL_STAT_SOURCE_nic: 409 case EFX_ETHTOOL_STAT_SOURCE_nic:
499 data[i] = stat->get_stat((void *)efx + stat->offset); 410 data[i] = stat->get_stat((void *)efx + stat->offset);
500 break; 411 break;
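The reworked ethtool stats path above funnels everything through two per-NIC-type hooks: describe_stats() returns the number of hardware stats and, when given a non-NULL buffer, also writes one ETH_GSTRING_LEN-sized name per stat; update_stats() fills a u64 array and returns the number of entries written. A minimal sketch of that convention, using a hypothetical example_stat_desc[] table (the real implementations live in the per-chip files):

static const struct { const char *name; } example_stat_desc[] = {
	{ "rx_noskb_drops" },		/* names are illustrative only */
	{ "tx_merge_events" },
};
#define EXAMPLE_STAT_COUNT ARRAY_SIZE(example_stat_desc)

static size_t example_describe_stats(struct efx_nic *efx, u8 *names)
{
	size_t i;

	if (names)			/* NULL means "just count" */
		for (i = 0; i < EXAMPLE_STAT_COUNT; i++)
			strlcpy(names + i * ETH_GSTRING_LEN,
				example_stat_desc[i].name, ETH_GSTRING_LEN);
	return EXAMPLE_STAT_COUNT;
}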
@@ -709,7 +620,6 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
709 struct efx_nic *efx = netdev_priv(net_dev); 620 struct efx_nic *efx = netdev_priv(net_dev);
710 u8 wanted_fc, old_fc; 621 u8 wanted_fc, old_fc;
711 u32 old_adv; 622 u32 old_adv;
712 bool reset;
713 int rc = 0; 623 int rc = 0;
714 624
715 mutex_lock(&efx->mac_lock); 625 mutex_lock(&efx->mac_lock);
@@ -732,24 +642,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
732 goto out; 642 goto out;
733 } 643 }
734 644
735 /* TX flow control may automatically turn itself off if the 645 /* Hook for Falcon bug 11482 workaround */
736 * link partner (intermittently) stops responding to pause 646 if (efx->type->prepare_enable_fc_tx &&
737 * frames. There isn't any indication that this has happened, 647 (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
 738 * so the best we can do is leave it up to the user to spot this 648 efx->type->prepare_enable_fc_tx(efx);
 739 * and fix it by cycling transmit flow control on this end. */
740 reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
741 if (EFX_WORKAROUND_11482(efx) && reset) {
742 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
743 /* Recover by resetting the EM block */
744 falcon_stop_nic_stats(efx);
745 falcon_drain_tx_fifo(efx);
746 falcon_reconfigure_xmac(efx);
747 falcon_start_nic_stats(efx);
748 } else {
749 /* Schedule a reset to recover */
750 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
751 }
752 }
753 649
754 old_adv = efx->link_advertising; 650 old_adv = efx->link_advertising;
755 old_fc = efx->wanted_fc; 651 old_fc = efx->wanted_fc;
@@ -814,11 +710,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
814 return efx_reset(efx, rc); 710 return efx_reset(efx, rc);
815} 711}
816 712
817/* MAC address mask including only MC flag */ 713/* MAC address mask including only I/G bit */
818static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 714static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
819 715
820#define IP4_ADDR_FULL_MASK ((__force __be32)~0) 716#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
821#define PORT_FULL_MASK ((__force __be16)~0) 717#define PORT_FULL_MASK ((__force __be16)~0)
718#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
822 719
823static int efx_ethtool_get_class_rule(struct efx_nic *efx, 720static int efx_ethtool_get_class_rule(struct efx_nic *efx,
824 struct ethtool_rx_flow_spec *rule) 721 struct ethtool_rx_flow_spec *rule)
@@ -828,8 +725,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
828 struct ethhdr *mac_entry = &rule->h_u.ether_spec; 725 struct ethhdr *mac_entry = &rule->h_u.ether_spec;
829 struct ethhdr *mac_mask = &rule->m_u.ether_spec; 726 struct ethhdr *mac_mask = &rule->m_u.ether_spec;
830 struct efx_filter_spec spec; 727 struct efx_filter_spec spec;
831 u16 vid;
832 u8 proto;
833 int rc; 728 int rc;
834 729
835 rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, 730 rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -837,44 +732,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
837 if (rc) 732 if (rc)
838 return rc; 733 return rc;
839 734
840 if (spec.dmaq_id == 0xfff) 735 if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
841 rule->ring_cookie = RX_CLS_FLOW_DISC; 736 rule->ring_cookie = RX_CLS_FLOW_DISC;
842 else 737 else
843 rule->ring_cookie = spec.dmaq_id; 738 rule->ring_cookie = spec.dmaq_id;
844 739
845 if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) { 740 if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
846 rule->flow_type = ETHER_FLOW; 741 spec.ether_type == htons(ETH_P_IP) &&
847 memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN); 742 (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
848 if (spec.type == EFX_FILTER_MC_DEF) 743 (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
849 memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN); 744 !(spec.match_flags &
850 return 0; 745 ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
851 } 746 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
852 747 EFX_FILTER_MATCH_IP_PROTO |
853 rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest); 748 EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
854 if (rc == 0) { 749 rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
750 TCP_V4_FLOW : UDP_V4_FLOW);
751 if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
752 ip_entry->ip4dst = spec.loc_host[0];
753 ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
754 }
755 if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
756 ip_entry->ip4src = spec.rem_host[0];
757 ip_mask->ip4src = IP4_ADDR_FULL_MASK;
758 }
759 if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
760 ip_entry->pdst = spec.loc_port;
761 ip_mask->pdst = PORT_FULL_MASK;
762 }
763 if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
764 ip_entry->psrc = spec.rem_port;
765 ip_mask->psrc = PORT_FULL_MASK;
766 }
767 } else if (!(spec.match_flags &
768 ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
769 EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
770 EFX_FILTER_MATCH_OUTER_VID))) {
855 rule->flow_type = ETHER_FLOW; 771 rule->flow_type = ETHER_FLOW;
856 memset(mac_mask->h_dest, ~0, ETH_ALEN); 772 if (spec.match_flags &
857 if (vid != EFX_FILTER_VID_UNSPEC) { 773 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
858 rule->flow_type |= FLOW_EXT; 774 memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
859 rule->h_ext.vlan_tci = htons(vid); 775 if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
860 rule->m_ext.vlan_tci = htons(0xfff); 776 memset(mac_mask->h_dest, ~0, ETH_ALEN);
777 else
778 memcpy(mac_mask->h_dest, mac_addr_ig_mask,
779 ETH_ALEN);
861 } 780 }
862 return 0; 781 if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
782 memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
783 memset(mac_mask->h_source, ~0, ETH_ALEN);
784 }
785 if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
786 mac_entry->h_proto = spec.ether_type;
787 mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
788 }
789 } else {
790 /* The above should handle all filters that we insert */
791 WARN_ON(1);
792 return -EINVAL;
863 } 793 }
864 794
865 rc = efx_filter_get_ipv4_local(&spec, &proto, 795 if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
866 &ip_entry->ip4dst, &ip_entry->pdst); 796 rule->flow_type |= FLOW_EXT;
867 if (rc != 0) { 797 rule->h_ext.vlan_tci = spec.outer_vid;
868 rc = efx_filter_get_ipv4_full( 798 rule->m_ext.vlan_tci = htons(0xfff);
869 &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
870 &ip_entry->ip4src, &ip_entry->psrc);
871 EFX_WARN_ON_PARANOID(rc);
872 ip_mask->ip4src = IP4_ADDR_FULL_MASK;
873 ip_mask->psrc = PORT_FULL_MASK;
874 } 799 }
875 rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; 800
876 ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
877 ip_mask->pdst = PORT_FULL_MASK;
878 return rc; 801 return rc;
879} 802}
880 803
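Both branches above rely on the same subset test: !(flags & ~allowed) evaluates true exactly when every set bit of flags lies inside allowed, so each branch accepts a filter only if it matches on nothing that branch cannot express. A stand-alone illustration with made-up bit values:

#include <assert.h>

int main(void)
{
	unsigned int allowed = 0x1 | 0x2 | 0x8;

	assert(!(0x3 & ~allowed));	/* 0x1|0x2 is a subset: accepted */
	assert(0x4 & ~allowed);		/* 0x4 falls outside: rejected */
	return 0;
}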
@@ -982,82 +905,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
982 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 905 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
983 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 906 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
984 (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 907 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
985 0xfff : rule->ring_cookie); 908 EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
986 909
987 switch (rule->flow_type) { 910 switch (rule->flow_type & ~FLOW_EXT) {
988 case TCP_V4_FLOW: 911 case TCP_V4_FLOW:
989 case UDP_V4_FLOW: { 912 case UDP_V4_FLOW:
990 u8 proto = (rule->flow_type == TCP_V4_FLOW ? 913 spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
991 IPPROTO_TCP : IPPROTO_UDP); 914 EFX_FILTER_MATCH_IP_PROTO);
992 915 spec.ether_type = htons(ETH_P_IP);
993 /* Must match all of destination, */ 916 spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
994 if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK && 917 IPPROTO_TCP : IPPROTO_UDP);
995 ip_mask->pdst == PORT_FULL_MASK)) 918 if (ip_mask->ip4dst) {
996 return -EINVAL; 919 if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
997 /* all or none of source, */ 920 return -EINVAL;
998 if ((ip_mask->ip4src || ip_mask->psrc) && 921 spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
999 !(ip_mask->ip4src == IP4_ADDR_FULL_MASK && 922 spec.loc_host[0] = ip_entry->ip4dst;
1000 ip_mask->psrc == PORT_FULL_MASK)) 923 }
1001 return -EINVAL; 924 if (ip_mask->ip4src) {
1002 /* and nothing else */ 925 if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
1003 if (ip_mask->tos || rule->m_ext.vlan_tci) 926 return -EINVAL;
927 spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
928 spec.rem_host[0] = ip_entry->ip4src;
929 }
930 if (ip_mask->pdst) {
931 if (ip_mask->pdst != PORT_FULL_MASK)
932 return -EINVAL;
933 spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
934 spec.loc_port = ip_entry->pdst;
935 }
936 if (ip_mask->psrc) {
937 if (ip_mask->psrc != PORT_FULL_MASK)
938 return -EINVAL;
939 spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
940 spec.rem_port = ip_entry->psrc;
941 }
942 if (ip_mask->tos)
1004 return -EINVAL; 943 return -EINVAL;
1005
1006 if (ip_mask->ip4src)
1007 rc = efx_filter_set_ipv4_full(&spec, proto,
1008 ip_entry->ip4dst,
1009 ip_entry->pdst,
1010 ip_entry->ip4src,
1011 ip_entry->psrc);
1012 else
1013 rc = efx_filter_set_ipv4_local(&spec, proto,
1014 ip_entry->ip4dst,
1015 ip_entry->pdst);
1016 if (rc)
1017 return rc;
1018 break; 944 break;
1019 }
1020
1021 case ETHER_FLOW | FLOW_EXT:
1022 case ETHER_FLOW: {
1023 u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
1024 ntohs(rule->m_ext.vlan_tci) : 0);
1025
1026 /* Must not match on source address or Ethertype */
1027 if (!is_zero_ether_addr(mac_mask->h_source) ||
1028 mac_mask->h_proto)
1029 return -EINVAL;
1030 945
1031 /* Is it a default UC or MC filter? */ 946 case ETHER_FLOW:
1032 if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) && 947 if (!is_zero_ether_addr(mac_mask->h_dest)) {
1033 vlan_tag_mask == 0) { 948 if (ether_addr_equal(mac_mask->h_dest,
1034 if (is_multicast_ether_addr(mac_entry->h_dest)) 949 mac_addr_ig_mask))
1035 rc = efx_filter_set_mc_def(&spec); 950 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
951 else if (is_broadcast_ether_addr(mac_mask->h_dest))
952 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
1036 else 953 else
1037 rc = efx_filter_set_uc_def(&spec); 954 return -EINVAL;
955 memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
1038 } 956 }
1039 /* Otherwise, it must match all of destination and all 957 if (!is_zero_ether_addr(mac_mask->h_source)) {
1040 * or none of VID. 958 if (!is_broadcast_ether_addr(mac_mask->h_source))
1041 */ 959 return -EINVAL;
1042 else if (is_broadcast_ether_addr(mac_mask->h_dest) && 960 spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
1043 (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) { 961 memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
1044 rc = efx_filter_set_eth_local( 962 }
1045 &spec, 963 if (mac_mask->h_proto) {
1046 vlan_tag_mask ? 964 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
1047 ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, 965 return -EINVAL;
1048 mac_entry->h_dest); 966 spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
1049 } else { 967 spec.ether_type = mac_entry->h_proto;
1050 rc = -EINVAL;
1051 } 968 }
1052 if (rc)
1053 return rc;
1054 break; 969 break;
1055 }
1056 970
1057 default: 971 default:
1058 return -EINVAL; 972 return -EINVAL;
1059 } 973 }
1060 974
975 if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
976 if (rule->m_ext.vlan_tci != htons(0xfff))
977 return -EINVAL;
978 spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
979 spec.outer_vid = rule->h_ext.vlan_tci;
980 }
981
1061 rc = efx_filter_insert_filter(efx, &spec, true); 982 rc = efx_filter_insert_filter(efx, &spec, true);
1062 if (rc < 0) 983 if (rc < 0)
1063 return rc; 984 return rc;
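For orientation, a hedged userspace-side sketch of the kind of rule the rewritten efx_ethtool_set_class_rule() accepts: a TCP/IPv4 filter on destination address and port, steered to RX queue 2. Field names come from the uapi ethtool structures; the values are illustrative, and delivery to the driver would go through the ETHTOOL_SRXCLSRLINS ioctl:

#include <linux/ethtool.h>
#include <arpa/inet.h>
#include <string.h>

static void example_fill_rule(struct ethtool_rx_flow_spec *rule)
{
	memset(rule, 0, sizeof(*rule));
	rule->flow_type = TCP_V4_FLOW;
	rule->h_u.tcp_ip4_spec.ip4dst = htonl(0x0a000001);	/* 10.0.0.1 */
	rule->m_u.tcp_ip4_spec.ip4dst = ~0u;			/* full match */
	rule->h_u.tcp_ip4_spec.pdst = htons(80);
	rule->m_u.tcp_ip4_spec.pdst = 0xffff;			/* full match */
	rule->ring_cookie = 2;					/* RX queue 2 */
}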
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 71998e7995d9..ff5d322b9b49 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -19,17 +19,284 @@
19#include "net_driver.h" 19#include "net_driver.h"
20#include "bitfield.h" 20#include "bitfield.h"
21#include "efx.h" 21#include "efx.h"
22#include "spi.h"
23#include "nic.h" 22#include "nic.h"
24#include "regs.h" 23#include "farch_regs.h"
25#include "io.h" 24#include "io.h"
26#include "phy.h" 25#include "phy.h"
27#include "workarounds.h" 26#include "workarounds.h"
28#include "selftest.h" 27#include "selftest.h"
28#include "mdio_10g.h"
29 29
30/* Hardware control for SFC4000 (aka Falcon). */ 30/* Hardware control for SFC4000 (aka Falcon). */
31 31
32/**************************************************************************
33 *
34 * NIC stats
35 *
36 **************************************************************************
37 */
38
39#define FALCON_MAC_STATS_SIZE 0x100
40
41#define XgRxOctets_offset 0x0
42#define XgRxOctets_WIDTH 48
43#define XgRxOctetsOK_offset 0x8
44#define XgRxOctetsOK_WIDTH 48
45#define XgRxPkts_offset 0x10
46#define XgRxPkts_WIDTH 32
47#define XgRxPktsOK_offset 0x14
48#define XgRxPktsOK_WIDTH 32
49#define XgRxBroadcastPkts_offset 0x18
50#define XgRxBroadcastPkts_WIDTH 32
51#define XgRxMulticastPkts_offset 0x1C
52#define XgRxMulticastPkts_WIDTH 32
53#define XgRxUnicastPkts_offset 0x20
54#define XgRxUnicastPkts_WIDTH 32
55#define XgRxUndersizePkts_offset 0x24
56#define XgRxUndersizePkts_WIDTH 32
57#define XgRxOversizePkts_offset 0x28
58#define XgRxOversizePkts_WIDTH 32
59#define XgRxJabberPkts_offset 0x2C
60#define XgRxJabberPkts_WIDTH 32
61#define XgRxUndersizeFCSerrorPkts_offset 0x30
62#define XgRxUndersizeFCSerrorPkts_WIDTH 32
63#define XgRxDropEvents_offset 0x34
64#define XgRxDropEvents_WIDTH 32
65#define XgRxFCSerrorPkts_offset 0x38
66#define XgRxFCSerrorPkts_WIDTH 32
67#define XgRxAlignError_offset 0x3C
68#define XgRxAlignError_WIDTH 32
69#define XgRxSymbolError_offset 0x40
70#define XgRxSymbolError_WIDTH 32
71#define XgRxInternalMACError_offset 0x44
72#define XgRxInternalMACError_WIDTH 32
73#define XgRxControlPkts_offset 0x48
74#define XgRxControlPkts_WIDTH 32
75#define XgRxPausePkts_offset 0x4C
76#define XgRxPausePkts_WIDTH 32
77#define XgRxPkts64Octets_offset 0x50
78#define XgRxPkts64Octets_WIDTH 32
79#define XgRxPkts65to127Octets_offset 0x54
80#define XgRxPkts65to127Octets_WIDTH 32
81#define XgRxPkts128to255Octets_offset 0x58
82#define XgRxPkts128to255Octets_WIDTH 32
83#define XgRxPkts256to511Octets_offset 0x5C
84#define XgRxPkts256to511Octets_WIDTH 32
85#define XgRxPkts512to1023Octets_offset 0x60
86#define XgRxPkts512to1023Octets_WIDTH 32
87#define XgRxPkts1024to15xxOctets_offset 0x64
88#define XgRxPkts1024to15xxOctets_WIDTH 32
89#define XgRxPkts15xxtoMaxOctets_offset 0x68
90#define XgRxPkts15xxtoMaxOctets_WIDTH 32
91#define XgRxLengthError_offset 0x6C
92#define XgRxLengthError_WIDTH 32
93#define XgTxPkts_offset 0x80
94#define XgTxPkts_WIDTH 32
95#define XgTxOctets_offset 0x88
96#define XgTxOctets_WIDTH 48
97#define XgTxMulticastPkts_offset 0x90
98#define XgTxMulticastPkts_WIDTH 32
99#define XgTxBroadcastPkts_offset 0x94
100#define XgTxBroadcastPkts_WIDTH 32
101#define XgTxUnicastPkts_offset 0x98
102#define XgTxUnicastPkts_WIDTH 32
103#define XgTxControlPkts_offset 0x9C
104#define XgTxControlPkts_WIDTH 32
105#define XgTxPausePkts_offset 0xA0
106#define XgTxPausePkts_WIDTH 32
107#define XgTxPkts64Octets_offset 0xA4
108#define XgTxPkts64Octets_WIDTH 32
109#define XgTxPkts65to127Octets_offset 0xA8
110#define XgTxPkts65to127Octets_WIDTH 32
111#define XgTxPkts128to255Octets_offset 0xAC
112#define XgTxPkts128to255Octets_WIDTH 32
113#define XgTxPkts256to511Octets_offset 0xB0
114#define XgTxPkts256to511Octets_WIDTH 32
115#define XgTxPkts512to1023Octets_offset 0xB4
116#define XgTxPkts512to1023Octets_WIDTH 32
117#define XgTxPkts1024to15xxOctets_offset 0xB8
118#define XgTxPkts1024to15xxOctets_WIDTH 32
119#define XgTxPkts1519toMaxOctets_offset 0xBC
120#define XgTxPkts1519toMaxOctets_WIDTH 32
121#define XgTxUndersizePkts_offset 0xC0
122#define XgTxUndersizePkts_WIDTH 32
123#define XgTxOversizePkts_offset 0xC4
124#define XgTxOversizePkts_WIDTH 32
125#define XgTxNonTcpUdpPkt_offset 0xC8
126#define XgTxNonTcpUdpPkt_WIDTH 16
127#define XgTxMacSrcErrPkt_offset 0xCC
128#define XgTxMacSrcErrPkt_WIDTH 16
129#define XgTxIpSrcErrPkt_offset 0xD0
130#define XgTxIpSrcErrPkt_WIDTH 16
131#define XgDmaDone_offset 0xD4
132#define XgDmaDone_WIDTH 32
133
134#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
135 (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
136
137#define FALCON_DMA_STAT(ext_name, hw_name) \
138 [FALCON_STAT_ ## ext_name] = \
139 { #ext_name, \
140 /* 48-bit stats are zero-padded to 64 on DMA */ \
141 hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
142 hw_name ## _ ## offset }
143#define FALCON_OTHER_STAT(ext_name) \
144 [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
145
146static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
147 FALCON_DMA_STAT(tx_bytes, XgTxOctets),
148 FALCON_DMA_STAT(tx_packets, XgTxPkts),
149 FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
150 FALCON_DMA_STAT(tx_control, XgTxControlPkts),
151 FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
152 FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
153 FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
154 FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
155 FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
156 FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
157 FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
158 FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
159 FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
160 FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
161 FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
162 FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
163 FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
164 FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
165 FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
166 FALCON_DMA_STAT(rx_bytes, XgRxOctets),
167 FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
168 FALCON_OTHER_STAT(rx_bad_bytes),
169 FALCON_DMA_STAT(rx_packets, XgRxPkts),
170 FALCON_DMA_STAT(rx_good, XgRxPktsOK),
171 FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
172 FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
173 FALCON_DMA_STAT(rx_control, XgRxControlPkts),
174 FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
175 FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
176 FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
177 FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
178 FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
179 FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
180 FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
181 FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
182 FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
183 FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
184 FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
185 FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
186 FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
187 FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
188 FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
189 FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
190 FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
191 FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
192 FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
193 FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
194};
195static const unsigned long falcon_stat_mask[] = {
196 [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
197};
198
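As a worked expansion of one table entry: FALCON_DMA_STAT(tx_bytes, XgTxOctets) token-pastes the register name onto _WIDTH and _offset, and because XgTxOctets_WIDTH is 48 the width is promoted to 64 (48-bit stats are zero-padded to 64 bits on DMA), so the entry reduces to:

	[FALCON_STAT_tx_bytes] = { "tx_bytes", 64, 0x88 }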
199/**************************************************************************
200 *
201 * Basic SPI command set and bit definitions
202 *
203 *************************************************************************/
204
205#define SPI_WRSR 0x01 /* Write status register */
206#define SPI_WRITE 0x02 /* Write data to memory array */
207#define SPI_READ 0x03 /* Read data from memory array */
208#define SPI_WRDI 0x04 /* Reset write enable latch */
209#define SPI_RDSR 0x05 /* Read status register */
210#define SPI_WREN 0x06 /* Set write enable latch */
211#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
212
213#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
214#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
215#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
216#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
217#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
218#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
219
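A hedged usage sketch of this command set, mirroring the RDSR calls made later in this file: falcon_spi_cmd() treats a negative address as an unaddressed command and a non-NULL out buffer as a read, and callers are expected to hold the SPI mutex:

static int example_spi_read_status(struct efx_nic *efx,
				   const struct falcon_spi_device *spi,
				   u8 *status)
{
	/* -1 => no address phase; out buffer => read transfer */
	return falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			      status, sizeof(*status));
}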
220/**************************************************************************
221 *
222 * Non-volatile memory layout
223 *
224 **************************************************************************
225 */
226
227/* SFC4000 flash is partitioned into:
228 * 0-0x400 chip and board config (see struct falcon_nvconfig)
229 * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
230 * 0x8000-end boot code (mapped to PCI expansion ROM)
231 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
232 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
233 * 0-0x400 chip and board config
234 * configurable VPD
235 * 0x800-0x1800 boot config
236 * Aside from the chip and board config, all of these are optional and may
237 * be absent or truncated depending on the devices used.
238 */
239#define FALCON_NVCONFIG_END 0x400U
240#define FALCON_FLASH_BOOTCODE_START 0x8000U
241#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
242#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
243
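Worked example of those bounds: on a hypothetical 0x2000-byte large EEPROM, the boot-config partition spans min(0x2000, FALCON_EEPROM_BOOTCONFIG_END) - FALCON_EEPROM_BOOTCONFIG_START = 0x1800 - 0x800 = 0x1000 bytes, which is exactly the size computation falcon_mtd_probe() performs further down.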
244/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
245struct falcon_nvconfig_board_v2 {
246 __le16 nports;
247 u8 port0_phy_addr;
248 u8 port0_phy_type;
249 u8 port1_phy_addr;
250 u8 port1_phy_type;
251 __le16 asic_sub_revision;
252 __le16 board_revision;
253} __packed;
254
255/* Board configuration v3 extra information */
256struct falcon_nvconfig_board_v3 {
257 __le32 spi_device_type[2];
258} __packed;
259
260/* Bit numbers for spi_device_type */
261#define SPI_DEV_TYPE_SIZE_LBN 0
262#define SPI_DEV_TYPE_SIZE_WIDTH 5
263#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
264#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
265#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
266#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
267#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
268#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
269#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
270#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
271#define SPI_DEV_TYPE_FIELD(type, field) \
272 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
273
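A stand-alone illustration of the encoding (the type word is hypothetical; EFX_LOW_BIT/EFX_MASK32 reduce to the usual shift-and-mask):

#include <stdio.h>
#include <stdint.h>

#define SPI_DEV_TYPE_ADDR_LEN_LBN	6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH	2

static uint32_t spi_dev_type_field(uint32_t type, int lbn, int width)
{
	return (type >> lbn) & ((1u << width) - 1);
}

int main(void)
{
	uint32_t type = 2u << SPI_DEV_TYPE_ADDR_LEN_LBN; /* 2-byte addressing */

	printf("addr_len = %u\n",
	       (unsigned int)spi_dev_type_field(type,
						SPI_DEV_TYPE_ADDR_LEN_LBN,
						SPI_DEV_TYPE_ADDR_LEN_WIDTH));
	return 0;
}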
274#define FALCON_NVCONFIG_OFFSET 0x300
275
276#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
277struct falcon_nvconfig {
278 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
279 u8 mac_address[2][8]; /* 0x310 */
280 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
281 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
282 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
283 efx_oword_t hw_init_reg; /* 0x350 */
284 efx_oword_t nic_stat_reg; /* 0x360 */
285 efx_oword_t glb_ctl_reg; /* 0x370 */
286 efx_oword_t srm_cfg_reg; /* 0x380 */
287 efx_oword_t spare_reg; /* 0x390 */
288 __le16 board_magic_num; /* 0x3A0 */
289 __le16 board_struct_ver;
290 __le16 board_checksum;
291 struct falcon_nvconfig_board_v2 board_v2;
292 efx_oword_t ee_base_page_reg; /* 0x3B0 */
293 struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
294} __packed;
295
296/*************************************************************************/
297
32static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 298static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
299static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
33 300
34static const unsigned int 301static const unsigned int
35/* "Large" EEPROM device: Atmel AT25640 or similar 302/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -146,7 +413,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
146 * 413 *
147 * NB most hardware supports MSI interrupts 414 * NB most hardware supports MSI interrupts
148 */ 415 */
149inline void falcon_irq_ack_a1(struct efx_nic *efx) 416static inline void falcon_irq_ack_a1(struct efx_nic *efx)
150{ 417{
151 efx_dword_t reg; 418 efx_dword_t reg;
152 419
@@ -156,7 +423,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
156} 423}
157 424
158 425
159irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 426static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
160{ 427{
161 struct efx_nic *efx = dev_id; 428 struct efx_nic *efx = dev_id;
162 efx_oword_t *int_ker = efx->irq_status.addr; 429 efx_oword_t *int_ker = efx->irq_status.addr;
@@ -177,10 +444,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
177 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 444 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
178 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 445 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
179 446
447 if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
448 return IRQ_HANDLED;
449
180 /* Check to see if we have a serious error condition */ 450 /* Check to see if we have a serious error condition */
181 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 451 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
182 if (unlikely(syserr)) 452 if (unlikely(syserr))
183 return efx_nic_fatal_interrupt(efx); 453 return efx_farch_fatal_interrupt(efx);
184 454
185 /* Determine interrupting queues, clear interrupt status 455 /* Determine interrupting queues, clear interrupt status
186 * register and acknowledge the device interrupt. 456 * register and acknowledge the device interrupt.
@@ -241,9 +511,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
241 } 511 }
242} 512}
243 513
244int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi, 514static int
245 unsigned int command, int address, 515falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
246 const void *in, void *out, size_t len) 516 unsigned int command, int address,
517 const void *in, void *out, size_t len)
247{ 518{
248 bool addressed = (address >= 0); 519 bool addressed = (address >= 0);
249 bool reading = (out != NULL); 520 bool reading = (out != NULL);
@@ -297,48 +568,16 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
297 return 0; 568 return 0;
298} 569}
299 570
300static size_t
301falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
302{
303 return min(FALCON_SPI_MAX_LEN,
304 (spi->block_size - (start & (spi->block_size - 1))));
305}
306
307static inline u8 571static inline u8
308efx_spi_munge_command(const struct efx_spi_device *spi, 572falcon_spi_munge_command(const struct falcon_spi_device *spi,
309 const u8 command, const unsigned int address) 573 const u8 command, const unsigned int address)
310{ 574{
311 return command | (((address >> 8) & spi->munge_address) << 3); 575 return command | (((address >> 8) & spi->munge_address) << 3);
312} 576}
313 577
314/* Wait up to 10 ms for buffered write completion */ 578static int
315int 579falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
316falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) 580 loff_t start, size_t len, size_t *retlen, u8 *buffer)
317{
318 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
319 u8 status;
320 int rc;
321
322 for (;;) {
323 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
324 &status, sizeof(status));
325 if (rc)
326 return rc;
327 if (!(status & SPI_STATUS_NRDY))
328 return 0;
329 if (time_after_eq(jiffies, timeout)) {
330 netif_err(efx, hw, efx->net_dev,
331 "SPI write timeout on device %d"
332 " last status=0x%02x\n",
333 spi->device_id, status);
334 return -ETIMEDOUT;
335 }
336 schedule_timeout_uninterruptible(1);
337 }
338}
339
340int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
341 loff_t start, size_t len, size_t *retlen, u8 *buffer)
342{ 581{
343 size_t block_len, pos = 0; 582 size_t block_len, pos = 0;
344 unsigned int command; 583 unsigned int command;
@@ -347,7 +586,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
347 while (pos < len) { 586 while (pos < len) {
348 block_len = min(len - pos, FALCON_SPI_MAX_LEN); 587 block_len = min(len - pos, FALCON_SPI_MAX_LEN);
349 588
350 command = efx_spi_munge_command(spi, SPI_READ, start + pos); 589 command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
351 rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, 590 rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
352 buffer + pos, block_len); 591 buffer + pos, block_len);
353 if (rc) 592 if (rc)
@@ -367,8 +606,52 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
367 return rc; 606 return rc;
368} 607}
369 608
370int 609#ifdef CONFIG_SFC_MTD
371falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, 610
611struct falcon_mtd_partition {
612 struct efx_mtd_partition common;
613 const struct falcon_spi_device *spi;
614 size_t offset;
615};
616
617#define to_falcon_mtd_partition(mtd) \
618 container_of(mtd, struct falcon_mtd_partition, common.mtd)
619
620static size_t
621falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
622{
623 return min(FALCON_SPI_MAX_LEN,
624 (spi->block_size - (start & (spi->block_size - 1))));
625}
626
627/* Wait up to 10 ms for buffered write completion */
628static int
629falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
630{
631 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
632 u8 status;
633 int rc;
634
635 for (;;) {
636 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
637 &status, sizeof(status));
638 if (rc)
639 return rc;
640 if (!(status & SPI_STATUS_NRDY))
641 return 0;
642 if (time_after_eq(jiffies, timeout)) {
643 netif_err(efx, hw, efx->net_dev,
644 "SPI write timeout on device %d"
645 " last status=0x%02x\n",
646 spi->device_id, status);
647 return -ETIMEDOUT;
648 }
649 schedule_timeout_uninterruptible(1);
650 }
651}
652
653static int
654falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
372 loff_t start, size_t len, size_t *retlen, const u8 *buffer) 655 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
373{ 656{
374 u8 verify_buffer[FALCON_SPI_MAX_LEN]; 657 u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -383,7 +666,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
383 666
384 block_len = min(len - pos, 667 block_len = min(len - pos,
385 falcon_spi_write_limit(spi, start + pos)); 668 falcon_spi_write_limit(spi, start + pos));
386 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); 669 command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
387 rc = falcon_spi_cmd(efx, spi, command, start + pos, 670 rc = falcon_spi_cmd(efx, spi, command, start + pos,
388 buffer + pos, NULL, block_len); 671 buffer + pos, NULL, block_len);
389 if (rc) 672 if (rc)
@@ -393,7 +676,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
393 if (rc) 676 if (rc)
394 break; 677 break;
395 678
396 command = efx_spi_munge_command(spi, SPI_READ, start + pos); 679 command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
397 rc = falcon_spi_cmd(efx, spi, command, start + pos, 680 rc = falcon_spi_cmd(efx, spi, command, start + pos,
398 NULL, verify_buffer, block_len); 681 NULL, verify_buffer, block_len);
399 if (memcmp(verify_buffer, buffer + pos, block_len)) { 682 if (memcmp(verify_buffer, buffer + pos, block_len)) {
@@ -416,6 +699,520 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
416 return rc; 699 return rc;
417} 700}
418 701
702static int
703falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
704{
705 const struct falcon_spi_device *spi = part->spi;
706 struct efx_nic *efx = part->common.mtd.priv;
707 u8 status;
708 int rc, i;
709
710 /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
711 for (i = 0; i < 40; i++) {
712 __set_current_state(uninterruptible ?
713 TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
714 schedule_timeout(HZ / 10);
715 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
716 &status, sizeof(status));
717 if (rc)
718 return rc;
719 if (!(status & SPI_STATUS_NRDY))
720 return 0;
721 if (signal_pending(current))
722 return -EINTR;
723 }
724 pr_err("%s: timed out waiting for %s\n",
725 part->common.name, part->common.dev_type_name);
726 return -ETIMEDOUT;
727}
728
729static int
730falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
731{
732 const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
733 SPI_STATUS_BP0);
734 u8 status;
735 int rc;
736
737 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
738 &status, sizeof(status));
739 if (rc)
740 return rc;
741
742 if (!(status & unlock_mask))
743 return 0; /* already unlocked */
744
745 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
746 if (rc)
747 return rc;
748 rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
749 if (rc)
750 return rc;
751
752 status &= ~unlock_mask;
753 rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
754 NULL, sizeof(status));
755 if (rc)
756 return rc;
757 rc = falcon_spi_wait_write(efx, spi);
758 if (rc)
759 return rc;
760
761 return 0;
762}
763
764#define FALCON_SPI_VERIFY_BUF_LEN 16
765
766static int
767falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
768{
769 const struct falcon_spi_device *spi = part->spi;
770 struct efx_nic *efx = part->common.mtd.priv;
771 unsigned pos, block_len;
772 u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
773 u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
774 int rc;
775
776 if (len != spi->erase_size)
777 return -EINVAL;
778
779 if (spi->erase_command == 0)
780 return -EOPNOTSUPP;
781
782 rc = falcon_spi_unlock(efx, spi);
783 if (rc)
784 return rc;
785 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
786 if (rc)
787 return rc;
788 rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
789 NULL, 0);
790 if (rc)
791 return rc;
792 rc = falcon_spi_slow_wait(part, false);
793
794 /* Verify the entire region has been wiped */
795 memset(empty, 0xff, sizeof(empty));
796 for (pos = 0; pos < len; pos += block_len) {
797 block_len = min(len - pos, sizeof(buffer));
798 rc = falcon_spi_read(efx, spi, start + pos, block_len,
799 NULL, buffer);
800 if (rc)
801 return rc;
802 if (memcmp(empty, buffer, block_len))
803 return -EIO;
804
805 /* Avoid locking up the system */
806 cond_resched();
807 if (signal_pending(current))
808 return -EINTR;
809 }
810
811 return rc;
812}
813
814static void falcon_mtd_rename(struct efx_mtd_partition *part)
815{
816 struct efx_nic *efx = part->mtd.priv;
817
818 snprintf(part->name, sizeof(part->name), "%s %s",
819 efx->name, part->type_name);
820}
821
822static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
823 size_t len, size_t *retlen, u8 *buffer)
824{
825 struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
826 struct efx_nic *efx = mtd->priv;
827 struct falcon_nic_data *nic_data = efx->nic_data;
828 int rc;
829
830 rc = mutex_lock_interruptible(&nic_data->spi_lock);
831 if (rc)
832 return rc;
833 rc = falcon_spi_read(efx, part->spi, part->offset + start,
834 len, retlen, buffer);
835 mutex_unlock(&nic_data->spi_lock);
836 return rc;
837}
838
839static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
840{
841 struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
842 struct efx_nic *efx = mtd->priv;
843 struct falcon_nic_data *nic_data = efx->nic_data;
844 int rc;
845
846 rc = mutex_lock_interruptible(&nic_data->spi_lock);
847 if (rc)
848 return rc;
849 rc = falcon_spi_erase(part, part->offset + start, len);
850 mutex_unlock(&nic_data->spi_lock);
851 return rc;
852}
853
854static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
855 size_t len, size_t *retlen, const u8 *buffer)
856{
857 struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
858 struct efx_nic *efx = mtd->priv;
859 struct falcon_nic_data *nic_data = efx->nic_data;
860 int rc;
861
862 rc = mutex_lock_interruptible(&nic_data->spi_lock);
863 if (rc)
864 return rc;
865 rc = falcon_spi_write(efx, part->spi, part->offset + start,
866 len, retlen, buffer);
867 mutex_unlock(&nic_data->spi_lock);
868 return rc;
869}
870
871static int falcon_mtd_sync(struct mtd_info *mtd)
872{
873 struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
874 struct efx_nic *efx = mtd->priv;
875 struct falcon_nic_data *nic_data = efx->nic_data;
876 int rc;
877
878 mutex_lock(&nic_data->spi_lock);
879 rc = falcon_spi_slow_wait(part, true);
880 mutex_unlock(&nic_data->spi_lock);
881 return rc;
882}
883
884static int falcon_mtd_probe(struct efx_nic *efx)
885{
886 struct falcon_nic_data *nic_data = efx->nic_data;
887 struct falcon_mtd_partition *parts;
888 struct falcon_spi_device *spi;
889 size_t n_parts;
890 int rc = -ENODEV;
891
892 ASSERT_RTNL();
893
894 /* Allocate space for maximum number of partitions */
895 parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
896 if (!parts)
897 return -ENOMEM;
898 n_parts = 0;
899
900 spi = &nic_data->spi_flash;
901 if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
902 parts[n_parts].spi = spi;
903 parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
904 parts[n_parts].common.dev_type_name = "flash";
905 parts[n_parts].common.type_name = "sfc_flash_bootrom";
906 parts[n_parts].common.mtd.type = MTD_NORFLASH;
907 parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
908 parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
909 parts[n_parts].common.mtd.erasesize = spi->erase_size;
910 n_parts++;
911 }
912
913 spi = &nic_data->spi_eeprom;
914 if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
915 parts[n_parts].spi = spi;
916 parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
917 parts[n_parts].common.dev_type_name = "EEPROM";
918 parts[n_parts].common.type_name = "sfc_bootconfig";
919 parts[n_parts].common.mtd.type = MTD_RAM;
920 parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
921 parts[n_parts].common.mtd.size =
922 min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
923 FALCON_EEPROM_BOOTCONFIG_START;
924 parts[n_parts].common.mtd.erasesize = spi->erase_size;
925 n_parts++;
926 }
927
928 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
929 if (rc)
930 kfree(parts);
931 return rc;
932}
933
934#endif /* CONFIG_SFC_MTD */
935
936/**************************************************************************
937 *
938 * XMAC operations
939 *
940 **************************************************************************
941 */
942
943/* Configure the XAUI driver that is an output from Falcon */
944static void falcon_setup_xaui(struct efx_nic *efx)
945{
946 efx_oword_t sdctl, txdrv;
947
948 /* Move the XAUI into low power, unless there is no PHY, in
949 * which case the XAUI will have to drive a cable. */
950 if (efx->phy_type == PHY_TYPE_NONE)
951 return;
952
953 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
954 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
955 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
956 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
957 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
958 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
959 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
960 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
961 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
962 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
963
964 EFX_POPULATE_OWORD_8(txdrv,
965 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
966 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
967 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
968 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
969 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
970 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
971 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
972 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
973 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
974}
975
976int falcon_reset_xaui(struct efx_nic *efx)
977{
978 struct falcon_nic_data *nic_data = efx->nic_data;
979 efx_oword_t reg;
980 int count;
981
982 /* Don't fetch MAC statistics over an XMAC reset */
983 WARN_ON(nic_data->stats_disable_count == 0);
984
985 /* Start reset sequence */
986 EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
987 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
988
989 /* Wait up to 10 ms for completion, then reinitialise */
990 for (count = 0; count < 1000; count++) {
991 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
992 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
993 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
994 falcon_setup_xaui(efx);
995 return 0;
996 }
997 udelay(10);
998 }
999 netif_err(efx, hw, efx->net_dev,
1000 "timed out waiting for XAUI/XGXS reset\n");
1001 return -ETIMEDOUT;
1002}
1003
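Worked timing check on the loop above: 1000 iterations of udelay(10) bounds the wait at 1000 × 10 µs = 10 ms, matching the comment.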
1004static void falcon_ack_status_intr(struct efx_nic *efx)
1005{
1006 struct falcon_nic_data *nic_data = efx->nic_data;
1007 efx_oword_t reg;
1008
1009 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
1010 return;
1011
1012 /* We expect xgmii faults if the wireside link is down */
1013 if (!efx->link_state.up)
1014 return;
1015
1016 /* We can only use this interrupt to signal the negative edge of
1017 * xaui_align [we have to poll the positive edge]. */
1018 if (nic_data->xmac_poll_required)
1019 return;
1020
1021 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
1022}
1023
1024static bool falcon_xgxs_link_ok(struct efx_nic *efx)
1025{
1026 efx_oword_t reg;
1027 bool align_done, link_ok = false;
1028 int sync_status;
1029
1030 /* Read link status */
1031 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
1032
1033 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
1034 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
1035 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
1036 link_ok = true;
1037
1038 /* Clear link status ready for next read */
1039 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
1040 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
1041 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
1042 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
1043
1044 return link_ok;
1045}
1046
1047static bool falcon_xmac_link_ok(struct efx_nic *efx)
1048{
1049 /*
1050 * Check MAC's XGXS link status except when using XGMII loopback
1051 * which bypasses the XGXS block.
1052 * If possible, check PHY's XGXS link status except when using
1053 * MAC loopback.
1054 */
1055 return (efx->loopback_mode == LOOPBACK_XGMII ||
1056 falcon_xgxs_link_ok(efx)) &&
1057 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
1058 LOOPBACK_INTERNAL(efx) ||
1059 efx_mdio_phyxgxs_lane_sync(efx));
1060}
1061
1062static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
1063{
1064 unsigned int max_frame_len;
1065 efx_oword_t reg;
1066 bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
1067 bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
1068
1069 /* Configure MAC - cut-thru mode is hard wired on */
1070 EFX_POPULATE_OWORD_3(reg,
1071 FRF_AB_XM_RX_JUMBO_MODE, 1,
1072 FRF_AB_XM_TX_STAT_EN, 1,
1073 FRF_AB_XM_RX_STAT_EN, 1);
1074 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
1075
1076 /* Configure TX */
1077 EFX_POPULATE_OWORD_6(reg,
1078 FRF_AB_XM_TXEN, 1,
1079 FRF_AB_XM_TX_PRMBL, 1,
1080 FRF_AB_XM_AUTO_PAD, 1,
1081 FRF_AB_XM_TXCRC, 1,
1082 FRF_AB_XM_FCNTL, tx_fc,
1083 FRF_AB_XM_IPG, 0x3);
1084 efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
1085
1086 /* Configure RX */
1087 EFX_POPULATE_OWORD_5(reg,
1088 FRF_AB_XM_RXEN, 1,
1089 FRF_AB_XM_AUTO_DEPAD, 0,
1090 FRF_AB_XM_ACPT_ALL_MCAST, 1,
1091 FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
1092 FRF_AB_XM_PASS_CRC_ERR, 1);
1093 efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
1094
1095 /* Set frame length */
1096 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
1097 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
1098 efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
1099 EFX_POPULATE_OWORD_2(reg,
1100 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
1101 FRF_AB_XM_TX_JUMBO_MODE, 1);
1102 efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
1103
1104 EFX_POPULATE_OWORD_2(reg,
1105 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
1106 FRF_AB_XM_DIS_FCNTL, !rx_fc);
1107 efx_writeo(efx, &reg, FR_AB_XM_FC);
1108
1109 /* Set MAC address */
1110 memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
1111 efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
1112 memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
1113 efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
1114}
1115
1116static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
1117{
1118 efx_oword_t reg;
1119 bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
1120 bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
1121 bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
1122 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
1123
1124 /* XGXS block is flaky and will need to be reset if moving
 1125 * into or out of XGMII, XGXS or XAUI loopbacks. */
1126 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
1127 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
1128 old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
1129
1130 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
1131 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
1132
1133 /* The PHY driver may have turned XAUI off */
1134 if ((xgxs_loopback != old_xgxs_loopback) ||
1135 (xaui_loopback != old_xaui_loopback) ||
1136 (xgmii_loopback != old_xgmii_loopback))
1137 falcon_reset_xaui(efx);
1138
1139 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
1140 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
1141 (xgxs_loopback || xaui_loopback) ?
1142 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
1143 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
1144 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
1145 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
1146
1147 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
1148 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
1149 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
1150 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
1151 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
1152 efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
1153}
1154
1155
1156/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
1157static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
1158{
1159 bool mac_up = falcon_xmac_link_ok(efx);
1160
1161 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
1162 efx_phy_mode_disabled(efx->phy_mode))
1163 /* XAUI link is expected to be down */
1164 return mac_up;
1165
1166 falcon_stop_nic_stats(efx);
1167
1168 while (!mac_up && tries) {
1169 netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
1170 falcon_reset_xaui(efx);
1171 udelay(200);
1172
1173 mac_up = falcon_xmac_link_ok(efx);
1174 --tries;
1175 }
1176
1177 falcon_start_nic_stats(efx);
1178
1179 return mac_up;
1180}
1181
1182static bool falcon_xmac_check_fault(struct efx_nic *efx)
1183{
1184 return !falcon_xmac_link_ok_retry(efx, 5);
1185}
1186
1187static int falcon_reconfigure_xmac(struct efx_nic *efx)
1188{
1189 struct falcon_nic_data *nic_data = efx->nic_data;
1190
1191 efx_farch_filter_sync_rx_mode(efx);
1192
1193 falcon_reconfigure_xgxs_core(efx);
1194 falcon_reconfigure_xmac_core(efx);
1195
1196 falcon_reconfigure_mac_wrapper(efx);
1197
1198 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
1199 falcon_ack_status_intr(efx);
1200
1201 return 0;
1202}
1203
1204static void falcon_poll_xmac(struct efx_nic *efx)
1205{
1206 struct falcon_nic_data *nic_data = efx->nic_data;
1207
1208 /* We expect xgmii faults if the wireside link is down */
1209 if (!efx->link_state.up || !nic_data->xmac_poll_required)
1210 return;
1211
1212 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
1213 falcon_ack_status_intr(efx);
1214}
1215
419/************************************************************************** 1216/**************************************************************************
420 * 1217 *
421 * MAC wrapper 1218 * MAC wrapper
@@ -497,7 +1294,7 @@ static void falcon_reset_macs(struct efx_nic *efx)
497 falcon_setup_xaui(efx); 1294 falcon_setup_xaui(efx);
498} 1295}
499 1296
500void falcon_drain_tx_fifo(struct efx_nic *efx) 1297static void falcon_drain_tx_fifo(struct efx_nic *efx)
501{ 1298{
502 efx_oword_t reg; 1299 efx_oword_t reg;
503 1300
@@ -529,7 +1326,7 @@ static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
529 falcon_drain_tx_fifo(efx); 1326 falcon_drain_tx_fifo(efx);
530} 1327}
531 1328
532void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) 1329static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
533{ 1330{
534 struct efx_link_state *link_state = &efx->link_state; 1331 struct efx_link_state *link_state = &efx->link_state;
535 efx_oword_t reg; 1332 efx_oword_t reg;
@@ -550,7 +1347,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
550 EFX_POPULATE_OWORD_5(reg, 1347 EFX_POPULATE_OWORD_5(reg,
551 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */, 1348 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
552 FRF_AB_MAC_BCAD_ACPT, 1, 1349 FRF_AB_MAC_BCAD_ACPT, 1,
553 FRF_AB_MAC_UC_PROM, efx->promiscuous, 1350 FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
554 FRF_AB_MAC_LINK_STATUS, 1, /* always set */ 1351 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
555 FRF_AB_MAC_SPEED, link_speed); 1352 FRF_AB_MAC_SPEED, link_speed);
556 /* On B0, MAC backpressure can be disabled and packets get 1353 /* On B0, MAC backpressure can be disabled and packets get
@@ -583,10 +1380,7 @@ static void falcon_stats_request(struct efx_nic *efx)
583 WARN_ON(nic_data->stats_pending); 1380 WARN_ON(nic_data->stats_pending);
584 WARN_ON(nic_data->stats_disable_count); 1381 WARN_ON(nic_data->stats_disable_count);
585 1382
586 if (nic_data->stats_dma_done == NULL) 1383 FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
587 return; /* no mac selected */
588
589 *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
590 nic_data->stats_pending = true; 1384 nic_data->stats_pending = true;
591 wmb(); /* ensure done flag is clear */ 1385 wmb(); /* ensure done flag is clear */
592 1386
@@ -608,9 +1402,11 @@ static void falcon_stats_complete(struct efx_nic *efx)
608 return; 1402 return;
609 1403
610 nic_data->stats_pending = false; 1404 nic_data->stats_pending = false;
611 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { 1405 if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
612 rmb(); /* read the done flag before the stats */ 1406 rmb(); /* read the done flag before the stats */
613 falcon_update_stats_xmac(efx); 1407 efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
1408 falcon_stat_mask, nic_data->stats,
1409 efx->stats_buffer.addr, true);
614 } else { 1410 } else {
615 netif_err(efx, hw, efx->net_dev, 1411 netif_err(efx, hw, efx->net_dev,
616 "timed out waiting for statistics\n"); 1412 "timed out waiting for statistics\n");
@@ -678,6 +1474,28 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
678 return 0; 1474 return 0;
679} 1475}
680 1476
1477/* TX flow control may automatically turn itself off if the link
1478 * partner (intermittently) stops responding to pause frames. There
 1479 * isn't any indication that this has happened, so the best we can do is
1480 * leave it up to the user to spot this and fix it by cycling transmit
1481 * flow control on this end.
1482 */
1483
1484static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
1485{
1486 /* Schedule a reset to recover */
1487 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
1488}
1489
1490static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
1491{
1492 /* Recover by resetting the EM block */
1493 falcon_stop_nic_stats(efx);
1494 falcon_drain_tx_fifo(efx);
1495 falcon_reconfigure_xmac(efx);
1496 falcon_start_nic_stats(efx);
1497}
1498
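These two variants pair with the efx->type->prepare_enable_fc_tx call added on the ethtool side earlier in this patch. A hedged sketch of the wiring through the NIC-type method table, with every other method elided:

static const struct efx_nic_type falcon_b0_nic_type_sketch = {
	/* ... the real falcon_b0_nic_type fills in many more methods ... */
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
};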
681/************************************************************************** 1499/**************************************************************************
682 * 1500 *
683 * PHY access via GMII 1501 * PHY access via GMII
@@ -861,7 +1679,7 @@ static int falcon_probe_port(struct efx_nic *efx)
861 1679
862 /* Allocate buffer for stats */ 1680 /* Allocate buffer for stats */
863 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, 1681 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
864 FALCON_MAC_STATS_SIZE); 1682 FALCON_MAC_STATS_SIZE, GFP_KERNEL);
865 if (rc) 1683 if (rc)
866 return rc; 1684 return rc;
867 netif_dbg(efx, probe, efx->net_dev, 1685 netif_dbg(efx, probe, efx->net_dev,
@@ -869,7 +1687,6 @@ static int falcon_probe_port(struct efx_nic *efx)
869 (u64)efx->stats_buffer.dma_addr, 1687 (u64)efx->stats_buffer.dma_addr,
870 efx->stats_buffer.addr, 1688 efx->stats_buffer.addr,
871 (u64)virt_to_phys(efx->stats_buffer.addr)); 1689 (u64)virt_to_phys(efx->stats_buffer.addr));
872 nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
873 1690
874 return 0; 1691 return 0;
875} 1692}
@@ -926,15 +1743,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
926{ 1743{
927 struct falcon_nic_data *nic_data = efx->nic_data; 1744 struct falcon_nic_data *nic_data = efx->nic_data;
928 struct falcon_nvconfig *nvconfig; 1745 struct falcon_nvconfig *nvconfig;
929 struct efx_spi_device *spi; 1746 struct falcon_spi_device *spi;
930 void *region; 1747 void *region;
931 int rc, magic_num, struct_ver; 1748 int rc, magic_num, struct_ver;
932 __le16 *word, *limit; 1749 __le16 *word, *limit;
933 u32 csum; 1750 u32 csum;
934 1751
935 if (efx_spi_present(&nic_data->spi_flash)) 1752 if (falcon_spi_present(&nic_data->spi_flash))
936 spi = &nic_data->spi_flash; 1753 spi = &nic_data->spi_flash;
937 else if (efx_spi_present(&nic_data->spi_eeprom)) 1754 else if (falcon_spi_present(&nic_data->spi_eeprom))
938 spi = &nic_data->spi_eeprom; 1755 spi = &nic_data->spi_eeprom;
939 else 1756 else
940 return -EINVAL; 1757 return -EINVAL;
@@ -949,7 +1766,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
949 mutex_unlock(&nic_data->spi_lock); 1766 mutex_unlock(&nic_data->spi_lock);
950 if (rc) { 1767 if (rc) {
951 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", 1768 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
952 efx_spi_present(&nic_data->spi_flash) ? 1769 falcon_spi_present(&nic_data->spi_flash) ?
953 "flash" : "EEPROM"); 1770 "flash" : "EEPROM");
954 rc = -EIO; 1771 rc = -EIO;
955 goto out; 1772 goto out;
@@ -998,7 +1815,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
998 return falcon_read_nvram(efx, NULL); 1815 return falcon_read_nvram(efx, NULL);
999} 1816}
1000 1817
1001static const struct efx_nic_register_test falcon_b0_register_tests[] = { 1818static const struct efx_farch_register_test falcon_b0_register_tests[] = {
1002 { FR_AZ_ADR_REGION, 1819 { FR_AZ_ADR_REGION,
1003 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, 1820 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1004 { FR_AZ_RX_CFG, 1821 { FR_AZ_RX_CFG,
@@ -1058,8 +1875,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
1058 efx_reset_down(efx, reset_method); 1875 efx_reset_down(efx, reset_method);
1059 1876
1060 tests->registers = 1877 tests->registers =
1061 efx_nic_test_registers(efx, falcon_b0_register_tests, 1878 efx_farch_test_registers(efx, falcon_b0_register_tests,
1062 ARRAY_SIZE(falcon_b0_register_tests)) 1879 ARRAY_SIZE(falcon_b0_register_tests))
1063 ? -1 : 1; 1880 ? -1 : 1;
1064 1881
1065 rc = falcon_reset_hw(efx, reset_method); 1882 rc = falcon_reset_hw(efx, reset_method);
@@ -1078,8 +1895,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1078{ 1895{
1079 switch (reason) { 1896 switch (reason) {
1080 case RESET_TYPE_RX_RECOVERY: 1897 case RESET_TYPE_RX_RECOVERY:
1081 case RESET_TYPE_RX_DESC_FETCH: 1898 case RESET_TYPE_DMA_ERROR:
1082 case RESET_TYPE_TX_DESC_FETCH:
1083 case RESET_TYPE_TX_SKIP: 1899 case RESET_TYPE_TX_SKIP:
1084 /* These can occasionally occur due to hardware bugs. 1900 /* These can occasionally occur due to hardware bugs.
1085 * We try to reset without disrupting the link. 1901 * We try to reset without disrupting the link.
@@ -1294,7 +2110,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
1294} 2110}
1295 2111
1296static void falcon_spi_device_init(struct efx_nic *efx, 2112static void falcon_spi_device_init(struct efx_nic *efx,
1297 struct efx_spi_device *spi_device, 2113 struct falcon_spi_device *spi_device,
1298 unsigned int device_id, u32 device_type) 2114 unsigned int device_id, u32 device_type)
1299{ 2115{
1300 if (device_type != 0) { 2116 if (device_type != 0) {
@@ -1360,10 +2176,11 @@ out:
1360 return rc; 2176 return rc;
1361} 2177}
1362 2178
1363static void falcon_dimension_resources(struct efx_nic *efx) 2179static int falcon_dimension_resources(struct efx_nic *efx)
1364{ 2180{
1365 efx->rx_dc_base = 0x20000; 2181 efx->rx_dc_base = 0x20000;
1366 efx->tx_dc_base = 0x26000; 2182 efx->tx_dc_base = 0x26000;
2183 return 0;
1367} 2184}
1368 2185
1369/* Probe all SPI devices on the NIC */ 2186/* Probe all SPI devices on the NIC */
@@ -1410,6 +2227,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
1410 large_eeprom_type); 2227 large_eeprom_type);
1411} 2228}
1412 2229
2230static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
2231{
2232 return 0x20000;
2233}
2234
2235static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
2236{
2237 /* Map everything up to and including the RSS indirection table.
2238 * The PCI core takes care of mapping the MSI-X tables.
2239 */
2240 return FR_BZ_RX_INDIRECTION_TBL +
2241 FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
2242}
2243
1413static int falcon_probe_nic(struct efx_nic *efx) 2244static int falcon_probe_nic(struct efx_nic *efx)
1414{ 2245{
1415 struct falcon_nic_data *nic_data; 2246 struct falcon_nic_data *nic_data;
@@ -1424,7 +2255,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
1424 2255
1425 rc = -ENODEV; 2256 rc = -ENODEV;
1426 2257
1427 if (efx_nic_fpga_ver(efx) != 0) { 2258 if (efx_farch_fpga_ver(efx) != 0) {
1428 netif_err(efx, probe, efx->net_dev, 2259 netif_err(efx, probe, efx->net_dev,
1429 "Falcon FPGA not supported\n"); 2260 "Falcon FPGA not supported\n");
1430 goto fail1; 2261 goto fail1;
@@ -1478,7 +2309,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1478 } 2309 }
1479 2310
1480 /* Allocate memory for INT_KER */ 2311 /* Allocate memory for INT_KER */
1481 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); 2312 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
2313 GFP_KERNEL);
1482 if (rc) 2314 if (rc)
1483 goto fail4; 2315 goto fail4;
1484 BUG_ON(efx->irq_status.dma_addr & 0x0f); 2316 BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -1499,6 +2331,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1499 goto fail5; 2331 goto fail5;
1500 } 2332 }
1501 2333
2334 efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
2335 EFX_MAX_CHANNELS);
1502 efx->timer_quantum_ns = 4968; /* 621 cycles */ 2336 efx->timer_quantum_ns = 4968; /* 621 cycles */
1503 2337
1504 /* Initialise I2C adapter */ 2338 /* Initialise I2C adapter */
@@ -1657,7 +2491,7 @@ static int falcon_init_nic(struct efx_nic *efx)
1657 efx_writeo(efx, &temp, FR_BZ_DP_CTRL); 2491 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
1658 } 2492 }
1659 2493
1660 efx_nic_init_common(efx); 2494 efx_farch_init_common(efx);
1661 2495
1662 return 0; 2496 return 0;
1663} 2497}
@@ -1688,24 +2522,65 @@ static void falcon_remove_nic(struct efx_nic *efx)
1688 efx->nic_data = NULL; 2522 efx->nic_data = NULL;
1689} 2523}
1690 2524
1691static void falcon_update_nic_stats(struct efx_nic *efx) 2525static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
2526{
2527 return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
2528 falcon_stat_mask, names);
2529}
2530
2531static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
2532 struct rtnl_link_stats64 *core_stats)
1692{ 2533{
1693 struct falcon_nic_data *nic_data = efx->nic_data; 2534 struct falcon_nic_data *nic_data = efx->nic_data;
2535 u64 *stats = nic_data->stats;
1694 efx_oword_t cnt; 2536 efx_oword_t cnt;
1695 2537
1696 if (nic_data->stats_disable_count) 2538 if (!nic_data->stats_disable_count) {
1697 return; 2539 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
2540 stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
2541 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
2542
2543 if (nic_data->stats_pending &&
2544 FALCON_XMAC_STATS_DMA_FLAG(efx)) {
2545 nic_data->stats_pending = false;
2546 rmb(); /* read the done flag before the stats */
2547 efx_nic_update_stats(
2548 falcon_stat_desc, FALCON_STAT_COUNT,
2549 falcon_stat_mask,
2550 stats, efx->stats_buffer.addr, true);
2551 }
1698 2552
1699 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP); 2553 /* Update derived statistic */
1700 efx->n_rx_nodesc_drop_cnt += 2554 efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
1701 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); 2555 stats[FALCON_STAT_rx_bytes] -
2556 stats[FALCON_STAT_rx_good_bytes] -
2557 stats[FALCON_STAT_rx_control] * 64);
2558 }
1702 2559
1703 if (nic_data->stats_pending && 2560 if (full_stats)
1704 *nic_data->stats_dma_done == FALCON_STATS_DONE) { 2561 memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
1705 nic_data->stats_pending = false; 2562
1706 rmb(); /* read the done flag before the stats */ 2563 if (core_stats) {
1707 falcon_update_stats_xmac(efx); 2564 core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
2565 core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
2566 core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
2567 core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
2568 core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
2569 core_stats->multicast = stats[FALCON_STAT_rx_multicast];
2570 core_stats->rx_length_errors =
2571 stats[FALCON_STAT_rx_gtjumbo] +
2572 stats[FALCON_STAT_rx_length_error];
2573 core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
2574 core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
2575 core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
2576
2577 core_stats->rx_errors = (core_stats->rx_length_errors +
2578 core_stats->rx_crc_errors +
2579 core_stats->rx_frame_errors +
2580 stats[FALCON_STAT_rx_symbol_error]);
1708 } 2581 }
2582
2583 return FALCON_STAT_COUNT;
1709} 2584}
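/* The helper assumed above, efx_update_diff_stat(), is expected to move a
 * derived statistic only forwards, so a stale DMA snapshot cannot make
 * rx_bad_bytes appear to decrease. A minimal sketch of that contract
 * (hypothetical name, not part of this diff):
 *
 *	static void sketch_update_diff_stat(u64 *stat, u64 diff)
 *	{
 *		if ((s64)(diff - *stat) > 0)	(signed compare tolerates wrap)
 *			*stat = diff;
 *	}
 *
 * The "* 64" term appears to account for control (pause) frames at the
 * 64-byte minimum frame size.
 */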
1710 2585
1711void falcon_start_nic_stats(struct efx_nic *efx) 2586void falcon_start_nic_stats(struct efx_nic *efx)
@@ -1734,7 +2609,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
1734 /* Wait enough time for the most recent transfer to 2609 /* Wait enough time for the most recent transfer to
1735 * complete. */ 2610 * complete. */
1736 for (i = 0; i < 4 && nic_data->stats_pending; i++) { 2611 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
1737 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) 2612 if (FALCON_XMAC_STATS_DMA_FLAG(efx))
1738 break; 2613 break;
1739 msleep(1); 2614 msleep(1);
1740 } 2615 }
@@ -1778,11 +2653,12 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
1778 */ 2653 */
1779 2654
1780const struct efx_nic_type falcon_a1_nic_type = { 2655const struct efx_nic_type falcon_a1_nic_type = {
2656 .mem_map_size = falcon_a1_mem_map_size,
1781 .probe = falcon_probe_nic, 2657 .probe = falcon_probe_nic,
1782 .remove = falcon_remove_nic, 2658 .remove = falcon_remove_nic,
1783 .init = falcon_init_nic, 2659 .init = falcon_init_nic,
1784 .dimension_resources = falcon_dimension_resources, 2660 .dimension_resources = falcon_dimension_resources,
1785 .fini = efx_port_dummy_op_void, 2661 .fini = falcon_irq_ack_a1,
1786 .monitor = falcon_monitor, 2662 .monitor = falcon_monitor,
1787 .map_reset_reason = falcon_map_reset_reason, 2663 .map_reset_reason = falcon_map_reset_reason,
1788 .map_reset_flags = falcon_map_reset_flags, 2664 .map_reset_flags = falcon_map_reset_flags,
@@ -1790,23 +2666,71 @@ const struct efx_nic_type falcon_a1_nic_type = {
1790 .probe_port = falcon_probe_port, 2666 .probe_port = falcon_probe_port,
1791 .remove_port = falcon_remove_port, 2667 .remove_port = falcon_remove_port,
1792 .handle_global_event = falcon_handle_global_event, 2668 .handle_global_event = falcon_handle_global_event,
2669 .fini_dmaq = efx_farch_fini_dmaq,
1793 .prepare_flush = falcon_prepare_flush, 2670 .prepare_flush = falcon_prepare_flush,
1794 .finish_flush = efx_port_dummy_op_void, 2671 .finish_flush = efx_port_dummy_op_void,
2672 .describe_stats = falcon_describe_nic_stats,
1795 .update_stats = falcon_update_nic_stats, 2673 .update_stats = falcon_update_nic_stats,
1796 .start_stats = falcon_start_nic_stats, 2674 .start_stats = falcon_start_nic_stats,
1797 .stop_stats = falcon_stop_nic_stats, 2675 .stop_stats = falcon_stop_nic_stats,
1798 .set_id_led = falcon_set_id_led, 2676 .set_id_led = falcon_set_id_led,
1799 .push_irq_moderation = falcon_push_irq_moderation, 2677 .push_irq_moderation = falcon_push_irq_moderation,
1800 .reconfigure_port = falcon_reconfigure_port, 2678 .reconfigure_port = falcon_reconfigure_port,
2679 .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
1801 .reconfigure_mac = falcon_reconfigure_xmac, 2680 .reconfigure_mac = falcon_reconfigure_xmac,
1802 .check_mac_fault = falcon_xmac_check_fault, 2681 .check_mac_fault = falcon_xmac_check_fault,
1803 .get_wol = falcon_get_wol, 2682 .get_wol = falcon_get_wol,
1804 .set_wol = falcon_set_wol, 2683 .set_wol = falcon_set_wol,
1805 .resume_wol = efx_port_dummy_op_void, 2684 .resume_wol = efx_port_dummy_op_void,
1806 .test_nvram = falcon_test_nvram, 2685 .test_nvram = falcon_test_nvram,
2686 .irq_enable_master = efx_farch_irq_enable_master,
2687 .irq_test_generate = efx_farch_irq_test_generate,
2688 .irq_disable_non_ev = efx_farch_irq_disable_master,
2689 .irq_handle_msi = efx_farch_msi_interrupt,
2690 .irq_handle_legacy = falcon_legacy_interrupt_a1,
2691 .tx_probe = efx_farch_tx_probe,
2692 .tx_init = efx_farch_tx_init,
2693 .tx_remove = efx_farch_tx_remove,
2694 .tx_write = efx_farch_tx_write,
2695 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2696 .rx_probe = efx_farch_rx_probe,
2697 .rx_init = efx_farch_rx_init,
2698 .rx_remove = efx_farch_rx_remove,
2699 .rx_write = efx_farch_rx_write,
2700 .rx_defer_refill = efx_farch_rx_defer_refill,
2701 .ev_probe = efx_farch_ev_probe,
2702 .ev_init = efx_farch_ev_init,
2703 .ev_fini = efx_farch_ev_fini,
2704 .ev_remove = efx_farch_ev_remove,
2705 .ev_process = efx_farch_ev_process,
2706 .ev_read_ack = efx_farch_ev_read_ack,
2707 .ev_test_generate = efx_farch_ev_test_generate,
2708
2709 /* We don't expose the filter table on Falcon A1 as it is not
2710 * mapped into function 0, but these implementations still
2711 * work with a degenerate case of all tables set to size 0.
2712 */
2713 .filter_table_probe = efx_farch_filter_table_probe,
2714 .filter_table_restore = efx_farch_filter_table_restore,
2715 .filter_table_remove = efx_farch_filter_table_remove,
2716 .filter_insert = efx_farch_filter_insert,
2717 .filter_remove_safe = efx_farch_filter_remove_safe,
2718 .filter_get_safe = efx_farch_filter_get_safe,
2719 .filter_clear_rx = efx_farch_filter_clear_rx,
2720 .filter_count_rx_used = efx_farch_filter_count_rx_used,
2721 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
2722 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
2723
2724#ifdef CONFIG_SFC_MTD
2725 .mtd_probe = falcon_mtd_probe,
2726 .mtd_rename = falcon_mtd_rename,
2727 .mtd_read = falcon_mtd_read,
2728 .mtd_erase = falcon_mtd_erase,
2729 .mtd_write = falcon_mtd_write,
2730 .mtd_sync = falcon_mtd_sync,
2731#endif
1807 2732
1808 .revision = EFX_REV_FALCON_A1, 2733 .revision = EFX_REV_FALCON_A1,
1809 .mem_map_size = 0x20000,
1810 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, 2734 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
1811 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, 2735 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
1812 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, 2736 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -1816,12 +2740,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
1816 .rx_buffer_padding = 0x24, 2740 .rx_buffer_padding = 0x24,
1817 .can_rx_scatter = false, 2741 .can_rx_scatter = false,
1818 .max_interrupt_mode = EFX_INT_MODE_MSI, 2742 .max_interrupt_mode = EFX_INT_MODE_MSI,
1819 .phys_addr_channels = 4,
1820 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, 2743 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
1821 .offload_features = NETIF_F_IP_CSUM, 2744 .offload_features = NETIF_F_IP_CSUM,
2745 .mcdi_max_ver = -1,
1822}; 2746};
1823 2747
1824const struct efx_nic_type falcon_b0_nic_type = { 2748const struct efx_nic_type falcon_b0_nic_type = {
2749 .mem_map_size = falcon_b0_mem_map_size,
1825 .probe = falcon_probe_nic, 2750 .probe = falcon_probe_nic,
1826 .remove = falcon_remove_nic, 2751 .remove = falcon_remove_nic,
1827 .init = falcon_init_nic, 2752 .init = falcon_init_nic,
@@ -1834,14 +2759,17 @@ const struct efx_nic_type falcon_b0_nic_type = {
1834 .probe_port = falcon_probe_port, 2759 .probe_port = falcon_probe_port,
1835 .remove_port = falcon_remove_port, 2760 .remove_port = falcon_remove_port,
1836 .handle_global_event = falcon_handle_global_event, 2761 .handle_global_event = falcon_handle_global_event,
2762 .fini_dmaq = efx_farch_fini_dmaq,
1837 .prepare_flush = falcon_prepare_flush, 2763 .prepare_flush = falcon_prepare_flush,
1838 .finish_flush = efx_port_dummy_op_void, 2764 .finish_flush = efx_port_dummy_op_void,
2765 .describe_stats = falcon_describe_nic_stats,
1839 .update_stats = falcon_update_nic_stats, 2766 .update_stats = falcon_update_nic_stats,
1840 .start_stats = falcon_start_nic_stats, 2767 .start_stats = falcon_start_nic_stats,
1841 .stop_stats = falcon_stop_nic_stats, 2768 .stop_stats = falcon_stop_nic_stats,
1842 .set_id_led = falcon_set_id_led, 2769 .set_id_led = falcon_set_id_led,
1843 .push_irq_moderation = falcon_push_irq_moderation, 2770 .push_irq_moderation = falcon_push_irq_moderation,
1844 .reconfigure_port = falcon_reconfigure_port, 2771 .reconfigure_port = falcon_reconfigure_port,
2772 .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
1845 .reconfigure_mac = falcon_reconfigure_xmac, 2773 .reconfigure_mac = falcon_reconfigure_xmac,
1846 .check_mac_fault = falcon_xmac_check_fault, 2774 .check_mac_fault = falcon_xmac_check_fault,
1847 .get_wol = falcon_get_wol, 2775 .get_wol = falcon_get_wol,
@@ -1849,28 +2777,67 @@ const struct efx_nic_type falcon_b0_nic_type = {
1849 .resume_wol = efx_port_dummy_op_void, 2777 .resume_wol = efx_port_dummy_op_void,
1850 .test_chip = falcon_b0_test_chip, 2778 .test_chip = falcon_b0_test_chip,
1851 .test_nvram = falcon_test_nvram, 2779 .test_nvram = falcon_test_nvram,
2780 .irq_enable_master = efx_farch_irq_enable_master,
2781 .irq_test_generate = efx_farch_irq_test_generate,
2782 .irq_disable_non_ev = efx_farch_irq_disable_master,
2783 .irq_handle_msi = efx_farch_msi_interrupt,
2784 .irq_handle_legacy = efx_farch_legacy_interrupt,
2785 .tx_probe = efx_farch_tx_probe,
2786 .tx_init = efx_farch_tx_init,
2787 .tx_remove = efx_farch_tx_remove,
2788 .tx_write = efx_farch_tx_write,
2789 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2790 .rx_probe = efx_farch_rx_probe,
2791 .rx_init = efx_farch_rx_init,
2792 .rx_remove = efx_farch_rx_remove,
2793 .rx_write = efx_farch_rx_write,
2794 .rx_defer_refill = efx_farch_rx_defer_refill,
2795 .ev_probe = efx_farch_ev_probe,
2796 .ev_init = efx_farch_ev_init,
2797 .ev_fini = efx_farch_ev_fini,
2798 .ev_remove = efx_farch_ev_remove,
2799 .ev_process = efx_farch_ev_process,
2800 .ev_read_ack = efx_farch_ev_read_ack,
2801 .ev_test_generate = efx_farch_ev_test_generate,
2802 .filter_table_probe = efx_farch_filter_table_probe,
2803 .filter_table_restore = efx_farch_filter_table_restore,
2804 .filter_table_remove = efx_farch_filter_table_remove,
2805 .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
2806 .filter_insert = efx_farch_filter_insert,
2807 .filter_remove_safe = efx_farch_filter_remove_safe,
2808 .filter_get_safe = efx_farch_filter_get_safe,
2809 .filter_clear_rx = efx_farch_filter_clear_rx,
2810 .filter_count_rx_used = efx_farch_filter_count_rx_used,
2811 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
2812 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
2813#ifdef CONFIG_RFS_ACCEL
2814 .filter_rfs_insert = efx_farch_filter_rfs_insert,
2815 .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
2816#endif
2817#ifdef CONFIG_SFC_MTD
2818 .mtd_probe = falcon_mtd_probe,
2819 .mtd_rename = falcon_mtd_rename,
2820 .mtd_read = falcon_mtd_read,
2821 .mtd_erase = falcon_mtd_erase,
2822 .mtd_write = falcon_mtd_write,
2823 .mtd_sync = falcon_mtd_sync,
2824#endif
1852 2825
1853 .revision = EFX_REV_FALCON_B0, 2826 .revision = EFX_REV_FALCON_B0,
1854 /* Map everything up to and including the RSS indirection
1855 * table. Don't map MSI-X table, MSI-X PBA since Linux
1856 * requires that they not be mapped. */
1857 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
1858 FR_BZ_RX_INDIRECTION_TBL_STEP *
1859 FR_BZ_RX_INDIRECTION_TBL_ROWS),
1860 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 2827 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
1861 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 2828 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
1862 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 2829 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
1863 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, 2830 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
1864 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, 2831 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
1865 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 2832 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1866 .rx_buffer_hash_size = 0x10, 2833 .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
2834 .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
1867 .rx_buffer_padding = 0, 2835 .rx_buffer_padding = 0,
1868 .can_rx_scatter = true, 2836 .can_rx_scatter = true,
1869 .max_interrupt_mode = EFX_INT_MODE_MSIX, 2837 .max_interrupt_mode = EFX_INT_MODE_MSIX,
1870 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
1871 * interrupt handler only supports 32
1872 * channels */
1873 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, 2838 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
1874 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, 2839 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
2840 .mcdi_max_ver = -1,
2841 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
1875}; 2842};
1876 2843
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index ec1e99d0dcad..1736f4b806af 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2010 Solarflare Communications Inc. 3 * Copyright 2007-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
deleted file mode 100644
index 8333865d4c95..000000000000
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ /dev/null
@@ -1,362 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "nic.h"
15#include "regs.h"
16#include "io.h"
17#include "mdio_10g.h"
18#include "workarounds.h"
19
20/**************************************************************************
21 *
22 * MAC operations
23 *
24 *************************************************************************/
25
26/* Configure the XAUI driver that is an output from Falcon */
27void falcon_setup_xaui(struct efx_nic *efx)
28{
29 efx_oword_t sdctl, txdrv;
30
31 /* Move the XAUI into low power, unless there is no PHY, in
32 * which case the XAUI will have to drive a cable. */
33 if (efx->phy_type == PHY_TYPE_NONE)
34 return;
35
36 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
37 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
38 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
39 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
40 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
41 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
42 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
43 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
44 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
45 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
46
47 EFX_POPULATE_OWORD_8(txdrv,
48 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
49 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
50 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
51 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
52 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
53 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
54 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
55 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
56 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
57}
58
59int falcon_reset_xaui(struct efx_nic *efx)
60{
61 struct falcon_nic_data *nic_data = efx->nic_data;
62 efx_oword_t reg;
63 int count;
64
65 /* Don't fetch MAC statistics over an XMAC reset */
66 WARN_ON(nic_data->stats_disable_count == 0);
67
68 /* Start reset sequence */
69 EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
70 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
71
72 /* Wait up to 10 ms for completion, then reinitialise */
73 for (count = 0; count < 1000; count++) {
74 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
75 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
76 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
77 falcon_setup_xaui(efx);
78 return 0;
79 }
80 udelay(10);
81 }
82 netif_err(efx, hw, efx->net_dev,
83 "timed out waiting for XAUI/XGXS reset\n");
84 return -ETIMEDOUT;
85}
86
87static void falcon_ack_status_intr(struct efx_nic *efx)
88{
89 struct falcon_nic_data *nic_data = efx->nic_data;
90 efx_oword_t reg;
91
92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
93 return;
94
95 /* We expect xgmii faults if the wireside link is down */
96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
97 return;
98
99 /* We can only use this interrupt to signal the negative edge of
100 * xaui_align [we have to poll the positive edge]. */
101 if (nic_data->xmac_poll_required)
102 return;
103
104 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
105}
106
107static bool falcon_xgxs_link_ok(struct efx_nic *efx)
108{
109 efx_oword_t reg;
110 bool align_done, link_ok = false;
111 int sync_status;
112
113 /* Read link status */
114 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
115
116 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
117 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
118 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
119 link_ok = true;
120
121 /* Clear link status ready for next read */
122 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
123 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
124 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
125 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
126
127 return link_ok;
128}
129
130static bool falcon_xmac_link_ok(struct efx_nic *efx)
131{
132 /*
133 * Check MAC's XGXS link status except when using XGMII loopback
134 * which bypasses the XGXS block.
135 * If possible, check PHY's XGXS link status except when using
136 * MAC loopback.
137 */
138 return (efx->loopback_mode == LOOPBACK_XGMII ||
139 falcon_xgxs_link_ok(efx)) &&
140 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
141 LOOPBACK_INTERNAL(efx) ||
142 efx_mdio_phyxgxs_lane_sync(efx));
143}
144
145static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
146{
147 unsigned int max_frame_len;
148 efx_oword_t reg;
149 bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
150 bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
151
152 /* Configure MAC - cut-thru mode is hard wired on */
153 EFX_POPULATE_OWORD_3(reg,
154 FRF_AB_XM_RX_JUMBO_MODE, 1,
155 FRF_AB_XM_TX_STAT_EN, 1,
156 FRF_AB_XM_RX_STAT_EN, 1);
157 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
158
159 /* Configure TX */
160 EFX_POPULATE_OWORD_6(reg,
161 FRF_AB_XM_TXEN, 1,
162 FRF_AB_XM_TX_PRMBL, 1,
163 FRF_AB_XM_AUTO_PAD, 1,
164 FRF_AB_XM_TXCRC, 1,
165 FRF_AB_XM_FCNTL, tx_fc,
166 FRF_AB_XM_IPG, 0x3);
167 efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
168
169 /* Configure RX */
170 EFX_POPULATE_OWORD_5(reg,
171 FRF_AB_XM_RXEN, 1,
172 FRF_AB_XM_AUTO_DEPAD, 0,
173 FRF_AB_XM_ACPT_ALL_MCAST, 1,
174 FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
175 FRF_AB_XM_PASS_CRC_ERR, 1);
176 efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
177
178 /* Set frame length */
179 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
180 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
181 efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
182 EFX_POPULATE_OWORD_2(reg,
183 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
184 FRF_AB_XM_TX_JUMBO_MODE, 1);
185 efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
186
187 EFX_POPULATE_OWORD_2(reg,
188 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
189 FRF_AB_XM_DIS_FCNTL, !rx_fc);
190 efx_writeo(efx, &reg, FR_AB_XM_FC);
191
192 /* Set MAC address */
193 memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
194 efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
195 memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
196 efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
197}
198
199static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
200{
201 efx_oword_t reg;
202 bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
203 bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
204 bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
205
206 /* XGXS block is flaky and will need to be reset if moving
207 * into or out of XGMII, XGXS or XAUI loopbacks. */
208 if (EFX_WORKAROUND_5147(efx)) {
209 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
210 bool reset_xgxs;
211
212 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
213 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
214 old_xgmii_loopback =
215 EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
216
217 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
218 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
219
220 /* The PHY driver may have turned XAUI off */
221 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
222 (xaui_loopback != old_xaui_loopback) ||
223 (xgmii_loopback != old_xgmii_loopback));
224
225 if (reset_xgxs)
226 falcon_reset_xaui(efx);
227 }
228
229 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
230 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
231 (xgxs_loopback || xaui_loopback) ?
232 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
233 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
234 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
235 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
236
237 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
238 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
239 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
240 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
241 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
242 efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
243}
244
245
246/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
247static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
248{
249 bool mac_up = falcon_xmac_link_ok(efx);
250
251 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
252 efx_phy_mode_disabled(efx->phy_mode))
253 /* XAUI link is expected to be down */
254 return mac_up;
255
256 falcon_stop_nic_stats(efx);
257
258 while (!mac_up && tries) {
259 netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
260 falcon_reset_xaui(efx);
261 udelay(200);
262
263 mac_up = falcon_xmac_link_ok(efx);
264 --tries;
265 }
266
267 falcon_start_nic_stats(efx);
268
269 return mac_up;
270}
271
272bool falcon_xmac_check_fault(struct efx_nic *efx)
273{
274 return !falcon_xmac_link_ok_retry(efx, 5);
275}
276
277int falcon_reconfigure_xmac(struct efx_nic *efx)
278{
279 struct falcon_nic_data *nic_data = efx->nic_data;
280
281 falcon_reconfigure_xgxs_core(efx);
282 falcon_reconfigure_xmac_core(efx);
283
284 falcon_reconfigure_mac_wrapper(efx);
285
286 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
287 falcon_ack_status_intr(efx);
288
289 return 0;
290}
291
292void falcon_update_stats_xmac(struct efx_nic *efx)
293{
294 struct efx_mac_stats *mac_stats = &efx->mac_stats;
295
296 /* Update MAC stats from DMAed values */
297 FALCON_STAT(efx, XgRxOctets, rx_bytes);
298 FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
299 FALCON_STAT(efx, XgRxPkts, rx_packets);
300 FALCON_STAT(efx, XgRxPktsOK, rx_good);
301 FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
302 FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
303 FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
304 FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
305 FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
306 FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
307 FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
308 FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
309 FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
310 FALCON_STAT(efx, XgRxAlignError, rx_align_error);
311 FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
312 FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
313 FALCON_STAT(efx, XgRxControlPkts, rx_control);
314 FALCON_STAT(efx, XgRxPausePkts, rx_pause);
315 FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
316 FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
317 FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
318 FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
319 FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
320 FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
321 FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
322 FALCON_STAT(efx, XgRxLengthError, rx_length_error);
323 FALCON_STAT(efx, XgTxPkts, tx_packets);
324 FALCON_STAT(efx, XgTxOctets, tx_bytes);
325 FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
326 FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
327 FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
328 FALCON_STAT(efx, XgTxControlPkts, tx_control);
329 FALCON_STAT(efx, XgTxPausePkts, tx_pause);
330 FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
331 FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
332 FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
333 FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
334 FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
335 FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
336 FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
337 FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
338 FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
339 FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
340 FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
341 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
342
343 /* Update derived statistics */
344 efx_update_diff_stat(&mac_stats->tx_good_bytes,
345 mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
346 mac_stats->tx_control * 64);
347 efx_update_diff_stat(&mac_stats->rx_bad_bytes,
348 mac_stats->rx_bytes - mac_stats->rx_good_bytes -
349 mac_stats->rx_control * 64);
350}
351
352void falcon_poll_xmac(struct efx_nic *efx)
353{
354 struct falcon_nic_data *nic_data = efx->nic_data;
355
356 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
357 !nic_data->xmac_poll_required)
358 return;
359
360 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
361 falcon_ack_status_intr(efx);
362}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 000000000000..c0907d884d75
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,2942 @@
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2013 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/interrupt.h>
14#include <linux/pci.h>
15#include <linux/module.h>
16#include <linux/seq_file.h>
17#include <linux/crc32.h>
18#include "net_driver.h"
19#include "bitfield.h"
20#include "efx.h"
21#include "nic.h"
22#include "farch_regs.h"
23#include "io.h"
24#include "workarounds.h"
25
26/* Falcon-architecture (SFC4000 and SFC9000-family) support */
27
28/**************************************************************************
29 *
30 * Configurable values
31 *
32 **************************************************************************
33 */
34
35/* This is set to 16 for a good reason. In summary, if larger than
36 * 16, the descriptor cache holds more than a default socket
37 * buffer's worth of packets (for UDP we can only have at most one
38 * socket buffer's worth outstanding). This combined with the fact
39 * that we only get 1 TX event per descriptor cache means the NIC
40 * goes idle.
41 */
42#define TX_DC_ENTRIES 16
43#define TX_DC_ENTRIES_ORDER 1
44
45#define RX_DC_ENTRIES 64
46#define RX_DC_ENTRIES_ORDER 3
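/* The _ORDER constants are presumed to encode the hardware descriptor
 * cache size field as entries = 8 << order, which matches both pairs
 * above: 8 << 1 == 16 and 8 << 3 == 64. Treat this as an inference from
 * the values, not a documented contract.
 */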
47
48/* If EFX_MAX_INT_ERRORS internal errors occur within
49 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
50 * disable it.
51 */
52#define EFX_INT_ERROR_EXPIRE 3600
53#define EFX_MAX_INT_ERRORS 5
54
55/* Depth of RX flush request fifo */
56#define EFX_RX_FLUSH_COUNT 4
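/* That is, at most four RX flush requests are kept in flight at once;
 * efx_farch_do_flush() below throttles rxq_flush_outstanding against
 * this limit before issuing further flushes.
 */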
57
58/* Driver generated events */
59#define _EFX_CHANNEL_MAGIC_TEST 0x000101
60#define _EFX_CHANNEL_MAGIC_FILL 0x000102
61#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
62#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
63
64#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
65#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
66
67#define EFX_CHANNEL_MAGIC_TEST(_channel) \
68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
69#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
71 efx_rx_queue_index(_rx_queue))
72#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
74 efx_rx_queue_index(_rx_queue))
75#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
76 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
77 (_tx_queue)->queue)
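/* Worked example of the encoding above, for a test event on channel 3:
 *
 *	magic = _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, 3);
 *		== (0x000101 << 8) | 3 == 0x0010103
 *	_EFX_CHANNEL_MAGIC_CODE(magic) == 0x000101 == _EFX_CHANNEL_MAGIC_TEST
 */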
78
79static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
80
81/**************************************************************************
82 *
83 * Hardware access
84 *
85 **************************************************************************/
86
87static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
88 unsigned int index)
89{
90 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
91 value, index);
92}
93
94static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
95 const efx_oword_t *mask)
96{
97 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
98 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
99}
100
101int efx_farch_test_registers(struct efx_nic *efx,
102 const struct efx_farch_register_test *regs,
103 size_t n_regs)
104{
105 unsigned address = 0, i, j;
106 efx_oword_t mask, imask, original, reg, buf;
107
108 for (i = 0; i < n_regs; ++i) {
109 address = regs[i].address;
110 mask = imask = regs[i].mask;
111 EFX_INVERT_OWORD(imask);
112
113 efx_reado(efx, &original, address);
114
115 /* bit sweep on and off */
116 for (j = 0; j < 128; j++) {
117 if (!EFX_EXTRACT_OWORD32(mask, j, j))
118 continue;
119
120 /* Test this testable bit can be set in isolation */
121 EFX_AND_OWORD(reg, original, mask);
122 EFX_SET_OWORD32(reg, j, j, 1);
123
124 efx_writeo(efx, &reg, address);
125 efx_reado(efx, &buf, address);
126
127 if (efx_masked_compare_oword(&reg, &buf, &mask))
128 goto fail;
129
130 /* Test this testable bit can be cleared in isolation */
131 EFX_OR_OWORD(reg, original, mask);
132 EFX_SET_OWORD32(reg, j, j, 0);
133
134 efx_writeo(efx, &reg, address);
135 efx_reado(efx, &buf, address);
136
137 if (efx_masked_compare_oword(&reg, &buf, &mask))
138 goto fail;
139 }
140
141 efx_writeo(efx, &original, address);
142 }
143
144 return 0;
145
146fail:
147 netif_err(efx, hw, efx->net_dev,
148 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
149 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
150 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
151 return -EIO;
152}
153
154/**************************************************************************
155 *
156 * Special buffer handling
157 * Special buffers are used for event queues and the TX and RX
158 * descriptor rings.
159 *
160 *************************************************************************/
161
162/*
163 * Initialise a special buffer
164 *
165 * This will define a buffer (previously allocated via
166 * efx_alloc_special_buffer()) in the buffer table, allowing
167 * it to be used for event queues, descriptor rings etc.
168 */
169static void
170efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
171{
172 efx_qword_t buf_desc;
173 unsigned int index;
174 dma_addr_t dma_addr;
175 int i;
176
177 EFX_BUG_ON_PARANOID(!buffer->buf.addr);
178
179 /* Write buffer descriptors to NIC */
180 for (i = 0; i < buffer->entries; i++) {
181 index = buffer->index + i;
182 dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
183 netif_dbg(efx, probe, efx->net_dev,
184 "mapping special buffer %d at %llx\n",
185 index, (unsigned long long)dma_addr);
186 EFX_POPULATE_QWORD_3(buf_desc,
187 FRF_AZ_BUF_ADR_REGION, 0,
188 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
189 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
190 efx_write_buf_tbl(efx, &buf_desc, index);
191 }
192}
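/* Sizing example (assuming EFX_BUF_SIZE is the 4KB unit described for
 * efx_alloc_special_buffer() below): a 32KB event queue ring occupies
 * eight consecutive buffer table entries, one per 4KB page, and
 * dma_addr >> 12 stores just the page frame number in each descriptor.
 */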
193
194/* Unmaps a buffer and clears the buffer table entries */
195static void
196efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
197{
198 efx_oword_t buf_tbl_upd;
199 unsigned int start = buffer->index;
200 unsigned int end = (buffer->index + buffer->entries - 1);
201
202 if (!buffer->entries)
203 return;
204
205 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
206 buffer->index, buffer->index + buffer->entries - 1);
207
208 EFX_POPULATE_OWORD_4(buf_tbl_upd,
209 FRF_AZ_BUF_UPD_CMD, 0,
210 FRF_AZ_BUF_CLR_CMD, 1,
211 FRF_AZ_BUF_CLR_END_ID, end,
212 FRF_AZ_BUF_CLR_START_ID, start);
213 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
214}
215
216/*
217 * Allocate a new special buffer
218 *
219 * This allocates memory for a new buffer, clears it and allocates a
220 * new buffer ID range. It does not write into the buffer table.
221 *
222 * This call will allocate 4KB buffers, since 8KB buffers can't be
223 * used for event queues and descriptor rings.
224 */
225static int efx_alloc_special_buffer(struct efx_nic *efx,
226 struct efx_special_buffer *buffer,
227 unsigned int len)
228{
229 len = ALIGN(len, EFX_BUF_SIZE);
230
231 if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
232 return -ENOMEM;
233 buffer->entries = len / EFX_BUF_SIZE;
234 BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
235
236 /* Select new buffer ID */
237 buffer->index = efx->next_buffer_table;
238 efx->next_buffer_table += buffer->entries;
239#ifdef CONFIG_SFC_SRIOV
240 BUG_ON(efx_sriov_enabled(efx) &&
241 efx->vf_buftbl_base < efx->next_buffer_table);
242#endif
243
244 netif_dbg(efx, probe, efx->net_dev,
245 "allocating special buffers %d-%d at %llx+%x "
246 "(virt %p phys %llx)\n", buffer->index,
247 buffer->index + buffer->entries - 1,
248 (u64)buffer->buf.dma_addr, len,
249 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
250
251 return 0;
252}
253
254static void
255efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
256{
257 if (!buffer->buf.addr)
258 return;
259
260 netif_dbg(efx, hw, efx->net_dev,
261 "deallocating special buffers %d-%d at %llx+%x "
262 "(virt %p phys %llx)\n", buffer->index,
263 buffer->index + buffer->entries - 1,
264 (u64)buffer->buf.dma_addr, buffer->buf.len,
265 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
266
267 efx_nic_free_buffer(efx, &buffer->buf);
268 buffer->entries = 0;
269}
270
271/**************************************************************************
272 *
273 * TX path
274 *
275 **************************************************************************/
276
277/* This writes TX_DESC_WPTR, the write pointer for the TX descriptor ring */
278static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
279{
280 unsigned write_ptr;
281 efx_dword_t reg;
282
283 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
284 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
285 efx_writed_page(tx_queue->efx, &reg,
286 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
287}
288
289/* Write pointer and first descriptor for TX descriptor ring */
290static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
291 const efx_qword_t *txd)
292{
293 unsigned write_ptr;
294 efx_oword_t reg;
295
296 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
297 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
298
299 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
300 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
301 FRF_AZ_TX_DESC_WPTR, write_ptr);
302 reg.qword[0] = *txd;
303 efx_writeo_page(tx_queue->efx, &reg,
304 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
305}
306
307
308/* For each entry inserted into the software descriptor ring, create a
309 * descriptor in the hardware TX descriptor ring (in host memory), and
310 * write a doorbell.
311 */
312void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
313{
314
315 struct efx_tx_buffer *buffer;
316 efx_qword_t *txd;
317 unsigned write_ptr;
318 unsigned old_write_count = tx_queue->write_count;
319
320 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
321
322 do {
323 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
324 buffer = &tx_queue->buffer[write_ptr];
325 txd = efx_tx_desc(tx_queue, write_ptr);
326 ++tx_queue->write_count;
327
328 EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
329
330 /* Create TX descriptor ring entry */
331 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
332 EFX_POPULATE_QWORD_4(*txd,
333 FSF_AZ_TX_KER_CONT,
334 buffer->flags & EFX_TX_BUF_CONT,
335 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
336 FSF_AZ_TX_KER_BUF_REGION, 0,
337 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
338 } while (tx_queue->write_count != tx_queue->insert_count);
339
340 wmb(); /* Ensure descriptors are written before they are fetched */
341
342 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
343 txd = efx_tx_desc(tx_queue,
344 old_write_count & tx_queue->ptr_mask);
345 efx_farch_push_tx_desc(tx_queue, txd);
346 ++tx_queue->pushes;
347 } else {
348 efx_farch_notify_tx_desc(tx_queue);
349 }
350}
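/* The push path above appears to exist to cut first-packet latency:
 * apparently when the queue was previously empty (the
 * efx_nic_may_push_tx_desc() check), the first descriptor travels inline
 * with the write pointer (reg.qword[0] = *txd in efx_farch_push_tx_desc),
 * so the NIC can start DMA without first fetching the descriptor from
 * host memory. Otherwise only the write pointer is notified and
 * descriptors are fetched as usual.
 */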
351
352/* Allocate hardware resources for a TX queue */
353int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
354{
355 struct efx_nic *efx = tx_queue->efx;
356 unsigned entries;
357
358 entries = tx_queue->ptr_mask + 1;
359 return efx_alloc_special_buffer(efx, &tx_queue->txd,
360 entries * sizeof(efx_qword_t));
361}
362
363void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
364{
365 struct efx_nic *efx = tx_queue->efx;
366 efx_oword_t reg;
367
368 /* Pin TX descriptor ring */
369 efx_init_special_buffer(efx, &tx_queue->txd);
370
371 /* Push TX descriptor ring to card */
372 EFX_POPULATE_OWORD_10(reg,
373 FRF_AZ_TX_DESCQ_EN, 1,
374 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
375 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
376 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
377 FRF_AZ_TX_DESCQ_EVQ_ID,
378 tx_queue->channel->channel,
379 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
380 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
381 FRF_AZ_TX_DESCQ_SIZE,
382 __ffs(tx_queue->txd.entries),
383 FRF_AZ_TX_DESCQ_TYPE, 0,
384 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
385
386 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
387 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
388 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
389 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
390 !csum);
391 }
392
393 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
394 tx_queue->queue);
395
396 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
397 /* Only 128 bits in this register */
398 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
399
400 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
401 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
402 __clear_bit_le(tx_queue->queue, &reg);
403 else
404 __set_bit_le(tx_queue->queue, &reg);
405 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
406 }
407
408 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
409 EFX_POPULATE_OWORD_1(reg,
410 FRF_BZ_TX_PACE,
411 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
412 FFE_BZ_TX_PACE_OFF :
413 FFE_BZ_TX_PACE_RESERVED);
414 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
415 tx_queue->queue);
416 }
417}
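/* On Falcon A1 the per-queue checksum control appears to be a 128-bit
 * disable bitmap: a set bit in FR_AA_TX_CHKSM_CFG turns offload off for
 * that queue, hence the __clear_bit_le() for offload queues and
 * __set_bit_le() for the rest in the block above.
 */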
418
419static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
420{
421 struct efx_nic *efx = tx_queue->efx;
422 efx_oword_t tx_flush_descq;
423
424 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
425 atomic_set(&tx_queue->flush_outstanding, 1);
426
427 EFX_POPULATE_OWORD_2(tx_flush_descq,
428 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
429 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
430 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
431}
432
433void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
434{
435 struct efx_nic *efx = tx_queue->efx;
436 efx_oword_t tx_desc_ptr;
437
438 /* Remove TX descriptor ring from card */
439 EFX_ZERO_OWORD(tx_desc_ptr);
440 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
441 tx_queue->queue);
442
443 /* Unpin TX descriptor ring */
444 efx_fini_special_buffer(efx, &tx_queue->txd);
445}
446
447/* Free buffers backing TX queue */
448void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
449{
450 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
451}
452
453/**************************************************************************
454 *
455 * RX path
456 *
457 **************************************************************************/
458
459/* This creates an entry in the RX descriptor queue */
460static inline void
461efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
462{
463 struct efx_rx_buffer *rx_buf;
464 efx_qword_t *rxd;
465
466 rxd = efx_rx_desc(rx_queue, index);
467 rx_buf = efx_rx_buffer(rx_queue, index);
468 EFX_POPULATE_QWORD_3(*rxd,
469 FSF_AZ_RX_KER_BUF_SIZE,
470 rx_buf->len -
471 rx_queue->efx->type->rx_buffer_padding,
472 FSF_AZ_RX_KER_BUF_REGION, 0,
473 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
474}
475
476/* This writes to the RX_DESC_WPTR register for the specified receive
477 * descriptor ring.
478 */
479void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
480{
481 struct efx_nic *efx = rx_queue->efx;
482 efx_dword_t reg;
483 unsigned write_ptr;
484
485 while (rx_queue->notified_count != rx_queue->added_count) {
486 efx_farch_build_rx_desc(
487 rx_queue,
488 rx_queue->notified_count & rx_queue->ptr_mask);
489 ++rx_queue->notified_count;
490 }
491
492 wmb();
493 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
494 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
495 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
496 efx_rx_queue_index(rx_queue));
497}
498
499int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
500{
501 struct efx_nic *efx = rx_queue->efx;
502 unsigned entries;
503
504 entries = rx_queue->ptr_mask + 1;
505 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
506 entries * sizeof(efx_qword_t));
507}
508
509void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
510{
511 efx_oword_t rx_desc_ptr;
512 struct efx_nic *efx = rx_queue->efx;
513 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
514 bool iscsi_digest_en = is_b0;
515 bool jumbo_en;
516
517 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
518 * DMA to continue after a PCIe page boundary (and scattering
519 * is not possible). In Falcon B0 and Siena, it enables
520 * scatter.
521 */
522 jumbo_en = !is_b0 || efx->rx_scatter;
523
524 netif_dbg(efx, hw, efx->net_dev,
525 "RX queue %d ring in special buffers %d-%d\n",
526 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
527 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
528
529 rx_queue->scatter_n = 0;
530
531 /* Pin RX descriptor ring */
532 efx_init_special_buffer(efx, &rx_queue->rxd);
533
534 /* Push RX descriptor ring to card */
535 EFX_POPULATE_OWORD_10(rx_desc_ptr,
536 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
537 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
538 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
539 FRF_AZ_RX_DESCQ_EVQ_ID,
540 efx_rx_queue_channel(rx_queue)->channel,
541 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
542 FRF_AZ_RX_DESCQ_LABEL,
543 efx_rx_queue_index(rx_queue),
544 FRF_AZ_RX_DESCQ_SIZE,
545 __ffs(rx_queue->rxd.entries),
546 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
547 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
548 FRF_AZ_RX_DESCQ_EN, 1);
549 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
550 efx_rx_queue_index(rx_queue));
551}
552
553static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
554{
555 struct efx_nic *efx = rx_queue->efx;
556 efx_oword_t rx_flush_descq;
557
558 EFX_POPULATE_OWORD_2(rx_flush_descq,
559 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
560 FRF_AZ_RX_FLUSH_DESCQ,
561 efx_rx_queue_index(rx_queue));
562 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
563}
564
565void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
566{
567 efx_oword_t rx_desc_ptr;
568 struct efx_nic *efx = rx_queue->efx;
569
570 /* Remove RX descriptor ring from card */
571 EFX_ZERO_OWORD(rx_desc_ptr);
572 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
573 efx_rx_queue_index(rx_queue));
574
575 /* Unpin RX descriptor ring */
576 efx_fini_special_buffer(efx, &rx_queue->rxd);
577}
578
579/* Free buffers backing RX queue */
580void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
581{
582 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
583}
584
585/**************************************************************************
586 *
587 * Flush handling
588 *
589 **************************************************************************/
590
591/* efx_farch_do_flush() must be woken up when all flushes are completed,
592 * or more RX flushes can be kicked off.
593 */
594static bool efx_farch_flush_wake(struct efx_nic *efx)
595{
596 /* Ensure that all updates are visible to efx_farch_do_flush() */
597 smp_mb();
598
599 return (atomic_read(&efx->active_queues) == 0 ||
600 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
601 && atomic_read(&efx->rxq_flush_pending) > 0));
602}
603
604static bool efx_check_tx_flush_complete(struct efx_nic *efx)
605{
606 bool i = true;
607 efx_oword_t txd_ptr_tbl;
608 struct efx_channel *channel;
609 struct efx_tx_queue *tx_queue;
610
611 efx_for_each_channel(channel, efx) {
612 efx_for_each_channel_tx_queue(tx_queue, channel) {
613 efx_reado_table(efx, &txd_ptr_tbl,
614 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
615 if (EFX_OWORD_FIELD(txd_ptr_tbl,
616 FRF_AZ_TX_DESCQ_FLUSH) ||
617 EFX_OWORD_FIELD(txd_ptr_tbl,
618 FRF_AZ_TX_DESCQ_EN)) {
619 netif_dbg(efx, hw, efx->net_dev,
620 "flush did not complete on TXQ %d\n",
621 tx_queue->queue);
622 i = false;
623 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
624 1, 0)) {
625 /* The flush is complete, but we didn't
626 * receive a flush completion event
627 */
628 netif_dbg(efx, hw, efx->net_dev,
629 "flush complete on TXQ %d, so drain "
630 "the queue\n", tx_queue->queue);
631 /* Don't need to increment active_queues as it
632 * has already been incremented for the queues
633 * which did not drain
634 */
635 efx_farch_magic_event(channel,
636 EFX_CHANNEL_MAGIC_TX_DRAIN(
637 tx_queue));
638 }
639 }
640 }
641
642 return i;
643}
644
645/* Flush all the transmit queues, and continue flushing receive queues until
646 * they're all flushed. Wait for the DRAIN events to be received so that there
647 * are no more RX and TX events left on any channel. */
648static int efx_farch_do_flush(struct efx_nic *efx)
649{
650 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
651 struct efx_channel *channel;
652 struct efx_rx_queue *rx_queue;
653 struct efx_tx_queue *tx_queue;
654 int rc = 0;
655
656 efx_for_each_channel(channel, efx) {
657 efx_for_each_channel_tx_queue(tx_queue, channel) {
658 efx_farch_flush_tx_queue(tx_queue);
659 }
660 efx_for_each_channel_rx_queue(rx_queue, channel) {
661 rx_queue->flush_pending = true;
662 atomic_inc(&efx->rxq_flush_pending);
663 }
664 }
665
666 while (timeout && atomic_read(&efx->active_queues) > 0) {
667 /* If SRIOV is enabled, then offload receive queue flushing to
668 * the firmware (though we will still have to poll for
669 * completion). If that fails, fall back to the old scheme.
670 */
671 if (efx_sriov_enabled(efx)) {
672 rc = efx_mcdi_flush_rxqs(efx);
673 if (!rc)
674 goto wait;
675 }
676
677 /* The hardware supports four concurrent rx flushes, each of
678 * which may need to be retried if there is an outstanding
679 * descriptor fetch
680 */
681 efx_for_each_channel(channel, efx) {
682 efx_for_each_channel_rx_queue(rx_queue, channel) {
683 if (atomic_read(&efx->rxq_flush_outstanding) >=
684 EFX_RX_FLUSH_COUNT)
685 break;
686
687 if (rx_queue->flush_pending) {
688 rx_queue->flush_pending = false;
689 atomic_dec(&efx->rxq_flush_pending);
690 atomic_inc(&efx->rxq_flush_outstanding);
691 efx_farch_flush_rx_queue(rx_queue);
692 }
693 }
694 }
695
696 wait:
697 timeout = wait_event_timeout(efx->flush_wq,
698 efx_farch_flush_wake(efx),
699 timeout);
700 }
701
702 if (atomic_read(&efx->active_queues) &&
703 !efx_check_tx_flush_complete(efx)) {
704 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
705 "(rx %d+%d)\n", atomic_read(&efx->active_queues),
706 atomic_read(&efx->rxq_flush_outstanding),
707 atomic_read(&efx->rxq_flush_pending));
708 rc = -ETIMEDOUT;
709
710 atomic_set(&efx->active_queues, 0);
711 atomic_set(&efx->rxq_flush_pending, 0);
712 atomic_set(&efx->rxq_flush_outstanding, 0);
713 }
714
715 return rc;
716}
717
718int efx_farch_fini_dmaq(struct efx_nic *efx)
719{
720 struct efx_channel *channel;
721 struct efx_tx_queue *tx_queue;
722 struct efx_rx_queue *rx_queue;
723 int rc = 0;
724
725 /* Do not attempt to write to the NIC during EEH recovery */
726 if (efx->state != STATE_RECOVERY) {
727 /* Only perform flush if DMA is enabled */
728 if (efx->pci_dev->is_busmaster) {
729 efx->type->prepare_flush(efx);
730 rc = efx_farch_do_flush(efx);
731 efx->type->finish_flush(efx);
732 }
733
734 efx_for_each_channel(channel, efx) {
735 efx_for_each_channel_rx_queue(rx_queue, channel)
736 efx_farch_rx_fini(rx_queue);
737 efx_for_each_channel_tx_queue(tx_queue, channel)
738 efx_farch_tx_fini(tx_queue);
739 }
740 }
741
742 return rc;
743}
744
745/**************************************************************************
746 *
747 * Event queue processing
748 * Event queues are processed by per-channel tasklets.
749 *
750 **************************************************************************/
751
752/* Update a channel's event queue's read pointer (RPTR) register
753 *
754 * This writes the EVQ_RPTR_REG register for the specified channel's
755 * event queue.
756 */
757void efx_farch_ev_read_ack(struct efx_channel *channel)
758{
759 efx_dword_t reg;
760 struct efx_nic *efx = channel->efx;
761
762 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
763 channel->eventq_read_ptr & channel->eventq_mask);
764
765 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
766 * of 4 bytes, but it is really 16 bytes just like later revisions.
767 */
768 efx_writed(efx, &reg,
769 efx->type->evq_rptr_tbl_base +
770 FR_BZ_EVQ_RPTR_STEP * channel->channel);
771}
772
773/* Use HW to insert a SW defined event */
774void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
775 efx_qword_t *event)
776{
777 efx_oword_t drv_ev_reg;
778
779 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
780 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
781 drv_ev_reg.u32[0] = event->u32[0];
782 drv_ev_reg.u32[1] = event->u32[1];
783 drv_ev_reg.u32[2] = 0;
784 drv_ev_reg.u32[3] = 0;
785 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
786 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
787}
788
789static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
790{
791 efx_qword_t event;
792
793 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
794 FSE_AZ_EV_CODE_DRV_GEN_EV,
795 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
796 efx_farch_generate_event(channel->efx, channel->channel, &event);
797}
798
799/* Handle a transmit completion event
800 *
801 * The NIC batches TX completion events; the message we receive is of
802 * the form "complete all TX events up to this index".
803 */
804static int
805efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
806{
807 unsigned int tx_ev_desc_ptr;
808 unsigned int tx_ev_q_label;
809 struct efx_tx_queue *tx_queue;
810 struct efx_nic *efx = channel->efx;
811 int tx_packets = 0;
812
813 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
814 return 0;
815
816 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
817 /* Transmit completion */
818 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
819 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
820 tx_queue = efx_channel_get_tx_queue(
821 channel, tx_ev_q_label % EFX_TXQ_TYPES);
822 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
823 tx_queue->ptr_mask);
824 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
825 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
826 /* Rewrite the FIFO write pointer */
827 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
828 tx_queue = efx_channel_get_tx_queue(
829 channel, tx_ev_q_label % EFX_TXQ_TYPES);
830
831 netif_tx_lock(efx->net_dev);
832 efx_farch_notify_tx_desc(tx_queue);
833 netif_tx_unlock(efx->net_dev);
834 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
835 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
836 } else {
837 netif_err(efx, tx_err, efx->net_dev,
838 "channel %d unexpected TX event "
839 EFX_QWORD_FMT"\n", channel->channel,
840 EFX_QWORD_VAL(*event));
841 }
842
843 return tx_packets;
844}
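
/* A minimal sketch (not part of the driver) of the batched-completion
 * arithmetic above: the event carries only the index of the last
 * completed descriptor, so the number of newly completed descriptors is
 * recovered modulo the ring size. example_tx_completions() is a
 * hypothetical name; ptr_mask is assumed to be the ring size minus one,
 * with the ring size a power of two, as in struct efx_tx_queue.
 */
static inline unsigned int
example_tx_completions(unsigned int tx_ev_desc_ptr,
		       unsigned int read_count, unsigned int ptr_mask)
{
	/* Correct across wrap-around thanks to the power-of-two mask:
	 * ptr_mask = 0x3ff, read_count = 0x3fe, tx_ev_desc_ptr = 0x002
	 * gives (0x002 - 0x3fe) & 0x3ff = 4 completions.
	 */
	return (tx_ev_desc_ptr - read_count) & ptr_mask;
}
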
845
846/* Detect errors included in the rx_evt_pkt_ok bit. */
847static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
848 const efx_qword_t *event)
849{
850 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
851 struct efx_nic *efx = rx_queue->efx;
852 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
853 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
854 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
855 bool rx_ev_other_err, rx_ev_pause_frm;
856 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
857 unsigned rx_ev_pkt_type;
858
859 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
860 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
861 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
862 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
863 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
864 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
865 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
866 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
867 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
868 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
869 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
870 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
871 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
872 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
873 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
874
875 /* Every error apart from tobe_disc and pause_frm */
876 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
877 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
878 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
879
880 /* Count errors that are not in MAC stats. Ignore expected
881 * checksum errors during self-test. */
882 if (rx_ev_frm_trunc)
883 ++channel->n_rx_frm_trunc;
884 else if (rx_ev_tobe_disc)
885 ++channel->n_rx_tobe_disc;
886 else if (!efx->loopback_selftest) {
887 if (rx_ev_ip_hdr_chksum_err)
888 ++channel->n_rx_ip_hdr_chksum_err;
889 else if (rx_ev_tcp_udp_chksum_err)
890 ++channel->n_rx_tcp_udp_chksum_err;
891 }
892
893 /* TOBE_DISC is expected on unicast mismatches; don't print out an
894 * error message. FRM_TRUNC indicates RXDP dropped the packet due
895 * to a FIFO overflow.
896 */
897#ifdef DEBUG
898 if (rx_ev_other_err && net_ratelimit()) {
899 netif_dbg(efx, rx_err, efx->net_dev,
900 " RX queue %d unexpected RX event "
901 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
902 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
903 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
904 rx_ev_ip_hdr_chksum_err ?
905 " [IP_HDR_CHKSUM_ERR]" : "",
906 rx_ev_tcp_udp_chksum_err ?
907 " [TCP_UDP_CHKSUM_ERR]" : "",
908 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
909 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
910 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
911 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
912 rx_ev_pause_frm ? " [PAUSE]" : "");
913 }
914#endif
915
916 /* The frame must be discarded if any of these are true. */
917 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
918 rx_ev_tobe_disc | rx_ev_pause_frm) ?
919 EFX_RX_PKT_DISCARD : 0;
920}
921
922/* Handle receive events that are not in-order. Return true if this
923 * can be handled as a partial packet discard, false if it's more
924 * serious.
925 */
926static bool
927efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
928{
929 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
930 struct efx_nic *efx = rx_queue->efx;
931 unsigned expected, dropped;
932
933 if (rx_queue->scatter_n &&
934 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
935 rx_queue->ptr_mask)) {
936 ++channel->n_rx_nodesc_trunc;
937 return true;
938 }
939
940 expected = rx_queue->removed_count & rx_queue->ptr_mask;
941 dropped = (index - expected) & rx_queue->ptr_mask;
942 netif_info(efx, rx_err, efx->net_dev,
943 "dropped %d events (index=%d expected=%d)\n",
944 dropped, index, expected);
945
946 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
947 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
948 return false;
949}
950
951/* Handle a packet received event
952 *
953 * The NIC gives a "discard" flag if it's a unicast packet with the
954 * wrong destination address.
955 * The "is multicast" and "matches multicast filter" flags can also be
956 * used to discard non-matching multicast packets.
957 */
958static void
959efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
960{
961 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
962 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
963 unsigned expected_ptr;
964 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
965 u16 flags;
966 struct efx_rx_queue *rx_queue;
967 struct efx_nic *efx = channel->efx;
968
969 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
970 return;
971
972 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
973 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
974 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
975 channel->channel);
976
977 rx_queue = efx_channel_get_rx_queue(channel);
978
979 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
980 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
981 rx_queue->ptr_mask);
982
983 /* Check for partial drops and other errors */
984 if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
985 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
986 if (rx_ev_desc_ptr != expected_ptr &&
987 !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
988 return;
989
990 /* Discard all pending fragments */
991 if (rx_queue->scatter_n) {
992 efx_rx_packet(
993 rx_queue,
994 rx_queue->removed_count & rx_queue->ptr_mask,
995 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
996 rx_queue->removed_count += rx_queue->scatter_n;
997 rx_queue->scatter_n = 0;
998 }
999
1000 /* Return if there is no new fragment */
1001 if (rx_ev_desc_ptr != expected_ptr)
1002 return;
1003
1004 /* Discard new fragment if not SOP */
1005 if (!rx_ev_sop) {
1006 efx_rx_packet(
1007 rx_queue,
1008 rx_queue->removed_count & rx_queue->ptr_mask,
1009 1, 0, EFX_RX_PKT_DISCARD);
1010 ++rx_queue->removed_count;
1011 return;
1012 }
1013 }
1014
1015 ++rx_queue->scatter_n;
1016 if (rx_ev_cont)
1017 return;
1018
1019 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1020 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1021 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1022
1023 if (likely(rx_ev_pkt_ok)) {
1024 /* If packet is marked as OK then we can rely on the
1025 * hardware checksum and classification.
1026 */
1027 flags = 0;
1028 switch (rx_ev_hdr_type) {
1029 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1030 flags |= EFX_RX_PKT_TCP;
1031 /* fall through */
1032 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1033 flags |= EFX_RX_PKT_CSUMMED;
1034 /* fall through */
1035 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1036 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1037 break;
1038 }
1039 } else {
1040 flags = efx_farch_handle_rx_not_ok(rx_queue, event);
1041 }
1042
1043 /* Detect multicast packets that didn't match the filter */
1044 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1045 if (rx_ev_mcast_pkt) {
1046 unsigned int rx_ev_mcast_hash_match =
1047 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1048
1049 if (unlikely(!rx_ev_mcast_hash_match)) {
1050 ++channel->n_rx_mcast_mismatch;
1051 flags |= EFX_RX_PKT_DISCARD;
1052 }
1053 }
1054
1055 channel->irq_mod_score += 2;
1056
1057 /* Handle received packet */
1058 efx_rx_packet(rx_queue,
1059 rx_queue->removed_count & rx_queue->ptr_mask,
1060 rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1061 rx_queue->removed_count += rx_queue->scatter_n;
1062 rx_queue->scatter_n = 0;
1063}
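
/* A minimal sketch (not part of the driver) of the in-order check above.
 * A scattered packet occupies scatter_n consecutive descriptors starting
 * at removed_count, so the next completion must land at
 * (removed_count + scatter_n) modulo the ring, and a start-of-packet
 * event is only legitimate when no fragments are pending.
 * example_rx_event_in_order() is a hypothetical name.
 */
static inline bool
example_rx_event_in_order(const struct efx_rx_queue *rx_queue,
			  unsigned int desc_ptr, bool is_sop)
{
	unsigned int expected = (rx_queue->removed_count +
				 rx_queue->scatter_n) & rx_queue->ptr_mask;

	return desc_ptr == expected && is_sop == (rx_queue->scatter_n == 0);
}
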
1064
1065/* If this flush done event corresponds to a &struct efx_tx_queue, then
1066 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1067 * of all transmit completions.
1068 */
1069static void
1070efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1071{
1072 struct efx_tx_queue *tx_queue;
1073 int qid;
1074
1075 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1076 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1077 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1078 qid % EFX_TXQ_TYPES);
1079 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1080 efx_farch_magic_event(tx_queue->channel,
1081 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1082 }
1083 }
1084}
1085
1086/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1087 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1088 * the RX queue back to the mask of RX queues in need of flushing.
1089 */
1090static void
1091efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1092{
1093 struct efx_channel *channel;
1094 struct efx_rx_queue *rx_queue;
1095 int qid;
1096 bool failed;
1097
1098 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1099 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1100 if (qid >= efx->n_channels)
1101 return;
1102 channel = efx_get_channel(efx, qid);
1103 if (!efx_channel_has_rx_queue(channel))
1104 return;
1105 rx_queue = efx_channel_get_rx_queue(channel);
1106
1107 if (failed) {
1108 netif_info(efx, hw, efx->net_dev,
1109 "RXQ %d flush retry\n", qid);
1110 rx_queue->flush_pending = true;
1111 atomic_inc(&efx->rxq_flush_pending);
1112 } else {
1113 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1114 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1115 }
1116 atomic_dec(&efx->rxq_flush_outstanding);
1117 if (efx_farch_flush_wake(efx))
1118 wake_up(&efx->flush_wq);
1119}
1120
1121static void
1122efx_farch_handle_drain_event(struct efx_channel *channel)
1123{
1124 struct efx_nic *efx = channel->efx;
1125
1126 WARN_ON(atomic_read(&efx->active_queues) == 0);
1127 atomic_dec(&efx->active_queues);
1128 if (efx_farch_flush_wake(efx))
1129 wake_up(&efx->flush_wq);
1130}
1131
1132static void efx_farch_handle_generated_event(struct efx_channel *channel,
1133 efx_qword_t *event)
1134{
1135 struct efx_nic *efx = channel->efx;
1136 struct efx_rx_queue *rx_queue =
1137 efx_channel_has_rx_queue(channel) ?
1138 efx_channel_get_rx_queue(channel) : NULL;
1139 unsigned magic, code;
1140
1141 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1142 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1143
1144 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1145 channel->event_test_cpu = raw_smp_processor_id();
1146 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1147 /* The queue must be empty, so we won't receive any RX
1148 * events and efx_process_channel() won't refill the
1149 * queue. Refill it here. */
1150 efx_fast_push_rx_descriptors(rx_queue);
1151 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1152 efx_farch_handle_drain_event(channel);
1153 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1154 efx_farch_handle_drain_event(channel);
1155 } else {
1156 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1157 "generated event "EFX_QWORD_FMT"\n",
1158 channel->channel, EFX_QWORD_VAL(*event));
1159 }
1160}
1161
1162static void
1163efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1164{
1165 struct efx_nic *efx = channel->efx;
1166 unsigned int ev_sub_code;
1167 unsigned int ev_sub_data;
1168
1169 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1170 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1171
1172 switch (ev_sub_code) {
1173 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1174 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1175 channel->channel, ev_sub_data);
1176 efx_farch_handle_tx_flush_done(efx, event);
1177 efx_sriov_tx_flush_done(efx, event);
1178 break;
1179 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1180 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1181 channel->channel, ev_sub_data);
1182 efx_farch_handle_rx_flush_done(efx, event);
1183 efx_sriov_rx_flush_done(efx, event);
1184 break;
1185 case FSE_AZ_EVQ_INIT_DONE_EV:
1186 netif_dbg(efx, hw, efx->net_dev,
1187 "channel %d EVQ %d initialised\n",
1188 channel->channel, ev_sub_data);
1189 break;
1190 case FSE_AZ_SRM_UPD_DONE_EV:
1191 netif_vdbg(efx, hw, efx->net_dev,
1192 "channel %d SRAM update done\n", channel->channel);
1193 break;
1194 case FSE_AZ_WAKE_UP_EV:
1195 netif_vdbg(efx, hw, efx->net_dev,
1196 "channel %d RXQ %d wakeup event\n",
1197 channel->channel, ev_sub_data);
1198 break;
1199 case FSE_AZ_TIMER_EV:
1200 netif_vdbg(efx, hw, efx->net_dev,
1201 "channel %d RX queue %d timer expired\n",
1202 channel->channel, ev_sub_data);
1203 break;
1204 case FSE_AA_RX_RECOVER_EV:
1205 netif_err(efx, rx_err, efx->net_dev,
1206 "channel %d seen DRIVER RX_RESET event. "
1207 "Resetting.\n", channel->channel);
1208 atomic_inc(&efx->rx_reset);
1209 efx_schedule_reset(efx,
1210 EFX_WORKAROUND_6555(efx) ?
1211 RESET_TYPE_RX_RECOVERY :
1212 RESET_TYPE_DISABLE);
1213 break;
1214 case FSE_BZ_RX_DSC_ERROR_EV:
1215 if (ev_sub_data < EFX_VI_BASE) {
1216 netif_err(efx, rx_err, efx->net_dev,
1217 "RX DMA Q %d reports descriptor fetch error."
1218 " RX Q %d is disabled.\n", ev_sub_data,
1219 ev_sub_data);
1220 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1221 } else
1222 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1223 break;
1224 case FSE_BZ_TX_DSC_ERROR_EV:
1225 if (ev_sub_data < EFX_VI_BASE) {
1226 netif_err(efx, tx_err, efx->net_dev,
1227 "TX DMA Q %d reports descriptor fetch error."
1228 " TX Q %d is disabled.\n", ev_sub_data,
1229 ev_sub_data);
1230 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1231 } else
1232 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1233 break;
1234 default:
1235 netif_vdbg(efx, hw, efx->net_dev,
1236 "channel %d unknown driver event code %d "
1237 "data %04x\n", channel->channel, ev_sub_code,
1238 ev_sub_data);
1239 break;
1240 }
1241}
1242
1243int efx_farch_ev_process(struct efx_channel *channel, int budget)
1244{
1245 struct efx_nic *efx = channel->efx;
1246 unsigned int read_ptr;
1247 efx_qword_t event, *p_event;
1248 int ev_code;
1249 int tx_packets = 0;
1250 int spent = 0;
1251
1252 read_ptr = channel->eventq_read_ptr;
1253
1254 for (;;) {
1255 p_event = efx_event(channel, read_ptr);
1256 event = *p_event;
1257
1258 if (!efx_event_present(&event))
1259 /* End of events */
1260 break;
1261
1262 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1263 "channel %d event is "EFX_QWORD_FMT"\n",
1264 channel->channel, EFX_QWORD_VAL(event));
1265
1266 /* Clear this event by marking it all ones */
1267 EFX_SET_QWORD(*p_event);
1268
1269 ++read_ptr;
1270
1271 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1272
1273 switch (ev_code) {
1274 case FSE_AZ_EV_CODE_RX_EV:
1275 efx_farch_handle_rx_event(channel, &event);
1276 if (++spent == budget)
1277 goto out;
1278 break;
1279 case FSE_AZ_EV_CODE_TX_EV:
1280 tx_packets += efx_farch_handle_tx_event(channel,
1281 &event);
1282 if (tx_packets > efx->txq_entries) {
1283 spent = budget;
1284 goto out;
1285 }
1286 break;
1287 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1288 efx_farch_handle_generated_event(channel, &event);
1289 break;
1290 case FSE_AZ_EV_CODE_DRIVER_EV:
1291 efx_farch_handle_driver_event(channel, &event);
1292 break;
1293 case FSE_CZ_EV_CODE_USER_EV:
1294 efx_sriov_event(channel, &event);
1295 break;
1296 case FSE_CZ_EV_CODE_MCDI_EV:
1297 efx_mcdi_process_event(channel, &event);
1298 break;
1299 case FSE_AZ_EV_CODE_GLOBAL_EV:
1300 if (efx->type->handle_global_event &&
1301 efx->type->handle_global_event(channel, &event))
1302 break;
1303 /* else fall through */
1304 default:
1305 netif_err(channel->efx, hw, channel->efx->net_dev,
1306 "channel %d unknown event type %d (data "
1307 EFX_QWORD_FMT ")\n", channel->channel,
1308 ev_code, EFX_QWORD_VAL(event));
1309 }
1310 }
1311
1312out:
1313 channel->eventq_read_ptr = read_ptr;
1314 return spent;
1315}
1316
1317/* Allocate buffer table entries for event queue */
1318int efx_farch_ev_probe(struct efx_channel *channel)
1319{
1320 struct efx_nic *efx = channel->efx;
1321 unsigned entries;
1322
1323 entries = channel->eventq_mask + 1;
1324 return efx_alloc_special_buffer(efx, &channel->eventq,
1325 entries * sizeof(efx_qword_t));
1326}
1327
1328int efx_farch_ev_init(struct efx_channel *channel)
1329{
1330 efx_oword_t reg;
1331 struct efx_nic *efx = channel->efx;
1332
1333 netif_dbg(efx, hw, efx->net_dev,
1334 "channel %d event queue in special buffers %d-%d\n",
1335 channel->channel, channel->eventq.index,
1336 channel->eventq.index + channel->eventq.entries - 1);
1337
1338 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1339 EFX_POPULATE_OWORD_3(reg,
1340 FRF_CZ_TIMER_Q_EN, 1,
1341 FRF_CZ_HOST_NOTIFY_MODE, 0,
1342 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1343 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1344 }
1345
1346 /* Pin event queue buffer */
1347 efx_init_special_buffer(efx, &channel->eventq);
1348
1349 /* Fill event queue with all ones (i.e. empty events) */
1350 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1351
1352 /* Push event queue to card */
1353 EFX_POPULATE_OWORD_3(reg,
1354 FRF_AZ_EVQ_EN, 1,
1355 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1356 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1357 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1358 channel->channel);
1359
1360 return 0;
1361}
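
/* A minimal sketch (not part of the driver) of the FRF_AZ_EVQ_SIZE
 * encoding used above: the field takes the log2 of the queue size, and
 * because eventq.entries is a power of two, __ffs() (the index of the
 * lowest set bit) yields exactly that. example_evq_size_field() is a
 * hypothetical name.
 */
static inline unsigned int example_evq_size_field(unsigned int entries)
{
	/* e.g. 512 entries: __ffs(512) = 9, and 2^9 = 512 */
	return __ffs(entries);
}
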
1362
1363void efx_farch_ev_fini(struct efx_channel *channel)
1364{
1365 efx_oword_t reg;
1366 struct efx_nic *efx = channel->efx;
1367
1368 /* Remove event queue from card */
1369 EFX_ZERO_OWORD(reg);
1370 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1371 channel->channel);
1372 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1373 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1374
1375 /* Unpin event queue */
1376 efx_fini_special_buffer(efx, &channel->eventq);
1377}
1378
1379/* Free buffers backing event queue */
1380void efx_farch_ev_remove(struct efx_channel *channel)
1381{
1382 efx_free_special_buffer(channel->efx, &channel->eventq);
1383}
1384
1385
1386void efx_farch_ev_test_generate(struct efx_channel *channel)
1387{
1388 efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1389}
1390
1391void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
1392{
1393 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1394 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1395}
1396
1397/**************************************************************************
1398 *
1399 * Hardware interrupts
1400 * The hardware interrupt handler does very little work; all the event
1401 * queue processing is carried out by per-channel tasklets.
1402 *
1403 **************************************************************************/
1404
1405/* Enable/disable/generate interrupts */
1406static inline void efx_farch_interrupts(struct efx_nic *efx,
1407 bool enabled, bool force)
1408{
1409 efx_oword_t int_en_reg_ker;
1410
1411 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1412 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1413 FRF_AZ_KER_INT_KER, force,
1414 FRF_AZ_DRV_INT_EN_KER, enabled);
1415 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1416}
1417
1418void efx_farch_irq_enable_master(struct efx_nic *efx)
1419{
1420 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1421 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1422
1423 efx_farch_interrupts(efx, true, false);
1424}
1425
1426void efx_farch_irq_disable_master(struct efx_nic *efx)
1427{
1428 /* Disable interrupts */
1429 efx_farch_interrupts(efx, false, false);
1430}
1431
1432/* Generate a test interrupt
1433 * Interrupts must already have been enabled, otherwise nasty things
1434 * may happen.
1435 */
1436void efx_farch_irq_test_generate(struct efx_nic *efx)
1437{
1438 efx_farch_interrupts(efx, true, true);
1439}
1440
1441/* Process a fatal interrupt
1442 * Disable bus mastering ASAP and schedule a reset
1443 */
1444irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
1445{
1446 struct falcon_nic_data *nic_data = efx->nic_data;
1447 efx_oword_t *int_ker = efx->irq_status.addr;
1448 efx_oword_t fatal_intr;
1449 int error, mem_perr;
1450
1451 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1452 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1453
1454 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1455 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1456 EFX_OWORD_VAL(fatal_intr),
1457 error ? "disabling bus mastering" : "no recognised error");
1458
1459 /* If this is a memory parity error dump which blocks are offending */
1460 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1461 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1462 if (mem_perr) {
1463 efx_oword_t reg;
1464 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1465 netif_err(efx, hw, efx->net_dev,
1466 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1467 EFX_OWORD_VAL(reg));
1468 }
1469
1470 /* Disable both devices */
1471 pci_clear_master(efx->pci_dev);
1472 if (efx_nic_is_dual_func(efx))
1473 pci_clear_master(nic_data->pci_dev2);
1474 efx_farch_irq_disable_master(efx);
1475
1476 /* Count errors and reset or disable the NIC accordingly */
1477 if (efx->int_error_count == 0 ||
1478 time_after(jiffies, efx->int_error_expire)) {
1479 efx->int_error_count = 0;
1480 efx->int_error_expire =
1481 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1482 }
1483 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1484 netif_err(efx, hw, efx->net_dev,
1485 "SYSTEM ERROR - reset scheduled\n");
1486 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1487 } else {
1488 netif_err(efx, hw, efx->net_dev,
1489 "SYSTEM ERROR - max number of errors seen."
1490 "NIC will be disabled\n");
1491 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1492 }
1493
1494 return IRQ_HANDLED;
1495}
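
/* A minimal sketch (hypothetical name) of the windowed error counting
 * above: the count restarts once the expiry time passes, so only a
 * burst of max errors inside one window (EFX_MAX_INT_ERRORS within
 * EFX_INT_ERROR_EXPIRE seconds in the code above) disables the NIC.
 */
static inline bool example_error_burst(unsigned int *count,
				       unsigned long *expire,
				       unsigned int max,
				       unsigned long window_jiffies)
{
	if (*count == 0 || time_after(jiffies, *expire)) {
		*count = 0;
		*expire = jiffies + window_jiffies;
	}
	return ++*count >= max;	/* true => give up and disable */
}
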
1496
1497/* Handle a legacy interrupt
1498 * Acknowledges the interrupt and schedules event queue processing.
1499 */
1500irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
1501{
1502 struct efx_nic *efx = dev_id;
1503 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1504 efx_oword_t *int_ker = efx->irq_status.addr;
1505 irqreturn_t result = IRQ_NONE;
1506 struct efx_channel *channel;
1507 efx_dword_t reg;
1508 u32 queues;
1509 int syserr;
1510
1511 /* Read the ISR which also ACKs the interrupts */
1512 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1513 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1514
1515 /* Legacy interrupts are disabled too late by the EEH kernel
1516 * code. Disable them earlier.
1517 * If an EEH error occurred, the read will have returned all ones.
1518 */
1519 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1520 !efx->eeh_disabled_legacy_irq) {
1521 disable_irq_nosync(efx->legacy_irq);
1522 efx->eeh_disabled_legacy_irq = true;
1523 }
1524
1525 /* Handle non-event-queue sources */
1526 if (queues & (1U << efx->irq_level) && soft_enabled) {
1527 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1528 if (unlikely(syserr))
1529 return efx_farch_fatal_interrupt(efx);
1530 efx->last_irq_cpu = raw_smp_processor_id();
1531 }
1532
1533 if (queues != 0) {
1534 efx->irq_zero_count = 0;
1535
1536 /* Schedule processing of any interrupting queues */
1537 if (likely(soft_enabled)) {
1538 efx_for_each_channel(channel, efx) {
1539 if (queues & 1)
1540 efx_schedule_channel_irq(channel);
1541 queues >>= 1;
1542 }
1543 }
1544 result = IRQ_HANDLED;
1545
1546 } else {
1547 efx_qword_t *event;
1548
1549 /* Legacy ISR read can return zero once (SF bug 15783) */
1550
1551 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1552 * because this might be a shared interrupt. */
1553 if (efx->irq_zero_count++ == 0)
1554 result = IRQ_HANDLED;
1555
1556 /* Ensure we schedule or rearm all event queues */
1557 if (likely(soft_enabled)) {
1558 efx_for_each_channel(channel, efx) {
1559 event = efx_event(channel,
1560 channel->eventq_read_ptr);
1561 if (efx_event_present(event))
1562 efx_schedule_channel_irq(channel);
1563 else
1564 efx_farch_ev_read_ack(channel);
1565 }
1566 }
1567 }
1568
1569 if (result == IRQ_HANDLED)
1570 netif_vdbg(efx, intr, efx->net_dev,
1571 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1572 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1573
1574 return result;
1575}
1576
1577/* Handle an MSI interrupt
1578 *
1579 * Handle an MSI hardware interrupt. This routine schedules event
1580 * queue processing. No interrupt acknowledgement cycle is necessary.
1581 * Also, we never need to check that the interrupt is for us, since
1582 * MSI interrupts cannot be shared.
1583 */
1584irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
1585{
1586 struct efx_msi_context *context = dev_id;
1587 struct efx_nic *efx = context->efx;
1588 efx_oword_t *int_ker = efx->irq_status.addr;
1589 int syserr;
1590
1591 netif_vdbg(efx, intr, efx->net_dev,
1592 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1593 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1594
1595 if (unlikely(!ACCESS_ONCE(efx->irq_soft_enabled)))
1596 return IRQ_HANDLED;
1597
1598 /* Handle non-event-queue sources */
1599 if (context->index == efx->irq_level) {
1600 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1601 if (unlikely(syserr))
1602 return efx_farch_fatal_interrupt(efx);
1603 efx->last_irq_cpu = raw_smp_processor_id();
1604 }
1605
1606 /* Schedule processing of the channel */
1607 efx_schedule_channel_irq(efx->channel[context->index]);
1608
1609 return IRQ_HANDLED;
1610}
1611
1612
1613/* Set up the RSS indirection table.
1614 * This maps from the packet's RSS hash value to an RX queue.
1615 */
1616void efx_farch_rx_push_indir_table(struct efx_nic *efx)
1617{
1618 size_t i = 0;
1619 efx_dword_t dword;
1620
1621 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1622 return;
1623
1624 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1625 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1626
1627 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1628 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1629 efx->rx_indir_table[i]);
1630 efx_writed(efx, &dword,
1631 FR_BZ_RX_INDIRECTION_TBL +
1632 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1633 }
1634}
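
/* A minimal sketch (not part of the driver) of the receive-side lookup
 * this table drives, assuming the hardware indexes the table with the
 * low bits of the packet's RSS hash, which is what the row-by-row fill
 * above suggests. The table size is a power of two, so the mask is a
 * modulo. example_rss_queue() is a hypothetical name.
 */
static inline u32 example_rss_queue(const struct efx_nic *efx, u32 rss_hash)
{
	return efx->rx_indir_table[rss_hash &
				   (FR_BZ_RX_INDIRECTION_TBL_ROWS - 1)];
}
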
1635
1636/* Looks at available SRAM resources and works out how many queues we
1637 * can support, and where things like descriptor caches should live.
1638 *
1639 * SRAM is split up as follows:
1640 * 0                   buftbl entries for channels
1641 * efx->vf_buftbl_base buftbl entries for SR-IOV
1642 * efx->rx_dc_base     RX descriptor caches
1643 * efx->tx_dc_base     TX descriptor caches
1644 */
1645void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1646{
1647 unsigned vi_count, buftbl_min;
1648
1649 /* Account for the buffer table entries backing the datapath channels
1650 * and the descriptor caches for those channels.
1651 */
1652 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1653 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1654 efx->n_channels * EFX_MAX_EVQ_SIZE)
1655 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1656 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1657
1658#ifdef CONFIG_SFC_SRIOV
1659 if (efx_sriov_wanted(efx)) {
1660 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1661
1662 efx->vf_buftbl_base = buftbl_min;
1663
1664 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1665 vi_count = max(vi_count, EFX_VI_BASE);
1666 buftbl_free = (sram_lim_qw - buftbl_min -
1667 vi_count * vi_dc_entries);
1668
1669 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1670 efx_vf_size(efx));
1671 vf_limit = min(buftbl_free / entries_per_vf,
1672 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1673
1674 if (efx->vf_count > vf_limit) {
1675 netif_err(efx, probe, efx->net_dev,
1676 "Reducing VF count from from %d to %d\n",
1677 efx->vf_count, vf_limit);
1678 efx->vf_count = vf_limit;
1679 }
1680 vi_count += efx->vf_count * efx_vf_size(efx);
1681 }
1682#endif
1683
1684 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1685 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1686}
1687
1688u32 efx_farch_fpga_ver(struct efx_nic *efx)
1689{
1690 efx_oword_t altera_build;
1691 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1692 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1693}
1694
1695void efx_farch_init_common(struct efx_nic *efx)
1696{
1697 efx_oword_t temp;
1698
1699 /* Set positions of descriptor caches in SRAM. */
1700 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1701 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1702 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1703 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1704
1705 /* Set TX descriptor cache size. */
1706 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1707 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1708 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1709
1710 /* Set RX descriptor cache size. Set low watermark to size-8, as
1711 * this allows most efficient prefetching.
1712 */
1713 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1714 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1715 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1716 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1717 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1718
1719 /* Program INT_KER address */
1720 EFX_POPULATE_OWORD_2(temp,
1721 FRF_AZ_NORM_INT_VEC_DIS_KER,
1722 EFX_INT_MODE_USE_MSI(efx),
1723 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1724 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1725
1726 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1727 /* Use an interrupt level unused by event queues */
1728 efx->irq_level = 0x1f;
1729 else
1730 /* Use a valid MSI-X vector */
1731 efx->irq_level = 0;
1732
1733 /* Enable all the genuinely fatal interrupts. (They are still
1734 * masked by the overall interrupt mask, controlled by
1735 * falcon_interrupts()).
1736 *
1737 * Note: All other fatal interrupts are enabled
1738 */
1739 EFX_POPULATE_OWORD_3(temp,
1740 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1741 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1742 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1743 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1744 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1745 EFX_INVERT_OWORD(temp);
1746 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1747
1748 efx_farch_rx_push_indir_table(efx);
1749
1750 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1751 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1752 */
1753 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1754 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1755 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1756 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1757 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1758 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1759 /* Enable SW_EV to inherit in char driver - assume harmless here */
1760 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1761 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1762 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1763 /* Disable hardware watchdog which can misfire */
1764 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1765 /* Squash TX of packets of 16 bytes or less */
1766 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1767 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1768 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1769
1770 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1771 EFX_POPULATE_OWORD_4(temp,
1772 /* Default values */
1773 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1774 FRF_BZ_TX_PACE_SB_AF, 0xb,
1775 FRF_BZ_TX_PACE_FB_BASE, 0,
1776 /* Allow large pace values in the
1777 * fast bin. */
1778 FRF_BZ_TX_PACE_BIN_TH,
1779 FFE_BZ_TX_PACE_RESERVED);
1780 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1781 }
1782}
1783
1784/**************************************************************************
1785 *
1786 * Filter tables
1787 *
1788 **************************************************************************
1789 */
1790
1791/* "Fudge factors" - difference between programmed value and actual depth.
1792 * Due to pipelined implementation we need to program H/W with a value that
1793 * is larger than the hop limit we want.
1794 */
1795#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1796#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
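
/* A minimal sketch (not driver code) of how the fudge factors are
 * applied: if the driver wants a maximum search depth of 5 for a
 * wildcard type, the value programmed into the search-limit field is
 * 5 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD = 8, which is exactly what
 * efx_farch_filter_push_rx_config() below does.
 * example_programmed_limit() is a hypothetical name.
 */
static inline unsigned int example_programmed_limit(unsigned int depth,
						    bool is_full)
{
	return depth + (is_full ? EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL :
				  EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
}
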
1797
1798/* Hard maximum search limit. Hardware will time-out beyond 200-something.
1799 * We also need to avoid infinite loops in efx_farch_filter_search() when the
1800 * table is full.
1801 */
1802#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
1803
1804/* Don't try very hard to find space for performance hints, as this is
1805 * counter-productive. */
1806#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1807
1808enum efx_farch_filter_type {
1809 EFX_FARCH_FILTER_TCP_FULL = 0,
1810 EFX_FARCH_FILTER_TCP_WILD,
1811 EFX_FARCH_FILTER_UDP_FULL,
1812 EFX_FARCH_FILTER_UDP_WILD,
1813 EFX_FARCH_FILTER_MAC_FULL = 4,
1814 EFX_FARCH_FILTER_MAC_WILD,
1815 EFX_FARCH_FILTER_UC_DEF = 8,
1816 EFX_FARCH_FILTER_MC_DEF,
1817 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
1818};
1819
1820enum efx_farch_filter_table_id {
1821 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
1822 EFX_FARCH_FILTER_TABLE_RX_MAC,
1823 EFX_FARCH_FILTER_TABLE_RX_DEF,
1824 EFX_FARCH_FILTER_TABLE_TX_MAC,
1825 EFX_FARCH_FILTER_TABLE_COUNT,
1826};
1827
1828enum efx_farch_filter_index {
1829 EFX_FARCH_FILTER_INDEX_UC_DEF,
1830 EFX_FARCH_FILTER_INDEX_MC_DEF,
1831 EFX_FARCH_FILTER_SIZE_RX_DEF,
1832};
1833
1834struct efx_farch_filter_spec {
1835 u8 type:4;
1836 u8 priority:4;
1837 u8 flags;
1838 u16 dmaq_id;
1839 u32 data[3];
1840};
1841
1842struct efx_farch_filter_table {
1843 enum efx_farch_filter_table_id id;
1844 u32 offset; /* address of table relative to BAR */
1845 unsigned size; /* number of entries */
1846 unsigned step; /* step between entries */
1847 unsigned used; /* number currently used */
1848 unsigned long *used_bitmap;
1849 struct efx_farch_filter_spec *spec;
1850 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
1851};
1852
1853struct efx_farch_filter_state {
1854 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
1855};
1856
1857static void
1858efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1859 struct efx_farch_filter_table *table,
1860 unsigned int filter_idx);
1861
1862/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1863 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
1864static u16 efx_farch_filter_hash(u32 key)
1865{
1866 u16 tmp;
1867
1868 /* First 16 rounds */
1869 tmp = 0x1fff ^ key >> 16;
1870 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1871 tmp = tmp ^ tmp >> 9;
1872 /* Last 16 rounds */
1873 tmp = tmp ^ tmp << 13 ^ key;
1874 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1875 return tmp ^ tmp >> 9;
1876}
1877
1878/* To allow for hash collisions, filter search continues at these
1879 * increments from the first possible entry selected by the hash. */
1880static u16 efx_farch_filter_increment(u32 key)
1881{
1882 return key * 2 - 1;
1883}
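
/* A minimal sketch (not part of the driver) of how the two helpers
 * above combine into the open-addressing probe sequence used by
 * efx_farch_filter_insert(): probe k lands on hash + k * increment,
 * masked to the power-of-two table size. The increment (key * 2 - 1)
 * is always odd, so the sequence visits every slot before repeating.
 * example_probe_slot() is a hypothetical name.
 */
static inline unsigned int example_probe_slot(u32 key, unsigned int k,
					      unsigned int table_size)
{
	return (efx_farch_filter_hash(key) +
		k * efx_farch_filter_increment(key)) & (table_size - 1);
}
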
1884
1885static enum efx_farch_filter_table_id
1886efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
1887{
1888 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1889 (EFX_FARCH_FILTER_TCP_FULL >> 2));
1890 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1891 (EFX_FARCH_FILTER_TCP_WILD >> 2));
1892 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1893 (EFX_FARCH_FILTER_UDP_FULL >> 2));
1894 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1895 (EFX_FARCH_FILTER_UDP_WILD >> 2));
1896 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1897 (EFX_FARCH_FILTER_MAC_FULL >> 2));
1898 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1899 (EFX_FARCH_FILTER_MAC_WILD >> 2));
1900 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
1901 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
1902 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
1903}
1904
1905static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1906{
1907 struct efx_farch_filter_state *state = efx->filter_state;
1908 struct efx_farch_filter_table *table;
1909 efx_oword_t filter_ctl;
1910
1911 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1912
1913 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1914 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1915 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1916 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1917 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1918 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1919 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1920 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1921 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1922 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1923 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1924 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1925 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1926
1927 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1928 if (table->size) {
1929 EFX_SET_OWORD_FIELD(
1930 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1931 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1932 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1933 EFX_SET_OWORD_FIELD(
1934 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1935 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1936 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1937 }
1938
1939 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1940 if (table->size) {
1941 EFX_SET_OWORD_FIELD(
1942 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1943 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1944 EFX_SET_OWORD_FIELD(
1945 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1946 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1947 EFX_FILTER_FLAG_RX_RSS));
1948 EFX_SET_OWORD_FIELD(
1949 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1950 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1951 EFX_SET_OWORD_FIELD(
1952 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1953 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1954 EFX_FILTER_FLAG_RX_RSS));
1955
1956 /* There is a single bit to enable RX scatter for all
1957 * unmatched packets. Only set it if scatter is
1958 * enabled in both filter specs.
1959 */
1960 EFX_SET_OWORD_FIELD(
1961 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1962 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1963 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1964 EFX_FILTER_FLAG_RX_SCATTER));
1965 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1966 /* We don't expose 'default' filters because unmatched
1967 * packets always go to the queue number found in the
1968 * RSS table. But we still need to set the RX scatter
1969 * bit here.
1970 */
1971 EFX_SET_OWORD_FIELD(
1972 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1973 efx->rx_scatter);
1974 }
1975
1976 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1977}
1978
1979static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
1980{
1981 struct efx_farch_filter_state *state = efx->filter_state;
1982 struct efx_farch_filter_table *table;
1983 efx_oword_t tx_cfg;
1984
1985 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1986
1987 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
1988 if (table->size) {
1989 EFX_SET_OWORD_FIELD(
1990 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1991 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1992 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1993 EFX_SET_OWORD_FIELD(
1994 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1995 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1996 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1997 }
1998
1999 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2000}
2001
2002static int
2003efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
2004 const struct efx_filter_spec *gen_spec)
2005{
2006 bool is_full = false;
2007
2008 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
2009 gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
2010 return -EINVAL;
2011
2012 spec->priority = gen_spec->priority;
2013 spec->flags = gen_spec->flags;
2014 spec->dmaq_id = gen_spec->dmaq_id;
2015
2016 switch (gen_spec->match_flags) {
2017 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2018 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
2019 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
2020 is_full = true;
2021 /* fall through */
2022 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2023 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2024 __be32 rhost, host1, host2;
2025 __be16 rport, port1, port2;
2026
2027 EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2028
2029 if (gen_spec->ether_type != htons(ETH_P_IP))
2030 return -EPROTONOSUPPORT;
2031 if (gen_spec->loc_port == 0 ||
2032 (is_full && gen_spec->rem_port == 0))
2033 return -EADDRNOTAVAIL;
2034 switch (gen_spec->ip_proto) {
2035 case IPPROTO_TCP:
2036 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
2037 EFX_FARCH_FILTER_TCP_WILD);
2038 break;
2039 case IPPROTO_UDP:
2040 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
2041 EFX_FARCH_FILTER_UDP_WILD);
2042 break;
2043 default:
2044 return -EPROTONOSUPPORT;
2045 }
2046
2047 /* Filter is constructed in terms of source and destination,
2048 * with the odd wrinkle that the ports are swapped in a UDP
2049 * wildcard filter. We need to convert from local and remote
2050 * (= zero for wildcard) addresses.
2051 */
2052 rhost = is_full ? gen_spec->rem_host[0] : 0;
2053 rport = is_full ? gen_spec->rem_port : 0;
2054 host1 = rhost;
2055 host2 = gen_spec->loc_host[0];
2056 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2057 port1 = gen_spec->loc_port;
2058 port2 = rport;
2059 } else {
2060 port1 = rport;
2061 port2 = gen_spec->loc_port;
2062 }
2063 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2064 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2065 spec->data[2] = ntohl(host2);
2066
2067 break;
2068 }
2069
2070 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
2071 is_full = true;
2072 /* fall through */
2073 case EFX_FILTER_MATCH_LOC_MAC:
2074 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2075 EFX_FARCH_FILTER_MAC_WILD);
2076 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2077 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2078 gen_spec->loc_mac[3] << 16 |
2079 gen_spec->loc_mac[4] << 8 |
2080 gen_spec->loc_mac[5]);
2081 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2082 gen_spec->loc_mac[1]);
2083 break;
2084
2085 case EFX_FILTER_MATCH_LOC_MAC_IG:
2086 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2087 EFX_FARCH_FILTER_MC_DEF :
2088 EFX_FARCH_FILTER_UC_DEF);
2089 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2090 break;
2091
2092 default:
2093 return -EPROTONOSUPPORT;
2094 }
2095
2096 return 0;
2097}
2098
2099static void
2100efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2101 const struct efx_farch_filter_spec *spec)
2102{
2103 bool is_full = false;
2104
2105 /* *gen_spec should be completely initialised, to be consistent
2106 * with efx_filter_init_{rx,tx}() and in case we want to copy
2107 * it back to userland.
2108 */
2109 memset(gen_spec, 0, sizeof(*gen_spec));
2110
2111 gen_spec->priority = spec->priority;
2112 gen_spec->flags = spec->flags;
2113 gen_spec->dmaq_id = spec->dmaq_id;
2114
2115 switch (spec->type) {
2116 case EFX_FARCH_FILTER_TCP_FULL:
2117 case EFX_FARCH_FILTER_UDP_FULL:
2118 is_full = true;
2119 /* fall through */
2120 case EFX_FARCH_FILTER_TCP_WILD:
2121 case EFX_FARCH_FILTER_UDP_WILD: {
2122 __be32 host1, host2;
2123 __be16 port1, port2;
2124
2125 gen_spec->match_flags =
2126 EFX_FILTER_MATCH_ETHER_TYPE |
2127 EFX_FILTER_MATCH_IP_PROTO |
2128 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
2129 if (is_full)
2130 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
2131 EFX_FILTER_MATCH_REM_PORT);
2132 gen_spec->ether_type = htons(ETH_P_IP);
2133 gen_spec->ip_proto =
2134 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2135 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2136 IPPROTO_TCP : IPPROTO_UDP;
2137
2138 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2139 port1 = htons(spec->data[0]);
2140 host2 = htonl(spec->data[2]);
2141 port2 = htons(spec->data[1] >> 16);
2142 if (spec->flags & EFX_FILTER_FLAG_TX) {
2143 gen_spec->loc_host[0] = host1;
2144 gen_spec->rem_host[0] = host2;
2145 } else {
2146 gen_spec->loc_host[0] = host2;
2147 gen_spec->rem_host[0] = host1;
2148 }
2149 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2150 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2151 gen_spec->loc_port = port1;
2152 gen_spec->rem_port = port2;
2153 } else {
2154 gen_spec->loc_port = port2;
2155 gen_spec->rem_port = port1;
2156 }
2157
2158 break;
2159 }
2160
2161 case EFX_FARCH_FILTER_MAC_FULL:
2162 is_full = true;
2163 /* fall through */
2164 case EFX_FARCH_FILTER_MAC_WILD:
2165 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
2166 if (is_full)
2167 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2168 gen_spec->loc_mac[0] = spec->data[2] >> 8;
2169 gen_spec->loc_mac[1] = spec->data[2];
2170 gen_spec->loc_mac[2] = spec->data[1] >> 24;
2171 gen_spec->loc_mac[3] = spec->data[1] >> 16;
2172 gen_spec->loc_mac[4] = spec->data[1] >> 8;
2173 gen_spec->loc_mac[5] = spec->data[1];
2174 gen_spec->outer_vid = htons(spec->data[0]);
2175 break;
2176
2177 case EFX_FARCH_FILTER_UC_DEF:
2178 case EFX_FARCH_FILTER_MC_DEF:
2179 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2180 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2181 break;
2182
2183 default:
2184 WARN_ON(1);
2185 break;
2186 }
2187}
2188
2189static void
2190efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
2191 struct efx_farch_filter_spec *spec)
2192{
2193 /* If there's only one channel then disable RSS for non-VF
2194 * traffic, thereby allowing VFs to use RSS when the PF can't.
2195 */
2196 spec->priority = EFX_FILTER_PRI_REQUIRED;
2197 spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
2198 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
2199 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2200 spec->dmaq_id = 0;
2201}
2202
2203/* Build a filter entry and return its n-tuple key. */
2204static u32 efx_farch_filter_build(efx_oword_t *filter,
2205 struct efx_farch_filter_spec *spec)
2206{
2207 u32 data3;
2208
2209 switch (efx_farch_filter_spec_table_id(spec)) {
2210 case EFX_FARCH_FILTER_TABLE_RX_IP: {
2211 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2212 spec->type == EFX_FARCH_FILTER_UDP_WILD);
2213 EFX_POPULATE_OWORD_7(
2214 *filter,
2215 FRF_BZ_RSS_EN,
2216 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2217 FRF_BZ_SCATTER_EN,
2218 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2219 FRF_BZ_TCP_UDP, is_udp,
2220 FRF_BZ_RXQ_ID, spec->dmaq_id,
2221 EFX_DWORD_2, spec->data[2],
2222 EFX_DWORD_1, spec->data[1],
2223 EFX_DWORD_0, spec->data[0]);
2224 data3 = is_udp;
2225 break;
2226 }
2227
2228 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2229 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2230 EFX_POPULATE_OWORD_7(
2231 *filter,
2232 FRF_CZ_RMFT_RSS_EN,
2233 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2234 FRF_CZ_RMFT_SCATTER_EN,
2235 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2236 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2237 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2238 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2239 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2240 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2241 data3 = is_wild;
2242 break;
2243 }
2244
2245 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2246 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2247 EFX_POPULATE_OWORD_5(*filter,
2248 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2249 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2250 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2251 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2252 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2253 data3 = is_wild | spec->dmaq_id << 1;
2254 break;
2255 }
2256
2257 default:
2258 BUG();
2259 }
2260
2261 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2262}
2263
2264static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2265 const struct efx_farch_filter_spec *right)
2266{
2267 if (left->type != right->type ||
2268 memcmp(left->data, right->data, sizeof(left->data)))
2269 return false;
2270
2271 if (left->flags & EFX_FILTER_FLAG_TX &&
2272 left->dmaq_id != right->dmaq_id)
2273 return false;
2274
2275 return true;
2276}
2277
2278/*
2279 * Construct/deconstruct external filter IDs. At least the RX filter
2280 * IDs must be ordered by matching priority, for RX NFC semantics.
2281 *
2282 * Deconstruction needs to be robust against invalid IDs so that
2283 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2284 * accept user-provided IDs.
2285 */
2286
2287#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
2288
2289static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2290 [EFX_FARCH_FILTER_TCP_FULL] = 0,
2291 [EFX_FARCH_FILTER_UDP_FULL] = 0,
2292 [EFX_FARCH_FILTER_TCP_WILD] = 1,
2293 [EFX_FARCH_FILTER_UDP_WILD] = 1,
2294 [EFX_FARCH_FILTER_MAC_FULL] = 2,
2295 [EFX_FARCH_FILTER_MAC_WILD] = 3,
2296 [EFX_FARCH_FILTER_UC_DEF] = 4,
2297 [EFX_FARCH_FILTER_MC_DEF] = 4,
2298};
2299
2300static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2301 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
2302 EFX_FARCH_FILTER_TABLE_RX_IP,
2303 EFX_FARCH_FILTER_TABLE_RX_MAC,
2304 EFX_FARCH_FILTER_TABLE_RX_MAC,
2305 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
2306 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
2307 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
2308};
2309
2310#define EFX_FARCH_FILTER_INDEX_WIDTH 13
2311#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
2312
2313static inline u32
2314efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
2315 unsigned int index)
2316{
2317 unsigned int range;
2318
2319 range = efx_farch_filter_type_match_pri[spec->type];
2320 if (!(spec->flags & EFX_FILTER_FLAG_RX))
2321 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2322
2323 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2324}
2325
2326static inline enum efx_farch_filter_table_id
2327efx_farch_filter_id_table_id(u32 id)
2328{
2329 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2330
2331 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2332 return efx_farch_filter_range_table[range];
2333 else
2334 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2335}
2336
2337static inline unsigned int efx_farch_filter_id_index(u32 id)
2338{
2339 return id & EFX_FARCH_FILTER_INDEX_MASK;
2340}
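
/* A minimal sketch (not part of the driver) of decoding an external
 * filter ID with the helpers above: the match-priority range sits in
 * the bits above EFX_FARCH_FILTER_INDEX_WIDTH and the table index in
 * the bits below it. example_decode_filter_id() is a hypothetical name;
 * the worked values in the comment follow the tables above.
 */
static inline void example_decode_filter_id(u32 id)
{
	enum efx_farch_filter_table_id table_id =
		efx_farch_filter_id_table_id(id);
	unsigned int index = efx_farch_filter_id_index(id);

	/* e.g. id = 2 << EFX_FARCH_FILTER_INDEX_WIDTH | 5 decodes to
	 * EFX_FARCH_FILTER_TABLE_RX_MAC (RX match priority 2), index 5
	 */
	pr_debug("filter id %#x: table %d, index %u\n", id, table_id, index);
}
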
2341
2342u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2343{
2344 struct efx_farch_filter_state *state = efx->filter_state;
2345 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2346 enum efx_farch_filter_table_id table_id;
2347
2348 do {
2349 table_id = efx_farch_filter_range_table[range];
2350 if (state->table[table_id].size != 0)
2351 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2352 state->table[table_id].size;
2353 } while (range--);
2354
2355 return 0;
2356}
2357
2358s32 efx_farch_filter_insert(struct efx_nic *efx,
2359 struct efx_filter_spec *gen_spec,
2360 bool replace_equal)
2361{
2362 struct efx_farch_filter_state *state = efx->filter_state;
2363 struct efx_farch_filter_table *table;
2364 struct efx_farch_filter_spec spec;
2365 efx_oword_t filter;
2366 int rep_index, ins_index;
2367 unsigned int depth = 0;
2368 int rc;
2369
2370 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2371 if (rc)
2372 return rc;
2373
2374 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2375 if (table->size == 0)
2376 return -EINVAL;
2377
2378 netif_vdbg(efx, hw, efx->net_dev,
2379 "%s: type %d search_limit=%d", __func__, spec.type,
2380 table->search_limit[spec.type]);
2381
2382 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2383 /* One filter spec per type */
2384 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2385 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2386 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2387 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2388 ins_index = rep_index;
2389
2390 spin_lock_bh(&efx->filter_lock);
2391 } else {
2392 /* Search concurrently for
2393 * (1) a filter to be replaced (rep_index): any filter
2394 * with the same match values, up to the current
2395 * search depth for this type, and
2396 * (2) the insertion point (ins_index): (1) or any
2397 * free slot before it or up to the maximum search
2398 * depth for this priority
2399 * We fail if we cannot find (2).
2400 *
2401 * We can stop once either
2402 * (a) we find (1), in which case we have definitely
2403 * found (2) as well; or
2404 * (b) we have searched exhaustively for (1), and have
2405 * either found (2) or searched exhaustively for it
2406 */
2407 u32 key = efx_farch_filter_build(&filter, &spec);
2408 unsigned int hash = efx_farch_filter_hash(key);
2409 unsigned int incr = efx_farch_filter_increment(key);
2410 unsigned int max_rep_depth = table->search_limit[spec.type];
2411 unsigned int max_ins_depth =
2412 spec.priority <= EFX_FILTER_PRI_HINT ?
2413 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2414 EFX_FARCH_FILTER_CTL_SRCH_MAX;
2415 unsigned int i = hash & (table->size - 1);
2416
2417 ins_index = -1;
2418 depth = 1;
2419
2420 spin_lock_bh(&efx->filter_lock);
2421
2422 for (;;) {
2423 if (!test_bit(i, table->used_bitmap)) {
2424 if (ins_index < 0)
2425 ins_index = i;
2426 } else if (efx_farch_filter_equal(&spec,
2427 &table->spec[i])) {
2428 /* Case (a) */
2429 if (ins_index < 0)
2430 ins_index = i;
2431 rep_index = i;
2432 break;
2433 }
2434
2435 if (depth >= max_rep_depth &&
2436 (ins_index >= 0 || depth >= max_ins_depth)) {
2437 /* Case (b) */
2438 if (ins_index < 0) {
2439 rc = -EBUSY;
2440 goto out;
2441 }
2442 rep_index = -1;
2443 break;
2444 }
2445
2446 i = (i + incr) & (table->size - 1);
2447 ++depth;
2448 }
2449 }
2450
2451 /* If we found a filter to be replaced, check whether we
2452 * should do so
2453 */
2454 if (rep_index >= 0) {
2455 struct efx_farch_filter_spec *saved_spec =
2456 &table->spec[rep_index];
2457
2458 if (spec.priority == saved_spec->priority && !replace_equal) {
2459 rc = -EEXIST;
2460 goto out;
2461 }
2462 if (spec.priority < saved_spec->priority &&
2463 !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
2464 saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
2465 rc = -EPERM;
2466 goto out;
2467 }
2468 if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
2469 /* Just make sure it won't be removed */
2470 saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2471 rc = 0;
2472 goto out;
2473 }
2474 /* Retain the RX_STACK flag */
2475 spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
2476 }
2477
2478 /* Insert the filter */
2479 if (ins_index != rep_index) {
2480 __set_bit(ins_index, table->used_bitmap);
2481 ++table->used;
2482 }
2483 table->spec[ins_index] = spec;
2484
2485 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2486 efx_farch_filter_push_rx_config(efx);
2487 } else {
2488 if (table->search_limit[spec.type] < depth) {
2489 table->search_limit[spec.type] = depth;
2490 if (spec.flags & EFX_FILTER_FLAG_TX)
2491 efx_farch_filter_push_tx_limits(efx);
2492 else
2493 efx_farch_filter_push_rx_config(efx);
2494 }
2495
2496 efx_writeo(efx, &filter,
2497 table->offset + table->step * ins_index);
2498
2499 /* If we were able to replace a filter by inserting
2500 * at a lower depth, clear the replaced filter
2501 */
2502 if (ins_index != rep_index && rep_index >= 0)
2503 efx_farch_filter_table_clear_entry(efx, table,
2504 rep_index);
2505 }
2506
2507 netif_vdbg(efx, hw, efx->net_dev,
2508 "%s: filter type %d index %d rxq %u set",
2509 __func__, spec.type, ins_index, spec.dmaq_id);
2510 rc = efx_farch_filter_make_id(&spec, ins_index);
2511
2512out:
2513 spin_unlock_bh(&efx->filter_lock);
2514 return rc;
2515}
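
The search loop in efx_farch_filter_insert() is double hashing over a power-of-two table: probing starts at hash(key) and advances by a key-derived increment. In the pre-rename filter.c later in this diff the increment is key * 2 - 1, which is always odd and hence coprime with the table size, so the probe sequence visits every slot once before repeating. A hedged standalone sketch of that probe pattern (illustrative names, not driver API):

/* Assumes table_size is a power of two and incr is odd, as the
 * key * 2 - 1 increment guarantees; an odd step modulo a power of
 * two is a full-cycle permutation of the slots.
 */
static unsigned int probe_next(unsigned int i, unsigned int incr,
			       unsigned int table_size)
{
	return (i + incr) & (table_size - 1);
}

static void probe_demo(void)
{
	unsigned int i = 5, n;

	/* 8-entry table, step 3: visits 0,3,6,1,4,7,2 and returns to 5 */
	for (n = 0; n < 8; n++)
		i = probe_next(i, 3, 8);
}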
2516
2517static void
2518efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2519 struct efx_farch_filter_table *table,
2520 unsigned int filter_idx)
2521{
2522 static efx_oword_t filter;
2523
2524 EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2525 BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2526
2527 __clear_bit(filter_idx, table->used_bitmap);
2528 --table->used;
2529 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2530
2531 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2532
2533 /* If this filter required a greater search depth than
2534 * any other, the search limit for its type can now be
2535 * decreased. However, it is hard to determine that
2536 * unless the table has become completely empty - in
2537 * which case, all its search limits can be set to 0.
2538 */
2539 if (unlikely(table->used == 0)) {
2540 memset(table->search_limit, 0, sizeof(table->search_limit));
2541 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2542 efx_farch_filter_push_tx_limits(efx);
2543 else
2544 efx_farch_filter_push_rx_config(efx);
2545 }
2546}
2547
2548static int efx_farch_filter_remove(struct efx_nic *efx,
2549 struct efx_farch_filter_table *table,
2550 unsigned int filter_idx,
2551 enum efx_filter_priority priority)
2552{
2553 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2554
2555 if (!test_bit(filter_idx, table->used_bitmap) ||
2556 spec->priority > priority)
2557 return -ENOENT;
2558
2559 if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2560 efx_farch_filter_init_rx_for_stack(efx, spec);
2561 efx_farch_filter_push_rx_config(efx);
2562 } else {
2563 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2564 }
2565
2566 return 0;
2567}
2568
2569int efx_farch_filter_remove_safe(struct efx_nic *efx,
2570 enum efx_filter_priority priority,
2571 u32 filter_id)
2572{
2573 struct efx_farch_filter_state *state = efx->filter_state;
2574 enum efx_farch_filter_table_id table_id;
2575 struct efx_farch_filter_table *table;
2576 unsigned int filter_idx;
2577 struct efx_farch_filter_spec *spec;
2578 int rc;
2579
2580 table_id = efx_farch_filter_id_table_id(filter_id);
2581 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2582 return -ENOENT;
2583 table = &state->table[table_id];
2584
2585 filter_idx = efx_farch_filter_id_index(filter_id);
2586 if (filter_idx >= table->size)
2587 return -ENOENT;
2588 spec = &table->spec[filter_idx];
2589
2590 spin_lock_bh(&efx->filter_lock);
2591 rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2592 spin_unlock_bh(&efx->filter_lock);
2593
2594 return rc;
2595}
2596
2597int efx_farch_filter_get_safe(struct efx_nic *efx,
2598 enum efx_filter_priority priority,
2599 u32 filter_id, struct efx_filter_spec *spec_buf)
2600{
2601 struct efx_farch_filter_state *state = efx->filter_state;
2602 enum efx_farch_filter_table_id table_id;
2603 struct efx_farch_filter_table *table;
2604 struct efx_farch_filter_spec *spec;
2605 unsigned int filter_idx;
2606 int rc;
2607
2608 table_id = efx_farch_filter_id_table_id(filter_id);
2609 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2610 return -ENOENT;
2611 table = &state->table[table_id];
2612
2613 filter_idx = efx_farch_filter_id_index(filter_id);
2614 if (filter_idx >= table->size)
2615 return -ENOENT;
2616 spec = &table->spec[filter_idx];
2617
2618 spin_lock_bh(&efx->filter_lock);
2619
2620 if (test_bit(filter_idx, table->used_bitmap) &&
2621 spec->priority == priority) {
2622 efx_farch_filter_to_gen_spec(spec_buf, spec);
2623 rc = 0;
2624 } else {
2625 rc = -ENOENT;
2626 }
2627
2628 spin_unlock_bh(&efx->filter_lock);
2629
2630 return rc;
2631}
2632
2633static void
2634efx_farch_filter_table_clear(struct efx_nic *efx,
2635 enum efx_farch_filter_table_id table_id,
2636 enum efx_filter_priority priority)
2637{
2638 struct efx_farch_filter_state *state = efx->filter_state;
2639 struct efx_farch_filter_table *table = &state->table[table_id];
2640 unsigned int filter_idx;
2641
2642 spin_lock_bh(&efx->filter_lock);
2643 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
2644 efx_farch_filter_remove(efx, table, filter_idx, priority);
2645 spin_unlock_bh(&efx->filter_lock);
2646}
2647
2648void efx_farch_filter_clear_rx(struct efx_nic *efx,
2649 enum efx_filter_priority priority)
2650{
2651 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2652 priority);
2653 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2654 priority);
2655 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2656 priority);
2657}
2658
2659u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2660 enum efx_filter_priority priority)
2661{
2662 struct efx_farch_filter_state *state = efx->filter_state;
2663 enum efx_farch_filter_table_id table_id;
2664 struct efx_farch_filter_table *table;
2665 unsigned int filter_idx;
2666 u32 count = 0;
2667
2668 spin_lock_bh(&efx->filter_lock);
2669
2670 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2671 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2672 table_id++) {
2673 table = &state->table[table_id];
2674 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2675 if (test_bit(filter_idx, table->used_bitmap) &&
2676 table->spec[filter_idx].priority == priority)
2677 ++count;
2678 }
2679 }
2680
2681 spin_unlock_bh(&efx->filter_lock);
2682
2683 return count;
2684}
2685
2686s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2687 enum efx_filter_priority priority,
2688 u32 *buf, u32 size)
2689{
2690 struct efx_farch_filter_state *state = efx->filter_state;
2691 enum efx_farch_filter_table_id table_id;
2692 struct efx_farch_filter_table *table;
2693 unsigned int filter_idx;
2694 s32 count = 0;
2695
2696 spin_lock_bh(&efx->filter_lock);
2697
2698 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2699 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2700 table_id++) {
2701 table = &state->table[table_id];
2702 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2703 if (test_bit(filter_idx, table->used_bitmap) &&
2704 table->spec[filter_idx].priority == priority) {
2705 if (count == size) {
2706 count = -EMSGSIZE;
2707 goto out;
2708 }
2709 buf[count++] = efx_farch_filter_make_id(
2710 &table->spec[filter_idx], filter_idx);
2711 }
2712 }
2713 }
2714out:
2715 spin_unlock_bh(&efx->filter_lock);
2716
2717 return count;
2718}
2719
2720/* Restore filter state after reset */
2721void efx_farch_filter_table_restore(struct efx_nic *efx)
2722{
2723 struct efx_farch_filter_state *state = efx->filter_state;
2724 enum efx_farch_filter_table_id table_id;
2725 struct efx_farch_filter_table *table;
2726 efx_oword_t filter;
2727 unsigned int filter_idx;
2728
2729 spin_lock_bh(&efx->filter_lock);
2730
2731 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2732 table = &state->table[table_id];
2733
2734 /* Check whether this is a regular register table */
2735 if (table->step == 0)
2736 continue;
2737
2738 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2739 if (!test_bit(filter_idx, table->used_bitmap))
2740 continue;
2741 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2742 efx_writeo(efx, &filter,
2743 table->offset + table->step * filter_idx);
2744 }
2745 }
2746
2747 efx_farch_filter_push_rx_config(efx);
2748 efx_farch_filter_push_tx_limits(efx);
2749
2750 spin_unlock_bh(&efx->filter_lock);
2751}
2752
2753void efx_farch_filter_table_remove(struct efx_nic *efx)
2754{
2755 struct efx_farch_filter_state *state = efx->filter_state;
2756 enum efx_farch_filter_table_id table_id;
2757
2758 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2759 kfree(state->table[table_id].used_bitmap);
2760 vfree(state->table[table_id].spec);
2761 }
2762 kfree(state);
2763}
2764
2765int efx_farch_filter_table_probe(struct efx_nic *efx)
2766{
2767 struct efx_farch_filter_state *state;
2768 struct efx_farch_filter_table *table;
2769 unsigned table_id;
2770
2771 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2772 if (!state)
2773 return -ENOMEM;
2774 efx->filter_state = state;
2775
2776 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2777 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2778 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2779 table->offset = FR_BZ_RX_FILTER_TBL0;
2780 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2781 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2782 }
2783
2784 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
2785 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2786 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2787 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2788 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2789 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2790
2791 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2792 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2793 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2794
2795 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2796 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2797 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2798 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2799 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
2800 }
2801
2802 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2803 table = &state->table[table_id];
2804 if (table->size == 0)
2805 continue;
2806 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2807 sizeof(unsigned long),
2808 GFP_KERNEL);
2809 if (!table->used_bitmap)
2810 goto fail;
2811 table->spec = vzalloc(table->size * sizeof(*table->spec));
2812 if (!table->spec)
2813 goto fail;
2814 }
2815
2816 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2817 if (table->size) {
2818 /* RX default filters must always exist */
2819 struct efx_farch_filter_spec *spec;
2820 unsigned i;
2821
2822 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
2823 spec = &table->spec[i];
2824 spec->type = EFX_FARCH_FILTER_UC_DEF + i;
2825 efx_farch_filter_init_rx_for_stack(efx, spec);
2826 __set_bit(i, table->used_bitmap);
2827 }
2828 }
2829
2830 efx_farch_filter_push_rx_config(efx);
2831
2832 return 0;
2833
2834fail:
2835 efx_farch_filter_table_remove(efx);
2836 return -ENOMEM;
2837}
2838
2839/* Update scatter enable flags for filters pointing to our own RX queues */
2840void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2841{
2842 struct efx_farch_filter_state *state = efx->filter_state;
2843 enum efx_farch_filter_table_id table_id;
2844 struct efx_farch_filter_table *table;
2845 efx_oword_t filter;
2846 unsigned int filter_idx;
2847
2848 spin_lock_bh(&efx->filter_lock);
2849
2850 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2851 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2852 table_id++) {
2853 table = &state->table[table_id];
2854
2855 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2856 if (!test_bit(filter_idx, table->used_bitmap) ||
2857 table->spec[filter_idx].dmaq_id >=
2858 efx->n_rx_channels)
2859 continue;
2860
2861 if (efx->rx_scatter)
2862 table->spec[filter_idx].flags |=
2863 EFX_FILTER_FLAG_RX_SCATTER;
2864 else
2865 table->spec[filter_idx].flags &=
2866 ~EFX_FILTER_FLAG_RX_SCATTER;
2867
2868 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2869 /* Pushed by efx_farch_filter_push_rx_config() */
2870 continue;
2871
2872 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2873 efx_writeo(efx, &filter,
2874 table->offset + table->step * filter_idx);
2875 }
2876 }
2877
2878 efx_farch_filter_push_rx_config(efx);
2879
2880 spin_unlock_bh(&efx->filter_lock);
2881}
2882
2883#ifdef CONFIG_RFS_ACCEL
2884
2885s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
2886 struct efx_filter_spec *gen_spec)
2887{
2888 return efx_farch_filter_insert(efx, gen_spec, true);
2889}
2890
2891bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2892 unsigned int index)
2893{
2894 struct efx_farch_filter_state *state = efx->filter_state;
2895 struct efx_farch_filter_table *table =
2896 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2897
2898 if (test_bit(index, table->used_bitmap) &&
2899 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
2900 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2901 flow_id, index)) {
2902 efx_farch_filter_table_clear_entry(efx, table, index);
2903 return true;
2904 }
2905
2906 return false;
2907}
2908
2909#endif /* CONFIG_RFS_ACCEL */
2910
2911void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2912{
2913 struct net_device *net_dev = efx->net_dev;
2914 struct netdev_hw_addr *ha;
2915 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2916 u32 crc;
2917 int bit;
2918
2919 netif_addr_lock_bh(net_dev);
2920
2921 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2922
2923 /* Build multicast hash table */
2924 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2925 memset(mc_hash, 0xff, sizeof(*mc_hash));
2926 } else {
2927 memset(mc_hash, 0x00, sizeof(*mc_hash));
2928 netdev_for_each_mc_addr(ha, net_dev) {
2929 crc = ether_crc_le(ETH_ALEN, ha->addr);
2930 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2931 __set_bit_le(bit, mc_hash);
2932 }
2933
2934 /* Broadcast packets go through the multicast hash filter.
2935 * ether_crc_le() of the broadcast address is 0xbe2612ff
2936 * so we always add bit 0xff to the mask.
2937 */
2938 __set_bit_le(0xff, mc_hash);
2939 }
2940
2941 netif_addr_unlock_bh(net_dev);
2942}
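
The hash bucket computed in the loop above is just the low bits of the little-endian CRC-32 of the destination MAC. A hedged kernel-context illustration, assuming EFX_MCAST_HASH_ENTRIES is 256 (consistent with the 0xff broadcast bit in the comment); ether_crc_le() and ETH_ALEN come from <linux/crc32.h> and <linux/if_ether.h>:

static int mcast_hash_bit(const u8 *addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);

	/* e.g. broadcast ff:ff:ff:ff:ff:ff: 0xbe2612ff & 0xff == 0xff */
	return crc & (256 - 1);
}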
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/farch_regs.h
index ade4c4dc56ca..7019a712e799 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/farch_regs.h
@@ -1,15 +1,15 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc.
4 * Copyright 2006-2012 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_REGS_H
11#ifndef EFX_FARCH_REGS_H
12#define EFX_REGS_H
12#define EFX_FARCH_REGS_H
13
14/*
15 * Falcon hardware architecture definitions have a name prefix following
@@ -2925,264 +2925,8 @@
2925#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
2926#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
2927
2928/* RX packet prefix */
2929#define FS_BZ_RX_PREFIX_HASH_OFST 12
2930#define FS_BZ_RX_PREFIX_SIZE 16
2928/**************************************************************************
2929 *
2930 * Falcon MAC stats
2931 *
2932 **************************************************************************
2933 *
2934 */
2935
2936#define GRxGoodOct_offset 0x0
2937#define GRxGoodOct_WIDTH 48
2938#define GRxBadOct_offset 0x8
2939#define GRxBadOct_WIDTH 48
2940#define GRxMissPkt_offset 0x10
2941#define GRxMissPkt_WIDTH 32
2942#define GRxFalseCRS_offset 0x14
2943#define GRxFalseCRS_WIDTH 32
2944#define GRxPausePkt_offset 0x18
2945#define GRxPausePkt_WIDTH 32
2946#define GRxBadPkt_offset 0x1C
2947#define GRxBadPkt_WIDTH 32
2948#define GRxUcastPkt_offset 0x20
2949#define GRxUcastPkt_WIDTH 32
2950#define GRxMcastPkt_offset 0x24
2951#define GRxMcastPkt_WIDTH 32
2952#define GRxBcastPkt_offset 0x28
2953#define GRxBcastPkt_WIDTH 32
2954#define GRxGoodLt64Pkt_offset 0x2C
2955#define GRxGoodLt64Pkt_WIDTH 32
2956#define GRxBadLt64Pkt_offset 0x30
2957#define GRxBadLt64Pkt_WIDTH 32
2958#define GRx64Pkt_offset 0x34
2959#define GRx64Pkt_WIDTH 32
2960#define GRx65to127Pkt_offset 0x38
2961#define GRx65to127Pkt_WIDTH 32
2962#define GRx128to255Pkt_offset 0x3C
2963#define GRx128to255Pkt_WIDTH 32
2964#define GRx256to511Pkt_offset 0x40
2965#define GRx256to511Pkt_WIDTH 32
2966#define GRx512to1023Pkt_offset 0x44
2967#define GRx512to1023Pkt_WIDTH 32
2968#define GRx1024to15xxPkt_offset 0x48
2969#define GRx1024to15xxPkt_WIDTH 32
2970#define GRx15xxtoJumboPkt_offset 0x4C
2971#define GRx15xxtoJumboPkt_WIDTH 32
2972#define GRxGtJumboPkt_offset 0x50
2973#define GRxGtJumboPkt_WIDTH 32
2974#define GRxFcsErr64to15xxPkt_offset 0x54
2975#define GRxFcsErr64to15xxPkt_WIDTH 32
2976#define GRxFcsErr15xxtoJumboPkt_offset 0x58
2977#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
2978#define GRxFcsErrGtJumboPkt_offset 0x5C
2979#define GRxFcsErrGtJumboPkt_WIDTH 32
2980#define GTxGoodBadOct_offset 0x80
2981#define GTxGoodBadOct_WIDTH 48
2982#define GTxGoodOct_offset 0x88
2983#define GTxGoodOct_WIDTH 48
2984#define GTxSglColPkt_offset 0x90
2985#define GTxSglColPkt_WIDTH 32
2986#define GTxMultColPkt_offset 0x94
2987#define GTxMultColPkt_WIDTH 32
2988#define GTxExColPkt_offset 0x98
2989#define GTxExColPkt_WIDTH 32
2990#define GTxDefPkt_offset 0x9C
2991#define GTxDefPkt_WIDTH 32
2992#define GTxLateCol_offset 0xA0
2993#define GTxLateCol_WIDTH 32
2994#define GTxExDefPkt_offset 0xA4
2995#define GTxExDefPkt_WIDTH 32
2996#define GTxPausePkt_offset 0xA8
2997#define GTxPausePkt_WIDTH 32
2998#define GTxBadPkt_offset 0xAC
2999#define GTxBadPkt_WIDTH 32
3000#define GTxUcastPkt_offset 0xB0
3001#define GTxUcastPkt_WIDTH 32
3002#define GTxMcastPkt_offset 0xB4
3003#define GTxMcastPkt_WIDTH 32
3004#define GTxBcastPkt_offset 0xB8
3005#define GTxBcastPkt_WIDTH 32
3006#define GTxLt64Pkt_offset 0xBC
3007#define GTxLt64Pkt_WIDTH 32
3008#define GTx64Pkt_offset 0xC0
3009#define GTx64Pkt_WIDTH 32
3010#define GTx65to127Pkt_offset 0xC4
3011#define GTx65to127Pkt_WIDTH 32
3012#define GTx128to255Pkt_offset 0xC8
3013#define GTx128to255Pkt_WIDTH 32
3014#define GTx256to511Pkt_offset 0xCC
3015#define GTx256to511Pkt_WIDTH 32
3016#define GTx512to1023Pkt_offset 0xD0
3017#define GTx512to1023Pkt_WIDTH 32
3018#define GTx1024to15xxPkt_offset 0xD4
3019#define GTx1024to15xxPkt_WIDTH 32
3020#define GTx15xxtoJumboPkt_offset 0xD8
3021#define GTx15xxtoJumboPkt_WIDTH 32
3022#define GTxGtJumboPkt_offset 0xDC
3023#define GTxGtJumboPkt_WIDTH 32
3024#define GTxNonTcpUdpPkt_offset 0xE0
3025#define GTxNonTcpUdpPkt_WIDTH 16
3026#define GTxMacSrcErrPkt_offset 0xE4
3027#define GTxMacSrcErrPkt_WIDTH 16
3028#define GTxIpSrcErrPkt_offset 0xE8
3029#define GTxIpSrcErrPkt_WIDTH 16
3030#define GDmaDone_offset 0xEC
3031#define GDmaDone_WIDTH 32
3032
3033#define XgRxOctets_offset 0x0
3034#define XgRxOctets_WIDTH 48
3035#define XgRxOctetsOK_offset 0x8
3036#define XgRxOctetsOK_WIDTH 48
3037#define XgRxPkts_offset 0x10
3038#define XgRxPkts_WIDTH 32
3039#define XgRxPktsOK_offset 0x14
3040#define XgRxPktsOK_WIDTH 32
3041#define XgRxBroadcastPkts_offset 0x18
3042#define XgRxBroadcastPkts_WIDTH 32
3043#define XgRxMulticastPkts_offset 0x1C
3044#define XgRxMulticastPkts_WIDTH 32
3045#define XgRxUnicastPkts_offset 0x20
3046#define XgRxUnicastPkts_WIDTH 32
3047#define XgRxUndersizePkts_offset 0x24
3048#define XgRxUndersizePkts_WIDTH 32
3049#define XgRxOversizePkts_offset 0x28
3050#define XgRxOversizePkts_WIDTH 32
3051#define XgRxJabberPkts_offset 0x2C
3052#define XgRxJabberPkts_WIDTH 32
3053#define XgRxUndersizeFCSerrorPkts_offset 0x30
3054#define XgRxUndersizeFCSerrorPkts_WIDTH 32
3055#define XgRxDropEvents_offset 0x34
3056#define XgRxDropEvents_WIDTH 32
3057#define XgRxFCSerrorPkts_offset 0x38
3058#define XgRxFCSerrorPkts_WIDTH 32
3059#define XgRxAlignError_offset 0x3C
3060#define XgRxAlignError_WIDTH 32
3061#define XgRxSymbolError_offset 0x40
3062#define XgRxSymbolError_WIDTH 32
3063#define XgRxInternalMACError_offset 0x44
3064#define XgRxInternalMACError_WIDTH 32
3065#define XgRxControlPkts_offset 0x48
3066#define XgRxControlPkts_WIDTH 32
3067#define XgRxPausePkts_offset 0x4C
3068#define XgRxPausePkts_WIDTH 32
3069#define XgRxPkts64Octets_offset 0x50
3070#define XgRxPkts64Octets_WIDTH 32
3071#define XgRxPkts65to127Octets_offset 0x54
3072#define XgRxPkts65to127Octets_WIDTH 32
3073#define XgRxPkts128to255Octets_offset 0x58
3074#define XgRxPkts128to255Octets_WIDTH 32
3075#define XgRxPkts256to511Octets_offset 0x5C
3076#define XgRxPkts256to511Octets_WIDTH 32
3077#define XgRxPkts512to1023Octets_offset 0x60
3078#define XgRxPkts512to1023Octets_WIDTH 32
3079#define XgRxPkts1024to15xxOctets_offset 0x64
3080#define XgRxPkts1024to15xxOctets_WIDTH 32
3081#define XgRxPkts15xxtoMaxOctets_offset 0x68
3082#define XgRxPkts15xxtoMaxOctets_WIDTH 32
3083#define XgRxLengthError_offset 0x6C
3084#define XgRxLengthError_WIDTH 32
3085#define XgTxPkts_offset 0x80
3086#define XgTxPkts_WIDTH 32
3087#define XgTxOctets_offset 0x88
3088#define XgTxOctets_WIDTH 48
3089#define XgTxMulticastPkts_offset 0x90
3090#define XgTxMulticastPkts_WIDTH 32
3091#define XgTxBroadcastPkts_offset 0x94
3092#define XgTxBroadcastPkts_WIDTH 32
3093#define XgTxUnicastPkts_offset 0x98
3094#define XgTxUnicastPkts_WIDTH 32
3095#define XgTxControlPkts_offset 0x9C
3096#define XgTxControlPkts_WIDTH 32
3097#define XgTxPausePkts_offset 0xA0
3098#define XgTxPausePkts_WIDTH 32
3099#define XgTxPkts64Octets_offset 0xA4
3100#define XgTxPkts64Octets_WIDTH 32
3101#define XgTxPkts65to127Octets_offset 0xA8
3102#define XgTxPkts65to127Octets_WIDTH 32
3103#define XgTxPkts128to255Octets_offset 0xAC
3104#define XgTxPkts128to255Octets_WIDTH 32
3105#define XgTxPkts256to511Octets_offset 0xB0
3106#define XgTxPkts256to511Octets_WIDTH 32
3107#define XgTxPkts512to1023Octets_offset 0xB4
3108#define XgTxPkts512to1023Octets_WIDTH 32
3109#define XgTxPkts1024to15xxOctets_offset 0xB8
3110#define XgTxPkts1024to15xxOctets_WIDTH 32
3111#define XgTxPkts1519toMaxOctets_offset 0xBC
3112#define XgTxPkts1519toMaxOctets_WIDTH 32
3113#define XgTxUndersizePkts_offset 0xC0
3114#define XgTxUndersizePkts_WIDTH 32
3115#define XgTxOversizePkts_offset 0xC4
3116#define XgTxOversizePkts_WIDTH 32
3117#define XgTxNonTcpUdpPkt_offset 0xC8
3118#define XgTxNonTcpUdpPkt_WIDTH 16
3119#define XgTxMacSrcErrPkt_offset 0xCC
3120#define XgTxMacSrcErrPkt_WIDTH 16
3121#define XgTxIpSrcErrPkt_offset 0xD0
3122#define XgTxIpSrcErrPkt_WIDTH 16
3123#define XgDmaDone_offset 0xD4
3124#define XgDmaDone_WIDTH 32
3125
3126#define FALCON_STATS_NOT_DONE 0x00000000
3127#define FALCON_STATS_DONE 0xffffffff
3128
3129/**************************************************************************
3130 *
3131 * Falcon non-volatile configuration
3132 *
3133 **************************************************************************
3134 */
3135
2931
3136/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
2932#endif /* EFX_FARCH_REGS_H */
3137struct falcon_nvconfig_board_v2 {
3138 __le16 nports;
3139 u8 port0_phy_addr;
3140 u8 port0_phy_type;
3141 u8 port1_phy_addr;
3142 u8 port1_phy_type;
3143 __le16 asic_sub_revision;
3144 __le16 board_revision;
3145} __packed;
3146
3147/* Board configuration v3 extra information */
3148struct falcon_nvconfig_board_v3 {
3149 __le32 spi_device_type[2];
3150} __packed;
3151
3152/* Bit numbers for spi_device_type */
3153#define SPI_DEV_TYPE_SIZE_LBN 0
3154#define SPI_DEV_TYPE_SIZE_WIDTH 5
3155#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
3156#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
3157#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
3158#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
3159#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
3160#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
3161#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
3162#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
3163#define SPI_DEV_TYPE_FIELD(type, field) \
3164 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
3165
3166#define FALCON_NVCONFIG_OFFSET 0x300
3167
3168#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
3169struct falcon_nvconfig {
3170 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
3171 u8 mac_address[2][8]; /* 0x310 */
3172 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
3173 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
3174 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
3175 efx_oword_t hw_init_reg; /* 0x350 */
3176 efx_oword_t nic_stat_reg; /* 0x360 */
3177 efx_oword_t glb_ctl_reg; /* 0x370 */
3178 efx_oword_t srm_cfg_reg; /* 0x380 */
3179 efx_oword_t spare_reg; /* 0x390 */
3180 __le16 board_magic_num; /* 0x3A0 */
3181 __le16 board_struct_ver;
3182 __le16 board_checksum;
3183 struct falcon_nvconfig_board_v2 board_v2;
3184 efx_oword_t ee_base_page_reg; /* 0x3B0 */
3185 struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
3186} __packed;
3187
3188#endif /* EFX_REGS_H */
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
deleted file mode 100644
index 30d744235d27..000000000000
--- a/drivers/net/ethernet/sfc/filter.c
+++ /dev/null
@@ -1,1274 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/in.h>
11#include <net/ip.h>
12#include "efx.h"
13#include "filter.h"
14#include "io.h"
15#include "nic.h"
16#include "regs.h"
17
18/* "Fudge factors" - difference between programmed value and actual depth.
19 * Due to pipelined implementation we need to program H/W with a value that
20 * is larger than the hop limit we want.
21 */
22#define FILTER_CTL_SRCH_FUDGE_WILD 3
23#define FILTER_CTL_SRCH_FUDGE_FULL 1
24
25/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
26 * We also need to avoid infinite loops in efx_filter_search() when the
27 * table is full.
28 */
29#define FILTER_CTL_SRCH_MAX 200
30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define FILTER_CTL_SRCH_HINT_MAX 5
34
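Tying the constants above together: the value programmed into a hardware search-limit field is always the software search depth plus the relevant fudge constant, as efx_filter_push_rx_config() and efx_filter_push_tx_limits() below show. A hedged schematic of that rule (the helper name is illustrative, not driver API):

/* programmed limit = software depth + fudge; full-match filters need
 * less pipeline slack (+1) than wildcard filters (+3).
 */
static unsigned int hw_search_limit(unsigned int sw_depth, int is_full)
{
	return sw_depth + (is_full ? FILTER_CTL_SRCH_FUDGE_FULL
				   : FILTER_CTL_SRCH_FUDGE_WILD);
}
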
35enum efx_filter_table_id {
36 EFX_FILTER_TABLE_RX_IP = 0,
37 EFX_FILTER_TABLE_RX_MAC,
38 EFX_FILTER_TABLE_RX_DEF,
39 EFX_FILTER_TABLE_TX_MAC,
40 EFX_FILTER_TABLE_COUNT,
41};
42
43enum efx_filter_index {
44 EFX_FILTER_INDEX_UC_DEF,
45 EFX_FILTER_INDEX_MC_DEF,
46 EFX_FILTER_SIZE_RX_DEF,
47};
48
49struct efx_filter_table {
50 enum efx_filter_table_id id;
51 u32 offset; /* address of table relative to BAR */
52 unsigned size; /* number of entries */
53 unsigned step; /* step between entries */
54 unsigned used; /* number currently used */
55 unsigned long *used_bitmap;
56 struct efx_filter_spec *spec;
57 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
58};
59
60struct efx_filter_state {
61 spinlock_t lock;
62 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
63#ifdef CONFIG_RFS_ACCEL
64 u32 *rps_flow_id;
65 unsigned rps_expire_index;
66#endif
67};
68
69static void efx_filter_table_clear_entry(struct efx_nic *efx,
70 struct efx_filter_table *table,
71 unsigned int filter_idx);
72
73/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
74 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
75static u16 efx_filter_hash(u32 key)
76{
77 u16 tmp;
78
79 /* First 16 rounds */
80 tmp = 0x1fff ^ key >> 16;
81 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
82 tmp = tmp ^ tmp >> 9;
83 /* Last 16 rounds */
84 tmp = tmp ^ tmp << 13 ^ key;
85 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
86 return tmp ^ tmp >> 9;
87}
88
89/* To allow for hash collisions, filter search continues at these
90 * increments from the first possible entry selected by the hash. */
91static u16 efx_filter_increment(u32 key)
92{
93 return key * 2 - 1;
94}
95
96static enum efx_filter_table_id
97efx_filter_spec_table_id(const struct efx_filter_spec *spec)
98{
99 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
100 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
101 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
102 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
103 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
104 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
105 BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
106 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
107 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
108}
109
110static struct efx_filter_table *
111efx_filter_spec_table(struct efx_filter_state *state,
112 const struct efx_filter_spec *spec)
113{
114 if (spec->type == EFX_FILTER_UNSPEC)
115 return NULL;
116 else
117 return &state->table[efx_filter_spec_table_id(spec)];
118}
119
120static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
121{
122 memset(table->search_depth, 0, sizeof(table->search_depth));
123}
124
125static void efx_filter_push_rx_config(struct efx_nic *efx)
126{
127 struct efx_filter_state *state = efx->filter_state;
128 struct efx_filter_table *table;
129 efx_oword_t filter_ctl;
130
131 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
132
133 table = &state->table[EFX_FILTER_TABLE_RX_IP];
134 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
135 table->search_depth[EFX_FILTER_TCP_FULL] +
136 FILTER_CTL_SRCH_FUDGE_FULL);
137 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
138 table->search_depth[EFX_FILTER_TCP_WILD] +
139 FILTER_CTL_SRCH_FUDGE_WILD);
140 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
141 table->search_depth[EFX_FILTER_UDP_FULL] +
142 FILTER_CTL_SRCH_FUDGE_FULL);
143 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
144 table->search_depth[EFX_FILTER_UDP_WILD] +
145 FILTER_CTL_SRCH_FUDGE_WILD);
146
147 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
148 if (table->size) {
149 EFX_SET_OWORD_FIELD(
150 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
151 table->search_depth[EFX_FILTER_MAC_FULL] +
152 FILTER_CTL_SRCH_FUDGE_FULL);
153 EFX_SET_OWORD_FIELD(
154 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
155 table->search_depth[EFX_FILTER_MAC_WILD] +
156 FILTER_CTL_SRCH_FUDGE_WILD);
157 }
158
159 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
160 if (table->size) {
161 EFX_SET_OWORD_FIELD(
162 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
163 table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
164 EFX_SET_OWORD_FIELD(
165 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
166 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
167 EFX_FILTER_FLAG_RX_RSS));
168 EFX_SET_OWORD_FIELD(
169 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
170 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
171 EFX_SET_OWORD_FIELD(
172 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
173 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
174 EFX_FILTER_FLAG_RX_RSS));
175
176 /* There is a single bit to enable RX scatter for all
177 * unmatched packets. Only set it if scatter is
178 * enabled in both filter specs.
179 */
180 EFX_SET_OWORD_FIELD(
181 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
182 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
183 table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
184 EFX_FILTER_FLAG_RX_SCATTER));
185 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
186 /* We don't expose 'default' filters because unmatched
187 * packets always go to the queue number found in the
188 * RSS table. But we still need to set the RX scatter
189 * bit here.
190 */
191 EFX_SET_OWORD_FIELD(
192 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
193 efx->rx_scatter);
194 }
195
196 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
197}
198
199static void efx_filter_push_tx_limits(struct efx_nic *efx)
200{
201 struct efx_filter_state *state = efx->filter_state;
202 struct efx_filter_table *table;
203 efx_oword_t tx_cfg;
204
205 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
206
207 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
208 if (table->size) {
209 EFX_SET_OWORD_FIELD(
210 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
211 table->search_depth[EFX_FILTER_MAC_FULL] +
212 FILTER_CTL_SRCH_FUDGE_FULL);
213 EFX_SET_OWORD_FIELD(
214 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
215 table->search_depth[EFX_FILTER_MAC_WILD] +
216 FILTER_CTL_SRCH_FUDGE_WILD);
217 }
218
219 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
220}
221
222static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
223 __be32 host1, __be16 port1,
224 __be32 host2, __be16 port2)
225{
226 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
227 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
228 spec->data[2] = ntohl(host2);
229}
230
231static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
232 __be32 *host1, __be16 *port1,
233 __be32 *host2, __be16 *port2)
234{
235 *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
236 *port1 = htons(spec->data[0]);
237 *host2 = htonl(spec->data[2]);
238 *port2 = htons(spec->data[1] >> 16);
239}
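
Together these two helpers pack a (host1, port1, host2, port2) tuple into three 32-bit words: data[0] carries port1 and the low half of host1, data[1] carries port2 and the high half of host1, and data[2] carries host2. A hedged round-trip check with arbitrary test values (userspace stand-in; <arpa/inet.h> supplies the byte-order helpers):

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t data[3];
	uint32_t h1 = htonl(0x0a000001), h2 = htonl(0x0a000002);
	uint16_t p1 = htons(80), p2 = htons(12345);

	/* pack, mirroring __efx_filter_set_ipv4() */
	data[0] = ntohl(h1) << 16 | ntohs(p1);
	data[1] = ntohs(p2) << 16 | ntohl(h1) >> 16;
	data[2] = ntohl(h2);

	/* unpack, mirroring __efx_filter_get_ipv4() */
	assert(htonl(data[0] >> 16 | data[1] << 16) == h1);
	assert(htons((uint16_t)data[0]) == p1);
	assert(htonl(data[2]) == h2);
	assert(htons((uint16_t)(data[1] >> 16)) == p2);
	return 0;
}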
240
241/**
242 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
243 * @spec: Specification to initialise
244 * @proto: Transport layer protocol number
245 * @host: Local host address (network byte order)
246 * @port: Local port (network byte order)
247 */
248int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
249 __be32 host, __be16 port)
250{
251 __be32 host1;
252 __be16 port1;
253
254 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
255
256 /* This cannot currently be combined with other filtering */
257 if (spec->type != EFX_FILTER_UNSPEC)
258 return -EPROTONOSUPPORT;
259
260 if (port == 0)
261 return -EINVAL;
262
263 switch (proto) {
264 case IPPROTO_TCP:
265 spec->type = EFX_FILTER_TCP_WILD;
266 break;
267 case IPPROTO_UDP:
268 spec->type = EFX_FILTER_UDP_WILD;
269 break;
270 default:
271 return -EPROTONOSUPPORT;
272 }
273
274 /* Filter is constructed in terms of source and destination,
275 * with the odd wrinkle that the ports are swapped in a UDP
276 * wildcard filter. We need to convert from local and remote
277 * (= zero for wildcard) addresses.
278 */
279 host1 = 0;
280 if (proto != IPPROTO_UDP) {
281 port1 = 0;
282 } else {
283 port1 = port;
284 port = 0;
285 }
286
287 __efx_filter_set_ipv4(spec, host1, port1, host, port);
288 return 0;
289}
290
291int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
292 u8 *proto, __be32 *host, __be16 *port)
293{
294 __be32 host1;
295 __be16 port1;
296
297 switch (spec->type) {
298 case EFX_FILTER_TCP_WILD:
299 *proto = IPPROTO_TCP;
300 __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
301 return 0;
302 case EFX_FILTER_UDP_WILD:
303 *proto = IPPROTO_UDP;
304 __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
305 return 0;
306 default:
307 return -EINVAL;
308 }
309}
310
311/**
312 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
313 * @spec: Specification to initialise
314 * @proto: Transport layer protocol number
315 * @host: Local host address (network byte order)
316 * @port: Local port (network byte order)
317 * @rhost: Remote host address (network byte order)
318 * @rport: Remote port (network byte order)
319 */
320int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
321 __be32 host, __be16 port,
322 __be32 rhost, __be16 rport)
323{
324 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
325
326 /* This cannot currently be combined with other filtering */
327 if (spec->type != EFX_FILTER_UNSPEC)
328 return -EPROTONOSUPPORT;
329
330 if (port == 0 || rport == 0)
331 return -EINVAL;
332
333 switch (proto) {
334 case IPPROTO_TCP:
335 spec->type = EFX_FILTER_TCP_FULL;
336 break;
337 case IPPROTO_UDP:
338 spec->type = EFX_FILTER_UDP_FULL;
339 break;
340 default:
341 return -EPROTONOSUPPORT;
342 }
343
344 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
345 return 0;
346}
347
348int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
349 u8 *proto, __be32 *host, __be16 *port,
350 __be32 *rhost, __be16 *rport)
351{
352 switch (spec->type) {
353 case EFX_FILTER_TCP_FULL:
354 *proto = IPPROTO_TCP;
355 break;
356 case EFX_FILTER_UDP_FULL:
357 *proto = IPPROTO_UDP;
358 break;
359 default:
360 return -EINVAL;
361 }
362
363 __efx_filter_get_ipv4(spec, rhost, rport, host, port);
364 return 0;
365}
366
367/**
368 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
369 * @spec: Specification to initialise
370 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
371 * @addr: Local Ethernet MAC address
372 */
373int efx_filter_set_eth_local(struct efx_filter_spec *spec,
374 u16 vid, const u8 *addr)
375{
376 EFX_BUG_ON_PARANOID(!(spec->flags &
377 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
378
379 /* This cannot currently be combined with other filtering */
380 if (spec->type != EFX_FILTER_UNSPEC)
381 return -EPROTONOSUPPORT;
382
383 if (vid == EFX_FILTER_VID_UNSPEC) {
384 spec->type = EFX_FILTER_MAC_WILD;
385 spec->data[0] = 0;
386 } else {
387 spec->type = EFX_FILTER_MAC_FULL;
388 spec->data[0] = vid;
389 }
390
391 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
392 spec->data[2] = addr[0] << 8 | addr[1];
393 return 0;
394}
395
396/**
397 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
398 * @spec: Specification to initialise
399 */
400int efx_filter_set_uc_def(struct efx_filter_spec *spec)
401{
402 EFX_BUG_ON_PARANOID(!(spec->flags &
403 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
404
405 if (spec->type != EFX_FILTER_UNSPEC)
406 return -EINVAL;
407
408 spec->type = EFX_FILTER_UC_DEF;
409 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
410 return 0;
411}
412
413/**
414 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
415 * @spec: Specification to initialise
416 */
417int efx_filter_set_mc_def(struct efx_filter_spec *spec)
418{
419 EFX_BUG_ON_PARANOID(!(spec->flags &
420 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
421
422 if (spec->type != EFX_FILTER_UNSPEC)
423 return -EINVAL;
424
425 spec->type = EFX_FILTER_MC_DEF;
426 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
427 return 0;
428}
429
430static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
431{
432 struct efx_filter_state *state = efx->filter_state;
433 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
434 struct efx_filter_spec *spec = &table->spec[filter_idx];
435 enum efx_filter_flags flags = 0;
436
437 /* If there's only one channel then disable RSS for non VF
438 * traffic, thereby allowing VFs to use RSS when the PF can't.
439 */
440 if (efx->n_rx_channels > 1)
441 flags |= EFX_FILTER_FLAG_RX_RSS;
442
443 if (efx->rx_scatter)
444 flags |= EFX_FILTER_FLAG_RX_SCATTER;
445
446 efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
447 spec->type = EFX_FILTER_UC_DEF + filter_idx;
448 table->used_bitmap[0] |= 1 << filter_idx;
449}
450
451int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
452 u16 *vid, u8 *addr)
453{
454 switch (spec->type) {
455 case EFX_FILTER_MAC_WILD:
456 *vid = EFX_FILTER_VID_UNSPEC;
457 break;
458 case EFX_FILTER_MAC_FULL:
459 *vid = spec->data[0];
460 break;
461 default:
462 return -EINVAL;
463 }
464
465 addr[0] = spec->data[2] >> 8;
466 addr[1] = spec->data[2];
467 addr[2] = spec->data[1] >> 24;
468 addr[3] = spec->data[1] >> 16;
469 addr[4] = spec->data[1] >> 8;
470 addr[5] = spec->data[1];
471 return 0;
472}
473
474/* Build a filter entry and return its n-tuple key. */
475static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
476{
477 u32 data3;
478
479 switch (efx_filter_spec_table_id(spec)) {
480 case EFX_FILTER_TABLE_RX_IP: {
481 bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
482 spec->type == EFX_FILTER_UDP_WILD);
483 EFX_POPULATE_OWORD_7(
484 *filter,
485 FRF_BZ_RSS_EN,
486 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
487 FRF_BZ_SCATTER_EN,
488 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
489 FRF_BZ_TCP_UDP, is_udp,
490 FRF_BZ_RXQ_ID, spec->dmaq_id,
491 EFX_DWORD_2, spec->data[2],
492 EFX_DWORD_1, spec->data[1],
493 EFX_DWORD_0, spec->data[0]);
494 data3 = is_udp;
495 break;
496 }
497
498 case EFX_FILTER_TABLE_RX_MAC: {
499 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
500 EFX_POPULATE_OWORD_7(
501 *filter,
502 FRF_CZ_RMFT_RSS_EN,
503 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
504 FRF_CZ_RMFT_SCATTER_EN,
505 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
506 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
507 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
508 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
509 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
510 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
511 data3 = is_wild;
512 break;
513 }
514
515 case EFX_FILTER_TABLE_TX_MAC: {
516 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
517 EFX_POPULATE_OWORD_5(*filter,
518 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
519 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
520 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
521 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
522 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
523 data3 = is_wild | spec->dmaq_id << 1;
524 break;
525 }
526
527 default:
528 BUG();
529 }
530
531 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
532}
533
534static bool efx_filter_equal(const struct efx_filter_spec *left,
535 const struct efx_filter_spec *right)
536{
537 if (left->type != right->type ||
538 memcmp(left->data, right->data, sizeof(left->data)))
539 return false;
540
541 if (left->flags & EFX_FILTER_FLAG_TX &&
542 left->dmaq_id != right->dmaq_id)
543 return false;
544
545 return true;
546}
547
548/*
549 * Construct/deconstruct external filter IDs. At least the RX filter
550 * IDs must be ordered by matching priority, for RX NFC semantics.
551 *
552 * Deconstruction needs to be robust against invalid IDs so that
553 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
554 * accept user-provided IDs.
555 */
556
557#define EFX_FILTER_MATCH_PRI_COUNT 5
558
559static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
560 [EFX_FILTER_TCP_FULL] = 0,
561 [EFX_FILTER_UDP_FULL] = 0,
562 [EFX_FILTER_TCP_WILD] = 1,
563 [EFX_FILTER_UDP_WILD] = 1,
564 [EFX_FILTER_MAC_FULL] = 2,
565 [EFX_FILTER_MAC_WILD] = 3,
566 [EFX_FILTER_UC_DEF] = 4,
567 [EFX_FILTER_MC_DEF] = 4,
568};
569
570static const enum efx_filter_table_id efx_filter_range_table[] = {
571 EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
572 EFX_FILTER_TABLE_RX_IP,
573 EFX_FILTER_TABLE_RX_MAC,
574 EFX_FILTER_TABLE_RX_MAC,
575 EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
576 EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
577 EFX_FILTER_TABLE_COUNT, /* invalid */
578 EFX_FILTER_TABLE_TX_MAC,
579 EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
580};
581
582#define EFX_FILTER_INDEX_WIDTH 13
583#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
584
585static inline u32
586efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
587{
588 unsigned int range;
589
590 range = efx_filter_type_match_pri[spec->type];
591 if (!(spec->flags & EFX_FILTER_FLAG_RX))
592 range += EFX_FILTER_MATCH_PRI_COUNT;
593
594 return range << EFX_FILTER_INDEX_WIDTH | index;
595}
596
597static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
598{
599 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
600
601 if (range < ARRAY_SIZE(efx_filter_range_table))
602 return efx_filter_range_table[range];
603 else
604 return EFX_FILTER_TABLE_COUNT; /* invalid */
605}
606
607static inline unsigned int efx_filter_id_index(u32 id)
608{
609 return id & EFX_FILTER_INDEX_MASK;
610}
611
612static inline u8 efx_filter_id_flags(u32 id)
613{
614 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
615
616 if (range < EFX_FILTER_MATCH_PRI_COUNT)
617 return EFX_FILTER_FLAG_RX;
618 else
619 return EFX_FILTER_FLAG_TX;
620}
621
622u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
623{
624 struct efx_filter_state *state = efx->filter_state;
625 unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
626 enum efx_filter_table_id table_id;
627
628 do {
629 table_id = efx_filter_range_table[range];
630 if (state->table[table_id].size != 0)
631 return range << EFX_FILTER_INDEX_WIDTH |
632 state->table[table_id].size;
633 } while (range--);
634
635 return 0;
636}
637
638/**
639 * efx_filter_insert_filter - add or replace a filter
640 * @efx: NIC in which to insert the filter
641 * @spec: Specification for the filter
642 * @replace_equal: Flag for whether the specified filter may replace an
643 * existing filter with equal priority
644 *
645 * On success, return the filter ID.
646 * On failure, return a negative error code.
647 *
648 * If an existing filter has equal match values to the new filter
649 * spec, then the new filter might replace it, depending on the
650 * relative priorities. If the existing filter has lower priority, or
651 * if @replace_equal is set and it has equal priority, then it is
652 * replaced. Otherwise the function fails, returning -%EPERM if
653 * the existing filter has higher priority or -%EEXIST if it has
654 * equal priority.
655 */
656s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
657 bool replace_equal)
658{
659 struct efx_filter_state *state = efx->filter_state;
660 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
661 efx_oword_t filter;
662 int rep_index, ins_index;
663 unsigned int depth = 0;
664 int rc;
665
666 if (!table || table->size == 0)
667 return -EINVAL;
668
669 netif_vdbg(efx, hw, efx->net_dev,
670 "%s: type %d search_depth=%d", __func__, spec->type,
671 table->search_depth[spec->type]);
672
673 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
674 /* One filter spec per type */
675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
678 rep_index = spec->type - EFX_FILTER_UC_DEF;
679 ins_index = rep_index;
680
681 spin_lock_bh(&state->lock);
682 } else {
683 /* Search concurrently for
684 * (1) a filter to be replaced (rep_index): any filter
685 * with the same match values, up to the current
686 * search depth for this type, and
687 * (2) the insertion point (ins_index): (1) or any
688 * free slot before it or up to the maximum search
689 * depth for this priority
690 * We fail if we cannot find (2).
691 *
692 * We can stop once either
693 * (a) we find (1), in which case we have definitely
694 * found (2) as well; or
695 * (b) we have searched exhaustively for (1), and have
696 * either found (2) or searched exhaustively for it
697 */
698 u32 key = efx_filter_build(&filter, spec);
699 unsigned int hash = efx_filter_hash(key);
700 unsigned int incr = efx_filter_increment(key);
701 unsigned int max_rep_depth = table->search_depth[spec->type];
702 unsigned int max_ins_depth =
703 spec->priority <= EFX_FILTER_PRI_HINT ?
704 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
705 unsigned int i = hash & (table->size - 1);
706
707 ins_index = -1;
708 depth = 1;
709
710 spin_lock_bh(&state->lock);
711
712 for (;;) {
713 if (!test_bit(i, table->used_bitmap)) {
714 if (ins_index < 0)
715 ins_index = i;
716 } else if (efx_filter_equal(spec, &table->spec[i])) {
717 /* Case (a) */
718 if (ins_index < 0)
719 ins_index = i;
720 rep_index = i;
721 break;
722 }
723
724 if (depth >= max_rep_depth &&
725 (ins_index >= 0 || depth >= max_ins_depth)) {
726 /* Case (b) */
727 if (ins_index < 0) {
728 rc = -EBUSY;
729 goto out;
730 }
731 rep_index = -1;
732 break;
733 }
734
735 i = (i + incr) & (table->size - 1);
736 ++depth;
737 }
738 }
739
740 /* If we found a filter to be replaced, check whether we
741 * should do so
742 */
743 if (rep_index >= 0) {
744 struct efx_filter_spec *saved_spec = &table->spec[rep_index];
745
746 if (spec->priority == saved_spec->priority && !replace_equal) {
747 rc = -EEXIST;
748 goto out;
749 }
750 if (spec->priority < saved_spec->priority) {
751 rc = -EPERM;
752 goto out;
753 }
754 }
755
756 /* Insert the filter */
757 if (ins_index != rep_index) {
758 __set_bit(ins_index, table->used_bitmap);
759 ++table->used;
760 }
761 table->spec[ins_index] = *spec;
762
763 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
764 efx_filter_push_rx_config(efx);
765 } else {
766 if (table->search_depth[spec->type] < depth) {
767 table->search_depth[spec->type] = depth;
768 if (spec->flags & EFX_FILTER_FLAG_TX)
769 efx_filter_push_tx_limits(efx);
770 else
771 efx_filter_push_rx_config(efx);
772 }
773
774 efx_writeo(efx, &filter,
775 table->offset + table->step * ins_index);
776
777 /* If we were able to replace a filter by inserting
778 * at a lower depth, clear the replaced filter
779 */
780 if (ins_index != rep_index && rep_index >= 0)
781 efx_filter_table_clear_entry(efx, table, rep_index);
782 }
783
784 netif_vdbg(efx, hw, efx->net_dev,
785 "%s: filter type %d index %d rxq %u set",
786 __func__, spec->type, ins_index, spec->dmaq_id);
787 rc = efx_filter_make_id(spec, ins_index);
788
789out:
790 spin_unlock_bh(&state->lock);
791 return rc;
792}
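
Putting the pieces together: a caller builds a spec with one of the setters above and hands it to efx_filter_insert_filter(). A hedged usage sketch in driver context (address, port and queue number are arbitrary; efx_filter_init_rx() is assumed from filter.h; error handling elided):

static int steer_http_rx(struct efx_nic *efx)
{
	struct efx_filter_spec spec;
	s32 filter_id;

	/* RX filter, manual priority, no flags, deliver to queue 0 */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	/* match local TCP 10.0.0.1:80 */
	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
				  htonl(0x0a000001), htons(80));

	filter_id = efx_filter_insert_filter(efx, &spec, true);
	return filter_id < 0 ? filter_id : 0;
}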
793
794static void efx_filter_table_clear_entry(struct efx_nic *efx,
795 struct efx_filter_table *table,
796 unsigned int filter_idx)
797{
798 static efx_oword_t filter;
799
800 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
801 /* RX default filters must always exist */
802 efx_filter_reset_rx_def(efx, filter_idx);
803 efx_filter_push_rx_config(efx);
804 } else if (test_bit(filter_idx, table->used_bitmap)) {
805 __clear_bit(filter_idx, table->used_bitmap);
806 --table->used;
807 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
808
809 efx_writeo(efx, &filter,
810 table->offset + table->step * filter_idx);
811 }
812}
813
814/**
815 * efx_filter_remove_id_safe - remove a filter by ID, carefully
816 * @efx: NIC from which to remove the filter
817 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
818 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
819 *
820 * This function will range-check @filter_id, so it is safe to call
821 * with a value passed from userland.
822 */
823int efx_filter_remove_id_safe(struct efx_nic *efx,
824 enum efx_filter_priority priority,
825 u32 filter_id)
826{
827 struct efx_filter_state *state = efx->filter_state;
828 enum efx_filter_table_id table_id;
829 struct efx_filter_table *table;
830 unsigned int filter_idx;
831 struct efx_filter_spec *spec;
832 u8 filter_flags;
833 int rc;
834
835 table_id = efx_filter_id_table_id(filter_id);
836 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
837 return -ENOENT;
838 table = &state->table[table_id];
839
840 filter_idx = efx_filter_id_index(filter_id);
841 if (filter_idx >= table->size)
842 return -ENOENT;
843 spec = &table->spec[filter_idx];
844
845 filter_flags = efx_filter_id_flags(filter_id);
846
847 spin_lock_bh(&state->lock);
848
849 if (test_bit(filter_idx, table->used_bitmap) &&
850 spec->priority == priority) {
851 efx_filter_table_clear_entry(efx, table, filter_idx);
852 if (table->used == 0)
853 efx_filter_table_reset_search_depth(table);
854 rc = 0;
855 } else {
856 rc = -ENOENT;
857 }
858
859 spin_unlock_bh(&state->lock);
860
861 return rc;
862}
863
864/**
865 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
866 * @efx: NIC from which to remove the filter
867 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
868 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
869 * @spec: Buffer in which to store filter specification
870 *
871 * This function will range-check @filter_id, so it is safe to call
872 * with a value passed from userland.
873 */
874int efx_filter_get_filter_safe(struct efx_nic *efx,
875 enum efx_filter_priority priority,
876 u32 filter_id, struct efx_filter_spec *spec_buf)
877{
878 struct efx_filter_state *state = efx->filter_state;
879 enum efx_filter_table_id table_id;
880 struct efx_filter_table *table;
881 struct efx_filter_spec *spec;
882 unsigned int filter_idx;
883 u8 filter_flags;
884 int rc;
885
886 table_id = efx_filter_id_table_id(filter_id);
887 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
888 return -ENOENT;
889 table = &state->table[table_id];
890
891 filter_idx = efx_filter_id_index(filter_id);
892 if (filter_idx >= table->size)
893 return -ENOENT;
894 spec = &table->spec[filter_idx];
895
896 filter_flags = efx_filter_id_flags(filter_id);
897
898 spin_lock_bh(&state->lock);
899
900 if (test_bit(filter_idx, table->used_bitmap) &&
901 spec->priority == priority) {
902 *spec_buf = *spec;
903 rc = 0;
904 } else {
905 rc = -ENOENT;
906 }
907
908 spin_unlock_bh(&state->lock);
909
910 return rc;
911}
912
913static void efx_filter_table_clear(struct efx_nic *efx,
914 enum efx_filter_table_id table_id,
915 enum efx_filter_priority priority)
916{
917 struct efx_filter_state *state = efx->filter_state;
918 struct efx_filter_table *table = &state->table[table_id];
919 unsigned int filter_idx;
920
921 spin_lock_bh(&state->lock);
922
923 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
924 if (table->spec[filter_idx].priority <= priority)
925 efx_filter_table_clear_entry(efx, table, filter_idx);
926 if (table->used == 0)
927 efx_filter_table_reset_search_depth(table);
928
929 spin_unlock_bh(&state->lock);
930}
931
932/**
933 * efx_filter_clear_rx - remove RX filters by priority
934 * @efx: NIC from which to remove the filters
935 * @priority: Maximum priority to remove
936 */
937void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
938{
939 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
940 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
941}
942
943u32 efx_filter_count_rx_used(struct efx_nic *efx,
944 enum efx_filter_priority priority)
945{
946 struct efx_filter_state *state = efx->filter_state;
947 enum efx_filter_table_id table_id;
948 struct efx_filter_table *table;
949 unsigned int filter_idx;
950 u32 count = 0;
951
952 spin_lock_bh(&state->lock);
953
954 for (table_id = EFX_FILTER_TABLE_RX_IP;
955 table_id <= EFX_FILTER_TABLE_RX_DEF;
956 table_id++) {
957 table = &state->table[table_id];
958 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
959 if (test_bit(filter_idx, table->used_bitmap) &&
960 table->spec[filter_idx].priority == priority)
961 ++count;
962 }
963 }
964
965 spin_unlock_bh(&state->lock);
966
967 return count;
968}
969
970s32 efx_filter_get_rx_ids(struct efx_nic *efx,
971 enum efx_filter_priority priority,
972 u32 *buf, u32 size)
973{
974 struct efx_filter_state *state = efx->filter_state;
975 enum efx_filter_table_id table_id;
976 struct efx_filter_table *table;
977 unsigned int filter_idx;
978 s32 count = 0;
979
980 spin_lock_bh(&state->lock);
981
982 for (table_id = EFX_FILTER_TABLE_RX_IP;
983 table_id <= EFX_FILTER_TABLE_RX_DEF;
984 table_id++) {
985 table = &state->table[table_id];
986 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
987 if (test_bit(filter_idx, table->used_bitmap) &&
988 table->spec[filter_idx].priority == priority) {
989 if (count == size) {
990 count = -EMSGSIZE;
991 goto out;
992 }
993 buf[count++] = efx_filter_make_id(
994 &table->spec[filter_idx], filter_idx);
995 }
996 }
997 }
998out:
999 spin_unlock_bh(&state->lock);
1000
1001 return count;
1002}
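The return convention supports a two-call pattern: size a buffer from efx_filter_count_rx_used(), then fill it, treating -EMSGSIZE as "the table changed between the calls". A sketch under those assumptions (the helper name is illustrative):

static s32 example_list_rules(struct efx_nic *efx,
			      enum efx_filter_priority prio)
{
	u32 count = efx_filter_count_rx_used(efx, prio);
	u32 *buf;
	s32 rc;

	buf = kcalloc(count ?: 1, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = efx_filter_get_rx_ids(efx, prio, buf, count);
	/* On success, buf[0..rc-1] holds filter IDs; -EMSGSIZE means
	 * filters were inserted concurrently and the caller may retry.
	 */
	kfree(buf);
	return rc;
}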
1003
1004/* Restore filter state after reset */
1005void efx_restore_filters(struct efx_nic *efx)
1006{
1007 struct efx_filter_state *state = efx->filter_state;
1008 enum efx_filter_table_id table_id;
1009 struct efx_filter_table *table;
1010 efx_oword_t filter;
1011 unsigned int filter_idx;
1012
1013 spin_lock_bh(&state->lock);
1014
1015 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1016 table = &state->table[table_id];
1017
1018 /* Check whether this is a regular register table */
1019 if (table->step == 0)
1020 continue;
1021
1022 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1023 if (!test_bit(filter_idx, table->used_bitmap))
1024 continue;
1025 efx_filter_build(&filter, &table->spec[filter_idx]);
1026 efx_writeo(efx, &filter,
1027 table->offset + table->step * filter_idx);
1028 }
1029 }
1030
1031 efx_filter_push_rx_config(efx);
1032 efx_filter_push_tx_limits(efx);
1033
1034 spin_unlock_bh(&state->lock);
1035}
1036
1037int efx_probe_filters(struct efx_nic *efx)
1038{
1039 struct efx_filter_state *state;
1040 struct efx_filter_table *table;
1041 unsigned table_id;
1042
1043 state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
1044 if (!state)
1045 return -ENOMEM;
1046 efx->filter_state = state;
1047
1048 spin_lock_init(&state->lock);
1049
1050 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1051#ifdef CONFIG_RFS_ACCEL
1052 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
1053 sizeof(*state->rps_flow_id),
1054 GFP_KERNEL);
1055 if (!state->rps_flow_id)
1056 goto fail;
1057#endif
1058 table = &state->table[EFX_FILTER_TABLE_RX_IP];
1059 table->id = EFX_FILTER_TABLE_RX_IP;
1060 table->offset = FR_BZ_RX_FILTER_TBL0;
1061 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
1062 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
1063 }
1064
1065 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1066 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
1067 table->id = EFX_FILTER_TABLE_RX_MAC;
1068 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
1069 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
1070 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
1071
1072 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
1073 table->id = EFX_FILTER_TABLE_RX_DEF;
1074 table->size = EFX_FILTER_SIZE_RX_DEF;
1075
1076 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
1077 table->id = EFX_FILTER_TABLE_TX_MAC;
1078 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1079 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1080 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
1081 }
1082
1083 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1084 table = &state->table[table_id];
1085 if (table->size == 0)
1086 continue;
1087 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
1088 sizeof(unsigned long),
1089 GFP_KERNEL);
1090 if (!table->used_bitmap)
1091 goto fail;
1092 table->spec = vzalloc(table->size * sizeof(*table->spec));
1093 if (!table->spec)
1094 goto fail;
1095 }
1096
1097 if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
1098 /* RX default filters must always exist */
1099 unsigned i;
1100 for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
1101 efx_filter_reset_rx_def(efx, i);
1102 }
1103
1104 efx_filter_push_rx_config(efx);
1105
1106 return 0;
1107
1108fail:
1109 efx_remove_filters(efx);
1110 return -ENOMEM;
1111}
1112
1113void efx_remove_filters(struct efx_nic *efx)
1114{
1115 struct efx_filter_state *state = efx->filter_state;
1116 enum efx_filter_table_id table_id;
1117
1118 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1119 kfree(state->table[table_id].used_bitmap);
1120 vfree(state->table[table_id].spec);
1121 }
1122#ifdef CONFIG_RFS_ACCEL
1123 kfree(state->rps_flow_id);
1124#endif
1125 kfree(state);
1126}
1127
1128/* Update scatter enable flags for filters pointing to our own RX queues */
1129void efx_filter_update_rx_scatter(struct efx_nic *efx)
1130{
1131 struct efx_filter_state *state = efx->filter_state;
1132 enum efx_filter_table_id table_id;
1133 struct efx_filter_table *table;
1134 efx_oword_t filter;
1135 unsigned int filter_idx;
1136
1137 spin_lock_bh(&state->lock);
1138
1139 for (table_id = EFX_FILTER_TABLE_RX_IP;
1140 table_id <= EFX_FILTER_TABLE_RX_DEF;
1141 table_id++) {
1142 table = &state->table[table_id];
1143
1144 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1145 if (!test_bit(filter_idx, table->used_bitmap) ||
1146 table->spec[filter_idx].dmaq_id >=
1147 efx->n_rx_channels)
1148 continue;
1149
1150 if (efx->rx_scatter)
1151 table->spec[filter_idx].flags |=
1152 EFX_FILTER_FLAG_RX_SCATTER;
1153 else
1154 table->spec[filter_idx].flags &=
1155 ~EFX_FILTER_FLAG_RX_SCATTER;
1156
1157 if (table_id == EFX_FILTER_TABLE_RX_DEF)
1158 /* Pushed by efx_filter_push_rx_config() */
1159 continue;
1160
1161 efx_filter_build(&filter, &table->spec[filter_idx]);
1162 efx_writeo(efx, &filter,
1163 table->offset + table->step * filter_idx);
1164 }
1165 }
1166
1167 efx_filter_push_rx_config(efx);
1168
1169 spin_unlock_bh(&state->lock);
1170}
1171
1172#ifdef CONFIG_RFS_ACCEL
1173
1174int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1175 u16 rxq_index, u32 flow_id)
1176{
1177 struct efx_nic *efx = netdev_priv(net_dev);
1178 struct efx_channel *channel;
1179 struct efx_filter_state *state = efx->filter_state;
1180 struct efx_filter_spec spec;
1181 const struct iphdr *ip;
1182 const __be16 *ports;
1183 int nhoff;
1184 int rc;
1185
1186 nhoff = skb_network_offset(skb);
1187
1188 if (skb->protocol == htons(ETH_P_8021Q)) {
1189 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
1190 nhoff + sizeof(struct vlan_hdr));
1191 if (((const struct vlan_hdr *)(skb->data + nhoff))->
1192 h_vlan_encapsulated_proto != htons(ETH_P_IP))
1193 return -EPROTONOSUPPORT;
1194
1195 /* This is IP over 802.1q VLAN. We can't filter on the
1196 * IP 5-tuple and the vlan together, so just strip the
1197 * vlan header and filter on the IP part.
1198 */
1199 nhoff += sizeof(struct vlan_hdr);
1200 } else if (skb->protocol != htons(ETH_P_IP)) {
1201 return -EPROTONOSUPPORT;
1202 }
1203
1204 /* RFS must validate the IP header length before calling us */
1205 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
1206 ip = (const struct iphdr *)(skb->data + nhoff);
1207 if (ip_is_fragment(ip))
1208 return -EPROTONOSUPPORT;
1209 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1210 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1211
1212 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
1213 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
1214 rxq_index);
1215 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1216 ip->daddr, ports[1], ip->saddr, ports[0]);
1217 if (rc)
1218 return rc;
1219
1220 rc = efx_filter_insert_filter(efx, &spec, true);
1221 if (rc < 0)
1222 return rc;
1223
1224 /* Remember this so we can check whether to expire the filter later */
1225 state->rps_flow_id[rc] = flow_id;
1226 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
1227 ++channel->rfs_filters_added;
1228
1229 netif_info(efx, rx_status, efx->net_dev,
1230 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
1231 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
1232 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
1233 rxq_index, flow_id, rc);
1234
1235 return rc;
1236}
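efx_filter_rfs() has the signature expected of a ->ndo_rx_flow_steer() callback. A sketch of the wiring, assuming a standard net_device_ops table (not necessarily this driver's exact ops structure):

#ifdef CONFIG_RFS_ACCEL
static const struct net_device_ops example_netdev_ops = {
	/* ... other ops elided ... */
	.ndo_rx_flow_steer	= efx_filter_rfs,
};
#endif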
1237
1238bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
1239{
1240 struct efx_filter_state *state = efx->filter_state;
1241 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
1242 unsigned mask = table->size - 1;
1243 unsigned index;
1244 unsigned stop;
1245
1246 if (!spin_trylock_bh(&state->lock))
1247 return false;
1248
1249 index = state->rps_expire_index;
1250 stop = (index + quota) & mask;
1251
1252 while (index != stop) {
1253 if (test_bit(index, table->used_bitmap) &&
1254 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
1255 rps_may_expire_flow(efx->net_dev,
1256 table->spec[index].dmaq_id,
1257 state->rps_flow_id[index], index)) {
1258 netif_info(efx, rx_status, efx->net_dev,
1259 "expiring filter %d [flow %u]\n",
1260 index, state->rps_flow_id[index]);
1261 efx_filter_table_clear_entry(efx, table, index);
1262 }
1263 index = (index + 1) & mask;
1264 }
1265
1266 state->rps_expire_index = stop;
1267 if (table->used == 0)
1268 efx_filter_table_reset_search_depth(table);
1269
1270 spin_unlock_bh(&state->lock);
1271 return true;
1272}
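The quota bounds how many table entries one invocation scans, so expiry work stays cheap enough for a hot path. A sketch of a periodic caller with an illustrative quota:

static void example_expire_old_flows(struct efx_nic *efx)
{
	/* Typically invoked from NAPI poll; 100 is illustrative. */
	__efx_filter_rfs_expire(efx, 100);
}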
1273
1274#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 5cb54723b824..63c77a557178 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -1,6 +1,6 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
@@ -11,32 +11,49 @@
 #define EFX_FILTER_H

 #include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>

 /**
- * enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
- * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
- * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
- * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
- * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
- * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
- * @EFX_FILTER_UNSPEC: Match type is unspecified
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ *	Used for RX default unicast and multicast/broadcast filters.
 *
- * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ *   local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ *   or local 2-tuple, or local MAC with or without outer VID, and RX
+ *   default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ *   using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ *   with or without outer and inner VID
 */
-enum efx_filter_type {
-	EFX_FILTER_TCP_FULL = 0,
-	EFX_FILTER_TCP_WILD,
-	EFX_FILTER_UDP_FULL,
-	EFX_FILTER_UDP_WILD,
-	EFX_FILTER_MAC_FULL = 4,
-	EFX_FILTER_MAC_WILD,
-	EFX_FILTER_UC_DEF = 8,
-	EFX_FILTER_MC_DEF,
-	EFX_FILTER_TYPE_COUNT,		/* number of specific types */
-	EFX_FILTER_UNSPEC = 0xf,
+enum efx_filter_match_flags {
+	EFX_FILTER_MATCH_REM_HOST = 0x0001,
+	EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+	EFX_FILTER_MATCH_REM_MAC = 0x0004,
+	EFX_FILTER_MATCH_REM_PORT = 0x0008,
+	EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+	EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+	EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+	EFX_FILTER_MATCH_INNER_VID = 0x0080,
+	EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+	EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+	EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
 };

 /**
@@ -61,37 +78,75 @@ enum efx_filter_priority {
 *	according to the indirection table.
 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
 *	queue.
+ * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
+ *	network stack. The filter must have a priority of
+ *	%EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
+ *	request with priority %EFX_FILTER_PRI_MANUAL, and a removal
+ *	request with priority %EFX_FILTER_PRI_MANUAL will reset the
+ *	steering (but not remove the filter).
 * @EFX_FILTER_FLAG_RX: Filter is for RX
 * @EFX_FILTER_FLAG_TX: Filter is for TX
 */
 enum efx_filter_flags {
 	EFX_FILTER_FLAG_RX_RSS = 0x01,
 	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+	EFX_FILTER_FLAG_RX_STACK = 0x04,
 	EFX_FILTER_FLAG_RX = 0x08,
 	EFX_FILTER_FLAG_TX = 0x10,
 };

 /**
 * struct efx_filter_spec - specification for a hardware filter
- * @type: Type of match to be performed, from &enum efx_filter_type
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
 * @priority: Priority of the filter, from &enum efx_filter_priority
 * @flags: Miscellaneous flags, from &enum efx_filter_flags
- * @dmaq_id: Source/target queue index
- * @data: Match data (type-dependent)
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ *	an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ *	%EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ *	is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
 *
- * Use the efx_filter_set_*() functions to initialise the @type and
- * @data fields.
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
 *
 * The @priority field is used by software to determine whether a new
 * filter may replace an old one. The hardware priority of a filter
- * depends on the filter type.
+ * depends on which fields are matched.
 */
 struct efx_filter_spec {
-	u8	type:4;
-	u8	priority:4;
-	u8	flags;
-	u16	dmaq_id;
-	u32	data[3];
+	u32	match_flags:12;
+	u32	priority:2;
+	u32	flags:6;
+	u32	dmaq_id:12;
+	u32	rss_context;
+	__be16	outer_vid __aligned(4); /* allow jhash2() of match values */
+	__be16	inner_vid;
+	u8	loc_mac[ETH_ALEN];
+	u8	rem_mac[ETH_ALEN];
+	__be16	ether_type;
+	u8	ip_proto;
+	__be32	loc_host[4];
+	__be32	rem_host[4];
+	__be16	loc_port;
+	__be16	rem_port;
+	/* total 64 bytes */
+};
+
+enum {
+	EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
+	EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
 };

 static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
 				      enum efx_filter_flags flags,
 				      unsigned rxq_id)
 {
-	spec->type = EFX_FILTER_UNSPEC;
+	memset(spec, 0, sizeof(*spec));
 	spec->priority = priority;
 	spec->flags = EFX_FILTER_FLAG_RX | flags;
+	spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
 	spec->dmaq_id = rxq_id;
 }

 static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
 				      unsigned txq_id)
 {
-	spec->type = EFX_FILTER_UNSPEC;
+	memset(spec, 0, sizeof(*spec));
 	spec->priority = EFX_FILTER_PRI_REQUIRED;
 	spec->flags = EFX_FILTER_FLAG_TX;
 	spec->dmaq_id = txq_id;
 }

-extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
-				     __be32 host, __be16 port);
-extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
-				     u8 *proto, __be32 *host, __be16 *port);
-extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
-				    __be32 host, __be16 port,
-				    __be32 rhost, __be16 rport);
-extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
-				    u8 *proto, __be32 *host, __be16 *port,
-				    __be32 *rhost, __be16 *rport);
-extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
-				    u16 vid, const u8 *addr);
-extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
-				    u16 *vid, u8 *addr);
-extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
-extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+			  __be32 host, __be16 port)
+{
+	spec->match_flags |=
+		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+	spec->ether_type = htons(ETH_P_IP);
+	spec->ip_proto = proto;
+	spec->loc_host[0] = host;
+	spec->loc_port = port;
+	return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+			 __be32 lhost, __be16 lport,
+			 __be32 rhost, __be16 rport)
+{
+	spec->match_flags |=
+		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+	spec->ether_type = htons(ETH_P_IP);
+	spec->ip_proto = proto;
+	spec->loc_host[0] = lhost;
+	spec->loc_port = lport;
+	spec->rem_host[0] = rhost;
+	spec->rem_port = rport;
+	return 0;
+}
+
 enum {
 	EFX_FILTER_VID_UNSPEC = 0xffff,
 };

+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+					   u16 vid, const u8 *addr)
+{
+	if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+		return -EINVAL;
+
+	if (vid != EFX_FILTER_VID_UNSPEC) {
+		spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+		spec->outer_vid = htons(vid);
+	}
+	if (addr != NULL) {
+		spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+		memcpy(spec->loc_mac, addr, ETH_ALEN);
+	}
+	return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+	return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+	spec->loc_mac[0] = 1;
+	return 0;
+}
+
 #endif /* EFX_FILTER_H */
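The new inline helpers compose: init, then set the match fields, then insert. A sketch building a TCP/IPv4 4-tuple steering filter; the addresses, ports and priority are illustrative:

static int example_steer_tcp_flow(struct efx_nic *efx, unsigned int rxq)
{
	struct efx_filter_spec spec;
	int rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq);
	/* local 192.168.0.1:80, remote 192.168.0.2:12345, network order */
	rc = efx_filter_set_ipv4_full(&spec, IPPROTO_TCP,
				      htonl(0xc0a80001), htons(80),
				      htonl(0xc0a80002), htons(12345));
	if (rc)
		return rc;
	return efx_filter_insert_filter(efx, &spec, true);
}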
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96759aee1c6c..96ce507d8602 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -1,7 +1,7 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
@@ -20,7 +20,7 @@
 *
 **************************************************************************
 *
- * Notes on locking strategy:
+ * Notes on locking strategy for the Falcon architecture:
 *
 * Many CSRs are very wide and cannot be read or written atomically.
 * Writes from the host are buffered by the Bus Interface Unit (BIU)
@@ -54,6 +54,12 @@
 * register while the collector already holds values for some other
 * register, the write is discarded and the collector maintains its
 * current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
 */

 #if BITS_PER_LONG == 64
@@ -83,7 +89,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
 }

 /* Write a normal 128-bit CSR, locking as appropriate. */
-static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
 			      unsigned int reg)
 {
 	unsigned long flags __attribute__ ((unused));
@@ -108,7 +114,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,

 /* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
 static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
-				   efx_qword_t *value, unsigned int index)
+				   const efx_qword_t *value, unsigned int index)
 {
 	unsigned int addr = index * sizeof(*value);
 	unsigned long flags __attribute__ ((unused));
@@ -129,7 +135,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
 }

 /* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
-static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
 			      unsigned int reg)
 {
 	netif_vdbg(efx, hw, efx->net_dev,
@@ -190,8 +196,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
 }

 /* Write a 128-bit CSR forming part of a table */
-static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
-				    unsigned int reg, unsigned int index)
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+		 unsigned int reg, unsigned int index)
 {
 	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
 }
@@ -203,12 +210,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
 	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
 }

-/* Page-mapped register block size */
-#define EFX_PAGE_BLOCK_SIZE	0x2000
+/* Page size used as step between per-VI registers */
+#define EFX_VI_PAGE_SIZE	0x2000

-/* Calculate offset to page-mapped register block */
+/* Calculate offset to page-mapped register */
 #define EFX_PAGED_REG(page, reg) \
-	((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+	((page) * EFX_VI_PAGE_SIZE + (reg))

 /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
 static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
@@ -236,19 +243,24 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
 		BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10),	\
 		page)

-/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
- * RX_DESC_UPD or TX_DESC_UPD)
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
 */
-static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
-				    unsigned int reg, unsigned int page)
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+		 unsigned int reg, unsigned int page)
 {
 	efx_writed(efx, value, EFX_PAGED_REG(page, reg));
 }
 #define efx_writed_page(efx, value, reg, page)				\
 	_efx_writed_page(efx, value,					\
 			 reg +						\
-			 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c	\
-					   && (reg) != 0xa1c),		\
+			 BUILD_BUG_ON_ZERO((reg) != 0x400 &&		\
+					   (reg) != 0x420 &&		\
+					   (reg) != 0x830 &&		\
+					   (reg) != 0x83c &&		\
+					   (reg) != 0xa18 &&		\
+					   (reg) != 0xa1c),		\
 			 page)

 /* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
@@ -256,7 +268,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
 * collector register.
 */
 static inline void _efx_writed_page_locked(struct efx_nic *efx,
-					   efx_dword_t *value,
+					   const efx_dword_t *value,
 					   unsigned int reg,
 					   unsigned int page)
 {
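The page/register arithmetic above is plain multiplication and addition; a compile-time sanity check makes it concrete (illustrative only, not part of the driver):

static inline void example_paged_reg_check(void)
{
	/* page 3 of a register at offset 0x400:
	 * 3 * 0x2000 + 0x400 == 0x6400
	 */
	BUILD_BUG_ON(EFX_PAGED_REG(3, 0x400) != 0x6400);
}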
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 97dd8f18c001..128d7cdf9eb2 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1,6 +1,6 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
@@ -8,10 +8,11 @@
 */

 #include <linux/delay.h>
+#include <asm/cmpxchg.h>
 #include "net_driver.h"
 #include "nic.h"
 #include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
 #include "mcdi_pcol.h"
 #include "phy.h"

@@ -24,112 +25,235 @@

 #define MCDI_RPC_TIMEOUT       (10 * HZ)

-#define MCDI_PDU(efx)							\
-	(efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
-#define MCDI_DOORBELL(efx)						\
-	(efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
-#define MCDI_STATUS(efx)						\
-	(efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
-
 /* A reboot/assertion causes the MCDI status word to be set after the
  * command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 10ms for the status word to be set. */
+ * via these mechanisms then wait 20ms for the status word to be set.
+ */
 #define MCDI_STATUS_DELAY_US		100
-#define MCDI_STATUS_DELAY_COUNT		100
+#define MCDI_STATUS_DELAY_COUNT		200
 #define MCDI_STATUS_SLEEP_MS						\
 	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

 #define SEQ_MASK							\
 	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

+struct efx_mcdi_async_param {
+	struct list_head list;
+	unsigned int cmd;
+	size_t inlen;
+	size_t outlen;
+	efx_mcdi_async_completer *complete;
+	unsigned long cookie;
+	/* followed by request/response buffer */
+};
+
+static void efx_mcdi_timeout_async(unsigned long context);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+			       bool *was_attached_out);
+
 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
 {
-	struct siena_nic_data *nic_data;
-	EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-	nic_data = efx->nic_data;
-	return &nic_data->mcdi;
+	EFX_BUG_ON_PARANOID(!efx->mcdi);
+	return &efx->mcdi->iface;
 }

-void efx_mcdi_init(struct efx_nic *efx)
+int efx_mcdi_init(struct efx_nic *efx)
 {
 	struct efx_mcdi_iface *mcdi;
+	bool already_attached;
+	int rc;

-	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
-		return;
+	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+	if (!efx->mcdi)
+		return -ENOMEM;

 	mcdi = efx_mcdi(efx);
+	mcdi->efx = efx;
 	init_waitqueue_head(&mcdi->wq);
 	spin_lock_init(&mcdi->iface_lock);
-	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+	mcdi->state = MCDI_STATE_QUIESCENT;
 	mcdi->mode = MCDI_MODE_POLL;
+	spin_lock_init(&mcdi->async_lock);
+	INIT_LIST_HEAD(&mcdi->async_list);
+	setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
+		    (unsigned long)mcdi);

 	(void) efx_mcdi_poll_reboot(efx);
+	mcdi->new_epoch = true;
+
+	/* Recover from a failed assertion before probing */
+	rc = efx_mcdi_handle_assertion(efx);
+	if (rc)
+		return rc;
+
+	/* Let the MC (and BMC, if this is a LOM) know that the driver
+	 * is loaded. We should do this before we reset the NIC.
+	 */
+	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "Unable to register driver with MCPU\n");
+		return rc;
+	}
+	if (already_attached)
+		/* Not a fatal error */
+		netif_err(efx, probe, efx->net_dev,
+			  "Host already registered with MCPU\n");
+
+	return 0;
 }

-static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
-			    const u8 *inbuf, size_t inlen)
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+	if (!efx->mcdi)
+		return;
+
+	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+	/* Relinquish the device (back to the BMC, if this is a LOM) */
+	efx_mcdi_drv_attach(efx, false, NULL);
+
+	kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+				  const efx_dword_t *inbuf, size_t inlen)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
-	unsigned int i;
-	efx_dword_t hdr;
+	efx_dword_t hdr[2];
+	size_t hdr_len;
 	u32 xflags, seqno;

-	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
-	BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
+	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+	spin_lock_bh(&mcdi->iface_lock);
+	++mcdi->seqno;
+	spin_unlock_bh(&mcdi->iface_lock);

 	seqno = mcdi->seqno & SEQ_MASK;
 	xflags = 0;
 	if (mcdi->mode == MCDI_MODE_EVENTS)
 		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

-	EFX_POPULATE_DWORD_6(hdr,
-			     MCDI_HEADER_RESPONSE, 0,
-			     MCDI_HEADER_RESYNC, 1,
-			     MCDI_HEADER_CODE, cmd,
-			     MCDI_HEADER_DATALEN, inlen,
-			     MCDI_HEADER_SEQ, seqno,
-			     MCDI_HEADER_XFLAGS, xflags);
-
-	efx_writed(efx, &hdr, pdu);
+	if (efx->type->mcdi_max_ver == 1) {
+		/* MCDI v1 */
+		EFX_POPULATE_DWORD_7(hdr[0],
+				     MCDI_HEADER_RESPONSE, 0,
+				     MCDI_HEADER_RESYNC, 1,
+				     MCDI_HEADER_CODE, cmd,
+				     MCDI_HEADER_DATALEN, inlen,
+				     MCDI_HEADER_SEQ, seqno,
+				     MCDI_HEADER_XFLAGS, xflags,
+				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+		hdr_len = 4;
+	} else {
+		/* MCDI v2 */
+		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+		EFX_POPULATE_DWORD_7(hdr[0],
+				     MCDI_HEADER_RESPONSE, 0,
+				     MCDI_HEADER_RESYNC, 1,
+				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+				     MCDI_HEADER_DATALEN, 0,
+				     MCDI_HEADER_SEQ, seqno,
+				     MCDI_HEADER_XFLAGS, xflags,
+				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+		EFX_POPULATE_DWORD_2(hdr[1],
+				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+		hdr_len = 8;
+	}

-	for (i = 0; i < inlen; i += 4)
-		_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

-	/* Ensure the payload is written out before the header */
-	wmb();
+	mcdi->new_epoch = false;
+}

-	/* ring the doorbell with a distinctive value */
-	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+	switch (mcdi_err) {
+	case 0:
+		return 0;
+#define TRANSLATE_ERROR(name)					\
+	case MC_CMD_ERR_ ## name:				\
+		return -name;
+	TRANSLATE_ERROR(EPERM);
+	TRANSLATE_ERROR(ENOENT);
+	TRANSLATE_ERROR(EINTR);
+	TRANSLATE_ERROR(EAGAIN);
+	TRANSLATE_ERROR(EACCES);
+	TRANSLATE_ERROR(EBUSY);
+	TRANSLATE_ERROR(EINVAL);
+	TRANSLATE_ERROR(EDEADLK);
+	TRANSLATE_ERROR(ENOSYS);
+	TRANSLATE_ERROR(ETIME);
+	TRANSLATE_ERROR(EALREADY);
+	TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+	case MC_CMD_ERR_ALLOC_FAIL:
+		return -ENOBUFS;
+	case MC_CMD_ERR_MAC_EXIST:
+		return -EADDRINUSE;
+	default:
+		return -EPROTO;
+	}
 }

-static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-	int i;
+	unsigned int respseq, respcmd, error;
+	efx_dword_t hdr;
+
+	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

-	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
-	BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
+	if (respcmd != MC_CMD_V2_EXTN) {
+		mcdi->resp_hdr_len = 4;
+		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+	} else {
+		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+		mcdi->resp_hdr_len = 8;
+		mcdi->resp_data_len =
+			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+	}

-	for (i = 0; i < outlen; i += 4)
-		*((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+	if (error && mcdi->resp_data_len == 0) {
+		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+		mcdi->resprc = -EIO;
+	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+			  respseq, mcdi->seqno);
+		mcdi->resprc = -EIO;
+	} else if (error) {
+		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+		mcdi->resprc =
+			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+	} else {
+		mcdi->resprc = 0;
+	}
 }

 static int efx_mcdi_poll(struct efx_nic *efx)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 	unsigned long time, finish;
-	unsigned int respseq, respcmd, error;
-	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-	unsigned int rc, spins;
-	efx_dword_t reg;
+	unsigned int spins;
+	int rc;

 	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
-	rc = -efx_mcdi_poll_reboot(efx);
-	if (rc)
-		goto out;
+	rc = efx_mcdi_poll_reboot(efx);
+	if (rc) {
+		spin_lock_bh(&mcdi->iface_lock);
+		mcdi->resprc = rc;
+		mcdi->resp_hdr_len = 0;
+		mcdi->resp_data_len = 0;
+		spin_unlock_bh(&mcdi->iface_lock);
+		return 0;
+	}

 	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
 	 * because generally mcdi responses are fast. After that, back off
@@ -149,59 +273,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
 		time = jiffies;

 		rmb();
-		efx_readd(efx, &reg, pdu);
-
-		/* All 1's indicates that shared memory is in reset (and is
-		 * not a valid header). Wait for it to come out reset before
-		 * completing the command */
-		if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
-		    EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+		if (efx->type->mcdi_poll_response(efx))
 			break;

 		if (time_after(time, finish))
 			return -ETIMEDOUT;
 	}

-	mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
-	respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
-	respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
-	error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
-
-	if (error && mcdi->resplen == 0) {
-		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
-		rc = EIO;
-	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
-		netif_err(efx, hw, efx->net_dev,
-			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
-			  respseq, mcdi->seqno);
-		rc = EIO;
-	} else if (error) {
-		efx_readd(efx, &reg, pdu + 4);
-		switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
-#define TRANSLATE_ERROR(name)					\
-		case MC_CMD_ERR_ ## name:			\
-			rc = name;				\
-			break
-			TRANSLATE_ERROR(ENOENT);
-			TRANSLATE_ERROR(EINTR);
-			TRANSLATE_ERROR(EACCES);
-			TRANSLATE_ERROR(EBUSY);
-			TRANSLATE_ERROR(EINVAL);
-			TRANSLATE_ERROR(EDEADLK);
-			TRANSLATE_ERROR(ENOSYS);
-			TRANSLATE_ERROR(ETIME);
-#undef TRANSLATE_ERROR
-		default:
-			rc = EIO;
-			break;
-		}
-	} else
-		rc = 0;
-
-out:
-	mcdi->resprc = rc;
-	if (rc)
-		mcdi->resplen = 0;
+	spin_lock_bh(&mcdi->iface_lock);
+	efx_mcdi_read_response_header(efx);
+	spin_unlock_bh(&mcdi->iface_lock);

 	/* Return rc=0 like wait_event_timeout() */
 	return 0;
@@ -212,52 +293,36 @@ out:
 */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
-	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
-	efx_dword_t reg;
-	uint32_t value;
-
-	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
-		return false;
-
-	efx_readd(efx, &reg, addr);
-	value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
-
-	if (value == 0)
+	if (!efx->mcdi)
 		return 0;

-	/* MAC statistics have been cleared on the NIC; clear our copy
-	 * so that efx_update_diff_stat() can continue to work.
-	 */
-	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
-
-	EFX_ZERO_DWORD(reg);
-	efx_writed(efx, &reg, addr);
+	return efx->type->mcdi_poll_reboot(efx);
+}

-	if (value == MC_STATUS_DWORD_ASSERT)
-		return -EINTR;
-	else
-		return -EIO;
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+	return cmpxchg(&mcdi->state,
+		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+		MCDI_STATE_QUIESCENT;
 }

-static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
 {
 	/* Wait until the interface becomes QUIESCENT and we win the race
-	 * to mark it RUNNING. */
+	 * to mark it RUNNING_SYNC.
+	 */
 	wait_event(mcdi->wq,
-		   atomic_cmpxchg(&mcdi->state,
-				  MCDI_STATE_QUIESCENT,
-				  MCDI_STATE_RUNNING)
-		   == MCDI_STATE_QUIESCENT);
+		   cmpxchg(&mcdi->state,
+			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+		   MCDI_STATE_QUIESCENT);
 }

 static int efx_mcdi_await_completion(struct efx_nic *efx)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

-	if (wait_event_timeout(
-		    mcdi->wq,
-		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
-		    MCDI_RPC_TIMEOUT) == 0)
+	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+			       MCDI_RPC_TIMEOUT) == 0)
 		return -ETIMEDOUT;

 	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -274,17 +339,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
 	return 0;
 }

-static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester. Return whether this was done. Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
 {
-	/* If the interface is RUNNING, then move to COMPLETED and wake any
-	 * waiters. If the interface isn't in RUNNING then we've received a
-	 * duplicate completion after we've already transitioned back to
-	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
-	 * have failed the seqno check].
-	 */
-	if (atomic_cmpxchg(&mcdi->state,
-			   MCDI_STATE_RUNNING,
-			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+	if (cmpxchg(&mcdi->state,
+		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+	    MCDI_STATE_RUNNING_SYNC) {
 		wake_up(&mcdi->wq);
 		return true;
 	}
@@ -294,12 +356,93 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)

 static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
 {
-	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+	if (mcdi->mode == MCDI_MODE_EVENTS) {
+		struct efx_mcdi_async_param *async;
+		struct efx_nic *efx = mcdi->efx;
+
+		/* Process the asynchronous request queue */
+		spin_lock_bh(&mcdi->async_lock);
+		async = list_first_entry_or_null(
+			&mcdi->async_list, struct efx_mcdi_async_param, list);
+		if (async) {
+			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+			efx_mcdi_send_request(efx, async->cmd,
+					      (const efx_dword_t *)(async + 1),
+					      async->inlen);
+			mod_timer(&mcdi->async_timer,
+				  jiffies + MCDI_RPC_TIMEOUT);
+		}
+		spin_unlock_bh(&mcdi->async_lock);
+
+		if (async)
+			return;
+	}
+
+	mcdi->state = MCDI_STATE_QUIESCENT;
 	wake_up(&mcdi->wq);
 }

+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done. Must be called in bh-disabled
+ * context. Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+	struct efx_nic *efx = mcdi->efx;
+	struct efx_mcdi_async_param *async;
+	size_t hdr_len, data_len;
+	efx_dword_t *outbuf;
+	int rc;
+
+	if (cmpxchg(&mcdi->state,
+		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+	    MCDI_STATE_RUNNING_ASYNC)
+		return false;
+
+	spin_lock(&mcdi->iface_lock);
+	if (timeout) {
+		/* Ensure that if the completion event arrives later,
+		 * the seqno check in efx_mcdi_ev_cpl() will fail
+		 */
+		++mcdi->seqno;
+		++mcdi->credits;
+		rc = -ETIMEDOUT;
+		hdr_len = 0;
+		data_len = 0;
+	} else {
+		rc = mcdi->resprc;
+		hdr_len = mcdi->resp_hdr_len;
+		data_len = mcdi->resp_data_len;
+	}
+	spin_unlock(&mcdi->iface_lock);
+
+	/* Stop the timer. In case the timer function is running, we
+	 * must wait for it to return so that there is no possibility
+	 * of it aborting the next request.
+	 */
+	if (!timeout)
+		del_timer_sync(&mcdi->async_timer);
+
+	spin_lock(&mcdi->async_lock);
+	async = list_first_entry(&mcdi->async_list,
+				 struct efx_mcdi_async_param, list);
+	list_del(&async->list);
+	spin_unlock(&mcdi->async_lock);
+
+	outbuf = (efx_dword_t *)(async + 1);
+	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+				      min(async->outlen, data_len));
+	async->complete(efx, async->cookie, rc, outbuf, data_len);
+	kfree(async);
+
+	efx_mcdi_release(mcdi);
+
+	return true;
+}
+
 static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
-			    unsigned int datalen, unsigned int errno)
+			    unsigned int datalen, unsigned int mcdi_err)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 	bool wake = false;
@@ -315,52 +458,161 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
 			   "MC response mismatch tx seq 0x%x rx "
 			   "seq 0x%x\n", seqno, mcdi->seqno);
 	} else {
-		mcdi->resprc = errno;
-		mcdi->resplen = datalen;
+		if (efx->type->mcdi_max_ver >= 2) {
+			/* MCDI v2 responses don't fit in an event */
+			efx_mcdi_read_response_header(efx);
+		} else {
+			mcdi->resprc = efx_mcdi_errno(mcdi_err);
+			mcdi->resp_hdr_len = 4;
+			mcdi->resp_data_len = datalen;
+		}

 		wake = true;
 	}

 	spin_unlock(&mcdi->iface_lock);

-	if (wake)
-		efx_mcdi_complete(mcdi);
+	if (wake) {
+		if (!efx_mcdi_complete_async(mcdi, false))
+			(void) efx_mcdi_complete_sync(mcdi);
+
+		/* If the interface isn't RUNNING_ASYNC or
+		 * RUNNING_SYNC then we've received a duplicate
+		 * completion after we've already transitioned back to
+		 * QUIESCENT. [A subsequent invocation would increment
+		 * seqno, so would have failed the seqno check].
+		 */
+	}
+}
+
+static void efx_mcdi_timeout_async(unsigned long context)
+{
+	struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+
+	efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+	if (efx->type->mcdi_max_ver < 0 ||
+	    (efx->type->mcdi_max_ver < 2 &&
+	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+		return -EINVAL;
+
+	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+	    (efx->type->mcdi_max_ver < 2 &&
+	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+		return -EMSGSIZE;
+
+	return 0;
 }

 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-		 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+		 const efx_dword_t *inbuf, size_t inlen,
+		 efx_dword_t *outbuf, size_t outlen,
 		 size_t *outlen_actual)
 {
-	efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+	int rc;
+
+	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+	if (rc)
+		return rc;
 	return efx_mcdi_rpc_finish(efx, cmd, inlen,
 				   outbuf, outlen, outlen_actual);
 }

-void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
-			size_t inlen)
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	int rc;

-	BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+	rc = efx_mcdi_check_supported(efx, cmd, inlen);
+	if (rc)
+		return rc;

-	efx_mcdi_acquire(mcdi);
+	efx_mcdi_acquire_sync(mcdi);
+	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+	return 0;
+}

-	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
-	spin_lock_bh(&mcdi->iface_lock);
-	++mcdi->seqno;
-	spin_unlock_bh(&mcdi->iface_lock);
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+		   efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	struct efx_mcdi_async_param *async;
+	int rc;
+
+	rc = efx_mcdi_check_supported(efx, cmd, inlen);
+	if (rc)
+		return rc;
+
+	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+			GFP_ATOMIC);
+	if (!async)
+		return -ENOMEM;
+
+	async->cmd = cmd;
+	async->inlen = inlen;
+	async->outlen = outlen;
+	async->complete = complete;
+	async->cookie = cookie;
+	memcpy(async + 1, inbuf, inlen);
+
+	spin_lock_bh(&mcdi->async_lock);
+
+	if (mcdi->mode == MCDI_MODE_EVENTS) {
+		list_add_tail(&async->list, &mcdi->async_list);
+
+		/* If this is at the front of the queue, try to start it
+		 * immediately
+		 */
+		if (mcdi->async_list.next == &async->list &&
+		    efx_mcdi_acquire_async(mcdi)) {
+			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+			mod_timer(&mcdi->async_timer,
+				  jiffies + MCDI_RPC_TIMEOUT);
+		}
+	} else {
+		kfree(async);
+		rc = -ENETDOWN;
+	}
+
+	spin_unlock_bh(&mcdi->async_lock);

-	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+	return rc;
 }

 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
-			u8 *outbuf, size_t outlen, size_t *outlen_actual)
+			efx_dword_t *outbuf, size_t outlen,
+			size_t *outlen_actual)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 	int rc;

-	BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-
 	if (mcdi->mode == MCDI_MODE_POLL)
 		rc = efx_mcdi_poll(efx);
 	else
@@ -380,22 +632,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 			  "MC command 0x%x inlen %d mode %d timed out\n",
 			  cmd, (int)inlen, mcdi->mode);
 	} else {
-		size_t resplen;
+		size_t hdr_len, data_len;

 		/* At the very least we need a memory barrier here to ensure
 		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
 		 * a spurious efx_mcdi_ev_cpl() running concurrently by
 		 * acquiring the iface_lock. */
 		spin_lock_bh(&mcdi->iface_lock);
-		rc = -mcdi->resprc;
-		resplen = mcdi->resplen;
+		rc = mcdi->resprc;
+		hdr_len = mcdi->resp_hdr_len;
+		data_len = mcdi->resp_data_len;
 		spin_unlock_bh(&mcdi->iface_lock);

+		BUG_ON(rc > 0);
+
 		if (rc == 0) {
-			efx_mcdi_copyout(efx, outbuf,
-					 min(outlen, mcdi->resplen + 3) & ~0x3);
+			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+						      min(outlen, data_len));
 			if (outlen_actual != NULL)
-				*outlen_actual = resplen;
+				*outlen_actual = data_len;
 		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
 			; /* Don't reset if MC_CMD_REBOOT returns EIO */
 		else if (rc == -EIO || rc == -EINTR) {
@@ -410,6 +665,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
410 if (rc == -EIO || rc == -EINTR) { 665 if (rc == -EIO || rc == -EINTR) {
411 msleep(MCDI_STATUS_SLEEP_MS); 666 msleep(MCDI_STATUS_SLEEP_MS);
412 efx_mcdi_poll_reboot(efx); 667 efx_mcdi_poll_reboot(efx);
668 mcdi->new_epoch = true;
413 } 669 }
414 } 670 }
415 671
@@ -417,11 +673,15 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
417 return rc; 673 return rc;
418} 674}
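For the split synchronous path, a hypothetical caller pairs efx_mcdi_rpc_start() with efx_mcdi_rpc_finish(); MC_CMD_EXAMPLE and inbuf are again illustrative names, not from this patch:

	MCDI_DECLARE_BUF(outbuf, MC_CMD_EXAMPLE_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_start(efx, MC_CMD_EXAMPLE, inbuf, sizeof(inbuf));
	if (rc)
		return rc;
	/* ...do other work while the MC processes the request... */
	rc = efx_mcdi_rpc_finish(efx, MC_CMD_EXAMPLE, sizeof(inbuf),
				 outbuf, sizeof(outbuf), &outlen);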
419 675
676/* Switch to polled MCDI completions. This can be called in various
677 * error conditions with various locks held, so it must be lockless.
678 * Caller is responsible for flushing asynchronous requests later.
679 */
420void efx_mcdi_mode_poll(struct efx_nic *efx) 680void efx_mcdi_mode_poll(struct efx_nic *efx)
421{ 681{
422 struct efx_mcdi_iface *mcdi; 682 struct efx_mcdi_iface *mcdi;
423 683
424 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 684 if (!efx->mcdi)
425 return; 685 return;
426 686
427 mcdi = efx_mcdi(efx); 687 mcdi = efx_mcdi(efx);
@@ -434,18 +694,57 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
434 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 694 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
435 * 695 *
436 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 696 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
437 * which efx_mcdi_complete() provides for us. 697 * which efx_mcdi_complete_sync() provides for us.
438 */ 698 */
439 mcdi->mode = MCDI_MODE_POLL; 699 mcdi->mode = MCDI_MODE_POLL;
440 700
441 efx_mcdi_complete(mcdi); 701 efx_mcdi_complete_sync(mcdi);
702}
703
704/* Flush any running or queued asynchronous requests, after event processing
 705 * has stopped.
706 */
707void efx_mcdi_flush_async(struct efx_nic *efx)
708{
709 struct efx_mcdi_async_param *async, *next;
710 struct efx_mcdi_iface *mcdi;
711
712 if (!efx->mcdi)
713 return;
714
715 mcdi = efx_mcdi(efx);
716
717 /* We must be in polling mode so no more requests can be queued */
718 BUG_ON(mcdi->mode != MCDI_MODE_POLL);
719
720 del_timer_sync(&mcdi->async_timer);
721
722 /* If a request is still running, make sure we give the MC
723 * time to complete it so that the response won't overwrite our
724 * next request.
725 */
726 if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
727 efx_mcdi_poll(efx);
728 mcdi->state = MCDI_STATE_QUIESCENT;
729 }
730
731 /* Nothing else will access the async list now, so it is safe
732 * to walk it without holding async_lock. If we hold it while
733 * calling a completer then lockdep may warn that we have
734 * acquired locks in the wrong order.
735 */
736 list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
737 async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
738 list_del(&async->list);
739 kfree(async);
740 }
442} 741}
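The BUG_ON above encodes the required teardown ordering; sketched as a hypothetical caller (not taken from this patch):

	efx_mcdi_mode_poll(efx);	/* no new async requests can be queued */
	efx_mcdi_flush_async(efx);	/* pending completers get rc = -ENETDOWN */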
443 742
444void efx_mcdi_mode_event(struct efx_nic *efx) 743void efx_mcdi_mode_event(struct efx_nic *efx)
445{ 744{
446 struct efx_mcdi_iface *mcdi; 745 struct efx_mcdi_iface *mcdi;
447 746
448 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 747 if (!efx->mcdi)
449 return; 748 return;
450 749
451 mcdi = efx_mcdi(efx); 750 mcdi = efx_mcdi(efx);
@@ -460,7 +759,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
460 * write memory barrier ensures that efx_mcdi_rpc() sees it, which 759 * write memory barrier ensures that efx_mcdi_rpc() sees it, which
461 * efx_mcdi_acquire() provides. 760 * efx_mcdi_acquire() provides.
462 */ 761 */
463 efx_mcdi_acquire(mcdi); 762 efx_mcdi_acquire_sync(mcdi);
464 mcdi->mode = MCDI_MODE_EVENTS; 763 mcdi->mode = MCDI_MODE_EVENTS;
465 efx_mcdi_release(mcdi); 764 efx_mcdi_release(mcdi);
466} 765}
@@ -477,19 +776,25 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
477 * are sent to the same queue, we can't be racing with 776 * are sent to the same queue, we can't be racing with
478 * efx_mcdi_ev_cpl()] 777 * efx_mcdi_ev_cpl()]
479 * 778 *
480 * There's a race here with efx_mcdi_rpc(), because we might receive 779 * If there is an outstanding asynchronous request, we can't
481 * a REBOOT event *before* the request has been copied out. In polled 780 * complete it now (efx_mcdi_complete() would deadlock). The
482 * mode (during startup) this is irrelevant, because efx_mcdi_complete() 781 * reset process will take care of this.
483 * is ignored. In event mode, this condition is just an edge-case of 782 *
484 * receiving a REBOOT event after posting the MCDI request. Did the mc 783 * There's a race here with efx_mcdi_send_request(), because
485 * reboot before or after the copyout? The best we can do always is 784 * we might receive a REBOOT event *before* the request has
486 * just return failure. 785 * been copied out. In polled mode (during startup) this is
786 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
787 * event mode, this condition is just an edge-case of
788 * receiving a REBOOT event after posting the MCDI
 789 * request. Did the MC reboot before or after the copyout? The
 790 * best we can always do is just return failure.
487 */ 791 */
488 spin_lock(&mcdi->iface_lock); 792 spin_lock(&mcdi->iface_lock);
489 if (efx_mcdi_complete(mcdi)) { 793 if (efx_mcdi_complete_sync(mcdi)) {
490 if (mcdi->mode == MCDI_MODE_EVENTS) { 794 if (mcdi->mode == MCDI_MODE_EVENTS) {
491 mcdi->resprc = rc; 795 mcdi->resprc = rc;
492 mcdi->resplen = 0; 796 mcdi->resp_hdr_len = 0;
797 mcdi->resp_data_len = 0;
493 ++mcdi->credits; 798 ++mcdi->credits;
494 } 799 }
495 } else { 800 } else {
@@ -504,41 +809,12 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
504 break; 809 break;
505 udelay(MCDI_STATUS_DELAY_US); 810 udelay(MCDI_STATUS_DELAY_US);
506 } 811 }
812 mcdi->new_epoch = true;
507 } 813 }
508 814
509 spin_unlock(&mcdi->iface_lock); 815 spin_unlock(&mcdi->iface_lock);
510} 816}
511 817
512static unsigned int efx_mcdi_event_link_speed[] = {
513 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
514 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
515 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
516};
517
518
519static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
520{
521 u32 flags, fcntl, speed, lpa;
522
523 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
524 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
525 speed = efx_mcdi_event_link_speed[speed];
526
527 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
528 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
529 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
530
531 /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
532 * which is only run after flushing the event queues. Therefore, it
533 * is safe to modify the link state outside of the mac_lock here.
534 */
535 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
536
537 efx_mcdi_phy_check_fcntl(efx, lpa);
538
539 efx_link_status_changed(efx);
540}
541
542/* Called from falcon_process_eventq for MCDI events */ 818/* Called from falcon_process_eventq for MCDI events */
543void efx_mcdi_process_event(struct efx_channel *channel, 819void efx_mcdi_process_event(struct efx_channel *channel,
544 efx_qword_t *event) 820 efx_qword_t *event)
@@ -551,7 +827,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
551 case MCDI_EVENT_CODE_BADSSERT: 827 case MCDI_EVENT_CODE_BADSSERT:
552 netif_err(efx, hw, efx->net_dev, 828 netif_err(efx, hw, efx->net_dev,
553 "MC watchdog or assertion failure at 0x%x\n", data); 829 "MC watchdog or assertion failure at 0x%x\n", data);
554 efx_mcdi_ev_death(efx, EINTR); 830 efx_mcdi_ev_death(efx, -EINTR);
555 break; 831 break;
556 832
557 case MCDI_EVENT_CODE_PMNOTICE: 833 case MCDI_EVENT_CODE_PMNOTICE:
@@ -576,8 +852,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
576 "MC Scheduler error address=0x%x\n", data); 852 "MC Scheduler error address=0x%x\n", data);
577 break; 853 break;
578 case MCDI_EVENT_CODE_REBOOT: 854 case MCDI_EVENT_CODE_REBOOT:
855 case MCDI_EVENT_CODE_MC_REBOOT:
579 netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); 856 netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
580 efx_mcdi_ev_death(efx, EIO); 857 efx_mcdi_ev_death(efx, -EIO);
581 break; 858 break;
582 case MCDI_EVENT_CODE_MAC_STATS_DMA: 859 case MCDI_EVENT_CODE_MAC_STATS_DMA:
583 /* MAC stats are gathered lazily. We can ignore this. */ 860
@@ -590,7 +867,27 @@ void efx_mcdi_process_event(struct efx_channel *channel,
590 case MCDI_EVENT_CODE_PTP_PPS: 867 case MCDI_EVENT_CODE_PTP_PPS:
591 efx_ptp_event(efx, event); 868 efx_ptp_event(efx, event);
592 break; 869 break;
593 870 case MCDI_EVENT_CODE_TX_FLUSH:
871 case MCDI_EVENT_CODE_RX_FLUSH:
872 /* Two flush events will be sent: one to the same event
873 * queue as completions, and one to event queue 0.
874 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
875 * flag will be set, and we should ignore the event
876 * because we want to wait for all completions.
877 */
878 BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
879 MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
880 if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
881 efx_ef10_handle_drain_event(efx);
882 break;
883 case MCDI_EVENT_CODE_TX_ERR:
884 case MCDI_EVENT_CODE_RX_ERR:
885 netif_err(efx, hw, efx->net_dev,
886 "%s DMA error (event: "EFX_QWORD_FMT")\n",
887 code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
888 EFX_QWORD_VAL(*event));
889 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
890 break;
594 default: 891 default:
595 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 892 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
596 code); 893 code);
@@ -606,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel,
606 903
607void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 904void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
608{ 905{
609 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)]; 906 MCDI_DECLARE_BUF(outbuf,
907 max(MC_CMD_GET_VERSION_OUT_LEN,
908 MC_CMD_GET_CAPABILITIES_OUT_LEN));
610 size_t outlength; 909 size_t outlength;
611 const __le16 *ver_words; 910 const __le16 *ver_words;
911 size_t offset;
612 int rc; 912 int rc;
613 913
614 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 914 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
615
616 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 915 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
617 outbuf, sizeof(outbuf), &outlength); 916 outbuf, sizeof(outbuf), &outlength);
618 if (rc) 917 if (rc)
619 goto fail; 918 goto fail;
620
621 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 919 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
622 rc = -EIO; 920 rc = -EIO;
623 goto fail; 921 goto fail;
624 } 922 }
625 923
626 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 924 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
627 snprintf(buf, len, "%u.%u.%u.%u", 925 offset = snprintf(buf, len, "%u.%u.%u.%u",
628 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 926 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
629 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 927 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
928
929 /* EF10 may have multiple datapath firmware variants within a
930 * single version. Report which variants are running.
931 */
932 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
933 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
934 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
935 outbuf, sizeof(outbuf), &outlength);
936 if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
937 offset += snprintf(
938 buf + offset, len - offset, " rx? tx?");
939 else
940 offset += snprintf(
941 buf + offset, len - offset, " rx%x tx%x",
942 MCDI_WORD(outbuf,
943 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
944 MCDI_WORD(outbuf,
945 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
946
947 /* It's theoretically possible for the string to exceed 31
948 * characters, though in practice the first three version
949 * components are short enough that this doesn't happen.
950 */
951 if (WARN_ON(offset >= len))
952 buf[0] = 0;
953 }
954
630 return; 955 return;
631 956
632fail: 957fail:
@@ -634,17 +959,18 @@ fail:
634 buf[0] = 0; 959 buf[0] = 0;
635} 960}
636 961
637int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 962static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
638 bool *was_attached) 963 bool *was_attached)
639{ 964{
640 u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
641 u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
642 size_t outlen; 967 size_t outlen;
643 int rc; 968 int rc;
644 969
645 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 970 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
646 driver_operating ? 1 : 0); 971 driver_operating ? 1 : 0);
647 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); 972 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
973 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
648 974
649 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), 975 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
650 outbuf, sizeof(outbuf), &outlen); 976 outbuf, sizeof(outbuf), &outlen);
@@ -667,8 +993,8 @@ fail:
667int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 993int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
668 u16 *fw_subtype_list, u32 *capabilities) 994 u16 *fw_subtype_list, u32 *capabilities)
669{ 995{
670 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX]; 996 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
671 size_t outlen, offset, i; 997 size_t outlen, i;
672 int port_num = efx_port_num(efx); 998 int port_num = efx_port_num(efx);
673 int rc; 999 int rc;
674 1000
@@ -684,22 +1010,21 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
684 goto fail; 1010 goto fail;
685 } 1011 }
686 1012
687 offset = (port_num)
688 ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
689 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
690 if (mac_address) 1013 if (mac_address)
691 memcpy(mac_address, outbuf + offset, ETH_ALEN); 1014 memcpy(mac_address,
1015 port_num ?
1016 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
1017 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
1018 ETH_ALEN);
692 if (fw_subtype_list) { 1019 if (fw_subtype_list) {
693 /* Byte-swap and truncate or zero-pad as necessary */
694 offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
695 for (i = 0; 1020 for (i = 0;
696 i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; 1021 i < MCDI_VAR_ARRAY_LEN(outlen,
697 i++) { 1022 GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
698 fw_subtype_list[i] = 1023 i++)
699 (offset + 2 <= outlen) ? 1024 fw_subtype_list[i] = MCDI_ARRAY_WORD(
700 le16_to_cpup((__le16 *)(outbuf + offset)) : 0; 1025 outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
701 offset += 2; 1026 for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
702 } 1027 fw_subtype_list[i] = 0;
703 } 1028 }
704 if (capabilities) { 1029 if (capabilities) {
705 if (port_num) 1030 if (port_num)
@@ -721,7 +1046,7 @@ fail:
721 1046
722int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) 1047int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
723{ 1048{
724 u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; 1049 MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
725 u32 dest = 0; 1050 u32 dest = 0;
726 int rc; 1051 int rc;
727 1052
@@ -749,7 +1074,7 @@ fail:
749 1074
750int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) 1075int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
751{ 1076{
752 u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; 1077 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
753 size_t outlen; 1078 size_t outlen;
754 int rc; 1079 int rc;
755 1080
@@ -777,8 +1102,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
777 size_t *size_out, size_t *erase_size_out, 1102 size_t *size_out, size_t *erase_size_out,
778 bool *protected_out) 1103 bool *protected_out)
779{ 1104{
780 u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; 1105 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
781 u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; 1106 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
782 size_t outlen; 1107 size_t outlen;
783 int rc; 1108 int rc;
784 1109
@@ -804,127 +1129,10 @@ fail:
804 return rc; 1129 return rc;
805} 1130}
806 1131
807int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
808{
809 u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
810 int rc;
811
812 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
813
814 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
815
816 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
817 NULL, 0, NULL);
818 if (rc)
819 goto fail;
820
821 return 0;
822
823fail:
824 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
825 return rc;
826}
827
828int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
829 loff_t offset, u8 *buffer, size_t length)
830{
831 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
832 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
833 size_t outlen;
834 int rc;
835
836 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
837 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
838 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
839
840 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
841 outbuf, sizeof(outbuf), &outlen);
842 if (rc)
843 goto fail;
844
845 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
846 return 0;
847
848fail:
849 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
850 return rc;
851}
852
853int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
854 loff_t offset, const u8 *buffer, size_t length)
855{
856 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
857 int rc;
858
859 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
860 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
861 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
862 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
863
864 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
865
866 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
867 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
868 NULL, 0, NULL);
869 if (rc)
870 goto fail;
871
872 return 0;
873
874fail:
875 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
876 return rc;
877}
878
879int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
880 loff_t offset, size_t length)
881{
882 u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
883 int rc;
884
885 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
886 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
887 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
888
889 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
890
891 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
892 NULL, 0, NULL);
893 if (rc)
894 goto fail;
895
896 return 0;
897
898fail:
899 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
900 return rc;
901}
902
903int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
904{
905 u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
906 int rc;
907
908 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
909
910 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
911
912 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
913 NULL, 0, NULL);
914 if (rc)
915 goto fail;
916
917 return 0;
918
919fail:
920 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
921 return rc;
922}
923
924static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) 1132static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
925{ 1133{
926 u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; 1134 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
927 u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; 1135 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
928 int rc; 1136 int rc;
929 1137
930 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); 1138 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
@@ -976,9 +1184,9 @@ fail1:
976 1184
977static int efx_mcdi_read_assertion(struct efx_nic *efx) 1185static int efx_mcdi_read_assertion(struct efx_nic *efx)
978{ 1186{
979 u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; 1187 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
980 u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; 1188 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
981 unsigned int flags, index, ofst; 1189 unsigned int flags, index;
982 const char *reason; 1190 const char *reason;
983 size_t outlen; 1191 size_t outlen;
984 int retry; 1192 int retry;
@@ -1020,19 +1228,20 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
1020 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); 1228 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
1021 1229
1022 /* Print out the registers */ 1230 /* Print out the registers */
1023 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 1231 for (index = 0;
1024 for (index = 1; index < 32; index++) { 1232 index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
1025 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, 1233 index++)
1026 MCDI_DWORD2(outbuf, ofst)); 1234 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
1027 ofst += sizeof(efx_dword_t); 1235 1 + index,
1028 } 1236 MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
1237 index));
1029 1238
1030 return 0; 1239 return 0;
1031} 1240}
1032 1241
1033static void efx_mcdi_exit_assertion(struct efx_nic *efx) 1242static void efx_mcdi_exit_assertion(struct efx_nic *efx)
1034{ 1243{
1035 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1244 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1036 1245
1037 /* If the MC is running debug firmware, it might now be 1246 /* If the MC is running debug firmware, it might now be
1038 * waiting for a debugger to attach, but we just want it to 1247 * waiting for a debugger to attach, but we just want it to
@@ -1062,7 +1271,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
1062 1271
1063void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1272void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1064{ 1273{
1065 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; 1274 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
1066 int rc; 1275 int rc;
1067 1276
1068 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); 1277 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
@@ -1080,7 +1289,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1080 __func__, rc); 1289 __func__, rc);
1081} 1290}
1082 1291
1083int efx_mcdi_reset_port(struct efx_nic *efx) 1292static int efx_mcdi_reset_port(struct efx_nic *efx)
1084{ 1293{
1085 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); 1294 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
1086 if (rc) 1295 if (rc)
@@ -1089,9 +1298,9 @@ int efx_mcdi_reset_port(struct efx_nic *efx)
1089 return rc; 1298 return rc;
1090} 1299}
1091 1300
1092int efx_mcdi_reset_mc(struct efx_nic *efx) 1301static int efx_mcdi_reset_mc(struct efx_nic *efx)
1093{ 1302{
1094 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1303 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1095 int rc; 1304 int rc;
1096 1305
1097 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1306 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
@@ -1107,11 +1316,31 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
1107 return rc; 1316 return rc;
1108} 1317}
1109 1318
1319enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
1320{
1321 return RESET_TYPE_RECOVER_OR_ALL;
1322}
1323
1324int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1325{
1326 int rc;
1327
1328 /* Recover from a failed assertion pre-reset */
1329 rc = efx_mcdi_handle_assertion(efx);
1330 if (rc)
1331 return rc;
1332
1333 if (method == RESET_TYPE_WORLD)
1334 return efx_mcdi_reset_mc(efx);
1335 else
1336 return efx_mcdi_reset_port(efx);
1337}
1338
1110static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1339static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1111 const u8 *mac, int *id_out) 1340 const u8 *mac, int *id_out)
1112{ 1341{
1113 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; 1342 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
1114 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; 1343 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
1115 size_t outlen; 1344 size_t outlen;
1116 int rc; 1345 int rc;
1117 1346
@@ -1151,7 +1380,7 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
1151 1380
1152int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) 1381int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1153{ 1382{
1154 u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; 1383 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
1155 size_t outlen; 1384 size_t outlen;
1156 int rc; 1385 int rc;
1157 1386
@@ -1178,7 +1407,7 @@ fail:
1178 1407
1179int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) 1408int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1180{ 1409{
1181 u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; 1410 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
1182 int rc; 1411 int rc;
1183 1412
1184 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); 1413 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
@@ -1199,34 +1428,31 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
1199{ 1428{
1200 struct efx_channel *channel; 1429 struct efx_channel *channel;
1201 struct efx_rx_queue *rx_queue; 1430 struct efx_rx_queue *rx_queue;
1202 __le32 *qid; 1431 MCDI_DECLARE_BUF(inbuf,
1432 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
1203 int rc, count; 1433 int rc, count;
1204 1434
1205 BUILD_BUG_ON(EFX_MAX_CHANNELS > 1435 BUILD_BUG_ON(EFX_MAX_CHANNELS >
1206 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); 1436 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
1207 1437
1208 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
1209 if (qid == NULL)
1210 return -ENOMEM;
1211
1212 count = 0; 1438 count = 0;
1213 efx_for_each_channel(channel, efx) { 1439 efx_for_each_channel(channel, efx) {
1214 efx_for_each_channel_rx_queue(rx_queue, channel) { 1440 efx_for_each_channel_rx_queue(rx_queue, channel) {
1215 if (rx_queue->flush_pending) { 1441 if (rx_queue->flush_pending) {
1216 rx_queue->flush_pending = false; 1442 rx_queue->flush_pending = false;
1217 atomic_dec(&efx->rxq_flush_pending); 1443 atomic_dec(&efx->rxq_flush_pending);
1218 qid[count++] = cpu_to_le32( 1444 MCDI_SET_ARRAY_DWORD(
1219 efx_rx_queue_index(rx_queue)); 1445 inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
1446 count, efx_rx_queue_index(rx_queue));
1447 count++;
1220 } 1448 }
1221 } 1449 }
1222 } 1450 }
1223 1451
1224 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid, 1452 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
1225 count * sizeof(*qid), NULL, 0, NULL); 1453 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
1226 WARN_ON(rc < 0); 1454 WARN_ON(rc < 0);
1227 1455
1228 kfree(qid);
1229
1230 return rc; 1456 return rc;
1231} 1457}
1232 1458
@@ -1245,3 +1471,247 @@ fail:
1245 return rc; 1471 return rc;
1246} 1472}
1247 1473
1474int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
1475{
1476 MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
1477
1478 BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
1479 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
1480 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
1481 return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
1482 NULL, 0, NULL);
1483}
1484
1485#ifdef CONFIG_SFC_MTD
1486
1487#define EFX_MCDI_NVRAM_LEN_MAX 128
1488
1489static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
1490{
1491 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
1492 int rc;
1493
1494 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
1495
1496 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
1497
1498 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
1499 NULL, 0, NULL);
1500 if (rc)
1501 goto fail;
1502
1503 return 0;
1504
1505fail:
1506 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1507 return rc;
1508}
1509
1510static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
1511 loff_t offset, u8 *buffer, size_t length)
1512{
1513 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
1514 MCDI_DECLARE_BUF(outbuf,
1515 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
1516 size_t outlen;
1517 int rc;
1518
1519 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
1520 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
1521 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
1522
1523 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
1524 outbuf, sizeof(outbuf), &outlen);
1525 if (rc)
1526 goto fail;
1527
1528 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
1529 return 0;
1530
1531fail:
1532 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1533 return rc;
1534}
1535
1536static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
1537 loff_t offset, const u8 *buffer, size_t length)
1538{
1539 MCDI_DECLARE_BUF(inbuf,
1540 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
1541 int rc;
1542
1543 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
1544 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
1545 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
1546 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
1547
1548 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
1549
1550 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
1551 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
1552 NULL, 0, NULL);
1553 if (rc)
1554 goto fail;
1555
1556 return 0;
1557
1558fail:
1559 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1560 return rc;
1561}
1562
1563static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
1564 loff_t offset, size_t length)
1565{
1566 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
1567 int rc;
1568
1569 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
1570 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
1571 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
1572
1573 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
1574
1575 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
1576 NULL, 0, NULL);
1577 if (rc)
1578 goto fail;
1579
1580 return 0;
1581
1582fail:
1583 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1584 return rc;
1585}
1586
1587static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
1588{
1589 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
1590 int rc;
1591
1592 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
1593
1594 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
1595
1596 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
1597 NULL, 0, NULL);
1598 if (rc)
1599 goto fail;
1600
1601 return 0;
1602
1603fail:
1604 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1605 return rc;
1606}
1607
1608int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
1609 size_t len, size_t *retlen, u8 *buffer)
1610{
1611 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1612 struct efx_nic *efx = mtd->priv;
1613 loff_t offset = start;
1614 loff_t end = min_t(loff_t, start + len, mtd->size);
1615 size_t chunk;
1616 int rc = 0;
1617
1618 while (offset < end) {
1619 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
1620 rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
1621 buffer, chunk);
1622 if (rc)
1623 goto out;
1624 offset += chunk;
1625 buffer += chunk;
1626 }
1627out:
1628 *retlen = offset - start;
1629 return rc;
1630}
1631
1632int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
1633{
1634 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1635 struct efx_nic *efx = mtd->priv;
1636 loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
1637 loff_t end = min_t(loff_t, start + len, mtd->size);
1638 size_t chunk = part->common.mtd.erasesize;
1639 int rc = 0;
1640
1641 if (!part->updating) {
1642 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
1643 if (rc)
1644 goto out;
1645 part->updating = true;
1646 }
1647
1648 /* The MCDI interface can in fact do multiple erase blocks at once;
1649 * but erasing may be slow, so we make multiple calls here to avoid
1650 * tripping the MCDI RPC timeout. */
1651 while (offset < end) {
1652 rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
1653 chunk);
1654 if (rc)
1655 goto out;
1656 offset += chunk;
1657 }
1658out:
1659 return rc;
1660}
1661
1662int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
1663 size_t len, size_t *retlen, const u8 *buffer)
1664{
1665 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1666 struct efx_nic *efx = mtd->priv;
1667 loff_t offset = start;
1668 loff_t end = min_t(loff_t, start + len, mtd->size);
1669 size_t chunk;
1670 int rc = 0;
1671
1672 if (!part->updating) {
1673 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
1674 if (rc)
1675 goto out;
1676 part->updating = true;
1677 }
1678
1679 while (offset < end) {
1680 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
1681 rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
1682 buffer, chunk);
1683 if (rc)
1684 goto out;
1685 offset += chunk;
1686 buffer += chunk;
1687 }
1688out:
1689 *retlen = offset - start;
1690 return rc;
1691}
1692
1693int efx_mcdi_mtd_sync(struct mtd_info *mtd)
1694{
1695 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1696 struct efx_nic *efx = mtd->priv;
1697 int rc = 0;
1698
1699 if (part->updating) {
1700 part->updating = false;
1701 rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
1702 }
1703
1704 return rc;
1705}
1706
1707void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
1708{
1709 struct efx_mcdi_mtd_partition *mcdi_part =
1710 container_of(part, struct efx_mcdi_mtd_partition, common);
1711 struct efx_nic *efx = part->mtd.priv;
1712
1713 snprintf(part->name, sizeof(part->name), "%s %s:%02x",
1714 efx->name, part->type_name, mcdi_part->fw_subtype);
1715}
1716
1717#endif /* CONFIG_SFC_MTD */
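The part->updating flag gives the MTD callbacks above an implicit update lifecycle: the first erase or write issues NVRAM_UPDATE_START, and sync issues NVRAM_UPDATE_FINISH. A hypothetical caller (identifiers illustrative, error handling omitted):

	efx_mcdi_mtd_erase(mtd, start, len);		/* starts the update */
	efx_mcdi_mtd_write(mtd, start, len, &retlen, buf);
	efx_mcdi_mtd_sync(mtd);				/* finishes the update */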
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 3ba2e5b5a9cc..c34d0d4e10ee 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2008-2010 Solarflare Communications Inc. 3 * Copyright 2008-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -11,18 +11,20 @@
11#define EFX_MCDI_H 11#define EFX_MCDI_H
12 12
13/** 13/**
14 * enum efx_mcdi_state 14 * enum efx_mcdi_state - MCDI request handling state
15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the 15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
16 * mcdi_lock then they are able to move to MCDI_STATE_RUNNING 16 * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
17 * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that 17 * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
18 * moved into this state is allowed to move out of it. 18 * Only the thread that moved into this state is allowed to move out of it.
19 * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
19 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread 20 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
20 * has not yet consumed the result. For all other threads, equivalent to 21 * has not yet consumed the result. For all other threads, equivalent to
21 * MCDI_STATE_RUNNING. 22 * %MCDI_STATE_RUNNING.
22 */ 23 */
23enum efx_mcdi_state { 24enum efx_mcdi_state {
24 MCDI_STATE_QUIESCENT, 25 MCDI_STATE_QUIESCENT,
25 MCDI_STATE_RUNNING, 26 MCDI_STATE_RUNNING_SYNC,
27 MCDI_STATE_RUNNING_ASYNC,
26 MCDI_STATE_COMPLETED, 28 MCDI_STATE_COMPLETED,
27}; 29};
28 30
@@ -32,28 +34,39 @@ enum efx_mcdi_mode {
32}; 34};
33 35
34/** 36/**
35 * struct efx_mcdi_iface 37 * struct efx_mcdi_iface - MCDI protocol context
36 * @state: Interface state. Waited for by mcdi_wq. 38 * @efx: The associated NIC.
37 * @wq: Wait queue for threads waiting for state != STATE_RUNNING 39 * @state: Request handling state. Waited for by @wq.
38 * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
39 * @mode: Poll for mcdi completion, or wait for an mcdi_event. 40 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
40 * Serialised by @lock 41 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
42 * @new_epoch: Indicates start of day or start of MC reboot recovery
43 * @iface_lock: Serialises access to @seqno, @credits and response metadata
41 * @seqno: The next sequence number to use for mcdi requests. 44 * @seqno: The next sequence number to use for mcdi requests.
42 * Serialised by @lock
43 * @credits: Number of spurious MCDI completion events allowed before we 45 * @credits: Number of spurious MCDI completion events allowed before we
44 * trigger a fatal error. Protected by @lock 46 * trigger a fatal error
45 * @resprc: Returned MCDI completion 47 * @resprc: Response error/success code (Linux numbering)
46 * @resplen: Returned payload length 48 * @resp_hdr_len: Response header length
49 * @resp_data_len: Response data (SDU or error) length
50 * @async_lock: Serialises access to @async_list while event processing is
51 * enabled
52 * @async_list: Queue of asynchronous requests
53 * @async_timer: Timer for asynchronous request timeout
47 */ 54 */
48struct efx_mcdi_iface { 55struct efx_mcdi_iface {
49 atomic_t state; 56 struct efx_nic *efx;
57 enum efx_mcdi_state state;
58 enum efx_mcdi_mode mode;
50 wait_queue_head_t wq; 59 wait_queue_head_t wq;
51 spinlock_t iface_lock; 60 spinlock_t iface_lock;
52 enum efx_mcdi_mode mode; 61 bool new_epoch;
53 unsigned int credits; 62 unsigned int credits;
54 unsigned int seqno; 63 unsigned int seqno;
55 unsigned int resprc; 64 int resprc;
56 size_t resplen; 65 size_t resp_hdr_len;
66 size_t resp_data_len;
67 spinlock_t async_lock;
68 struct list_head async_list;
69 struct timer_list async_timer;
57}; 70};
58 71
59struct efx_mcdi_mon { 72struct efx_mcdi_mon {
@@ -65,65 +78,204 @@ struct efx_mcdi_mon {
65 unsigned int n_attrs; 78 unsigned int n_attrs;
66}; 79};
67 80
68extern void efx_mcdi_init(struct efx_nic *efx); 81struct efx_mcdi_mtd_partition {
82 struct efx_mtd_partition common;
83 bool updating;
84 u16 nvram_type;
85 u16 fw_subtype;
86};
87
88#define to_efx_mcdi_mtd_partition(mtd) \
89 container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
90
91/**
92 * struct efx_mcdi_data - extra state for NICs that implement MCDI
93 * @iface: Interface/protocol state
94 * @hwmon: Hardware monitor state
95 */
96struct efx_mcdi_data {
97 struct efx_mcdi_iface iface;
98#ifdef CONFIG_SFC_MCDI_MON
99 struct efx_mcdi_mon hwmon;
100#endif
101};
102
103#ifdef CONFIG_SFC_MCDI_MON
104static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
105{
106 EFX_BUG_ON_PARANOID(!efx->mcdi);
107 return &efx->mcdi->hwmon;
108}
109#endif
110
111extern int efx_mcdi_init(struct efx_nic *efx);
112extern void efx_mcdi_fini(struct efx_nic *efx);
69 113
70extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, 114extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
71 size_t inlen, u8 *outbuf, size_t outlen, 115 const efx_dword_t *inbuf, size_t inlen,
116 efx_dword_t *outbuf, size_t outlen,
72 size_t *outlen_actual); 117 size_t *outlen_actual);
73 118
74extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, 119extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
75 const u8 *inbuf, size_t inlen); 120 const efx_dword_t *inbuf, size_t inlen);
76extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 121extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
77 u8 *outbuf, size_t outlen, 122 efx_dword_t *outbuf, size_t outlen,
78 size_t *outlen_actual); 123 size_t *outlen_actual);
79 124
125typedef void efx_mcdi_async_completer(struct efx_nic *efx,
126 unsigned long cookie, int rc,
127 efx_dword_t *outbuf,
128 size_t outlen_actual);
129extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
130 const efx_dword_t *inbuf, size_t inlen,
131 size_t outlen,
132 efx_mcdi_async_completer *complete,
133 unsigned long cookie);
134
80extern int efx_mcdi_poll_reboot(struct efx_nic *efx); 135extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
81extern void efx_mcdi_mode_poll(struct efx_nic *efx); 136extern void efx_mcdi_mode_poll(struct efx_nic *efx);
82extern void efx_mcdi_mode_event(struct efx_nic *efx); 137extern void efx_mcdi_mode_event(struct efx_nic *efx);
138extern void efx_mcdi_flush_async(struct efx_nic *efx);
83 139
84extern void efx_mcdi_process_event(struct efx_channel *channel, 140extern void efx_mcdi_process_event(struct efx_channel *channel,
85 efx_qword_t *event); 141 efx_qword_t *event);
86extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); 142extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
87 143
88#define MCDI_PTR2(_buf, _ofst) \ 144/* We expect that 16- and 32-bit fields in MCDI requests and responses
89 (((u8 *)_buf) + _ofst) 145 * are appropriately aligned, but 64-bit fields are only
90#define MCDI_SET_DWORD2(_buf, _ofst, _value) \ 146 * 32-bit-aligned. Also, on Siena we must copy to the MC shared
91 EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ 147 * memory strictly 32 bits at a time, so add any necessary padding.
92 EFX_DWORD_0, _value) 148 */
93#define MCDI_DWORD2(_buf, _ofst) \ 149#define MCDI_DECLARE_BUF(_name, _len) \
94 EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ 150 efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
95 EFX_DWORD_0) 151#define _MCDI_PTR(_buf, _offset) \
96#define MCDI_QWORD2(_buf, _ofst) \ 152 ((u8 *)(_buf) + (_offset))
97 EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \ 153#define MCDI_PTR(_buf, _field) \
98 EFX_QWORD_0) 154 _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
99 155#define _MCDI_CHECK_ALIGN(_ofst, _align) \
100#define MCDI_PTR(_buf, _ofst) \ 156 ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
101 MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) 157#define _MCDI_DWORD(_buf, _field) \
102#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \ 158 ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
103 MCDI_PTR2(_buf, \ 159
104 MC_CMD_ ## _field ## _OFST + \ 160#define MCDI_WORD(_buf, _field) \
105 (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN) 161 ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
106#define MCDI_SET_DWORD(_buf, _ofst, _value) \ 162 le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
107 MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) 163#define MCDI_SET_DWORD(_buf, _field, _value) \
108#define MCDI_DWORD(_buf, _ofst) \ 164 EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
109 MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) 165#define MCDI_DWORD(_buf, _field) \
110#define MCDI_QWORD(_buf, _ofst) \ 166 EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
111 MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) 167#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
168 EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
169 MC_CMD_ ## _name1, _value1)
170#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
171 _name2, _value2) \
172 EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
173 MC_CMD_ ## _name1, _value1, \
174 MC_CMD_ ## _name2, _value2)
175#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
176 _name2, _value2, _name3, _value3) \
177 EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
178 MC_CMD_ ## _name1, _value1, \
179 MC_CMD_ ## _name2, _value2, \
180 MC_CMD_ ## _name3, _value3)
181#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
182 _name2, _value2, _name3, _value3, \
183 _name4, _value4) \
184 EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
185 MC_CMD_ ## _name1, _value1, \
186 MC_CMD_ ## _name2, _value2, \
187 MC_CMD_ ## _name3, _value3, \
188 MC_CMD_ ## _name4, _value4)
189#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
190 _name2, _value2, _name3, _value3, \
191 _name4, _value4, _name5, _value5) \
192 EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
193 MC_CMD_ ## _name1, _value1, \
194 MC_CMD_ ## _name2, _value2, \
195 MC_CMD_ ## _name3, _value3, \
196 MC_CMD_ ## _name4, _value4, \
197 MC_CMD_ ## _name5, _value5)
198#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
199 _name2, _value2, _name3, _value3, \
200 _name4, _value4, _name5, _value5, \
201 _name6, _value6) \
202 EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
203 MC_CMD_ ## _name1, _value1, \
204 MC_CMD_ ## _name2, _value2, \
205 MC_CMD_ ## _name3, _value3, \
206 MC_CMD_ ## _name4, _value4, \
207 MC_CMD_ ## _name5, _value5, \
208 MC_CMD_ ## _name6, _value6)
209#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
210 _name2, _value2, _name3, _value3, \
211 _name4, _value4, _name5, _value5, \
212 _name6, _value6, _name7, _value7) \
213 EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
214 MC_CMD_ ## _name1, _value1, \
215 MC_CMD_ ## _name2, _value2, \
216 MC_CMD_ ## _name3, _value3, \
217 MC_CMD_ ## _name4, _value4, \
218 MC_CMD_ ## _name5, _value5, \
219 MC_CMD_ ## _name6, _value6, \
220 MC_CMD_ ## _name7, _value7)
221#define MCDI_SET_QWORD(_buf, _field, _value) \
222 do { \
223 EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
224 EFX_DWORD_0, (u32)(_value)); \
225 EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
226 EFX_DWORD_0, (u64)(_value) >> 32); \
227 } while (0)
228#define MCDI_QWORD(_buf, _field) \
229 (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
230 (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
231#define MCDI_FIELD(_ptr, _type, _field) \
232 EFX_EXTRACT_DWORD( \
233 *(efx_dword_t *) \
234 _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
235 MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
236 (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
237 MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
238
239#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
240 (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
241 + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
242#define MCDI_DECLARE_STRUCT_PTR(_name) \
243 efx_dword_t *_name
244#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
245 ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
246#define MCDI_VAR_ARRAY_LEN(_len, _field) \
247 min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
248 ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
249#define MCDI_ARRAY_WORD(_buf, _field, _index) \
250 (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
251 le16_to_cpu(*(__force const __le16 *) \
252 _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
253#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
254 (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
255 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
256#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
257 EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
258 EFX_DWORD_0, _value)
259#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
260 EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
261#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
262 (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
263 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
264#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
265 do { \
266 EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
267 EFX_DWORD_0, (u32)(_value)); \
268 EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
269 EFX_DWORD_0, (u64)(_value) >> 32); \
270 } while (0)
271#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
272 MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
273 _type ## _TYPEDEF, _field2)
112 274
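A minimal sketch of how these buffer macros compose, assuming hypothetical MC_CMD_EXAMPLE_* definitions (illustrative only). MCDI_DECLARE_BUF() rounds the byte length up to whole dwords, so a 10-byte payload occupies three efx_dword_t (12 bytes); the field accessors then verify offset alignment at compile time via _MCDI_CHECK_ALIGN() and access the buffer in 32-bit units:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_EXAMPLE_IN_LEN);

	MCDI_SET_DWORD(inbuf, EXAMPLE_IN_FLAGS, 0x3);
	/* 64-bit value stored as two 32-bit halves, since only 32-bit
	 * alignment is guaranteed for 64-bit fields */
	MCDI_SET_QWORD(inbuf, EXAMPLE_IN_DMA_ADDR, dma_addr);
	/* element 0 of a dword array field */
	MCDI_SET_ARRAY_DWORD(inbuf, EXAMPLE_IN_QID, 0, qid);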
113#define MCDI_EVENT_FIELD(_ev, _field) \ 275#define MCDI_EVENT_FIELD(_ev, _field) \
114 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 276 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
115#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
116 EFX_EXTRACT_DWORD( \
117 *((efx_dword_t *) \
118 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
119 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
120 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
121 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
122 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
123 277
124extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 278extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
125extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
126 bool *was_attached_out);
127extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 279extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
128 u16 *fw_subtype_list, u32 *capabilities); 280 u16 *fw_subtype_list, u32 *capabilities);
129extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, 281extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
@@ -132,34 +284,29 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
132extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, 284extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
133 size_t *size_out, size_t *erase_size_out, 285 size_t *size_out, size_t *erase_size_out,
134 bool *protected_out); 286 bool *protected_out);
135extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
136 unsigned int type);
137extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
138 loff_t offset, u8 *buffer, size_t length);
139extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
140 loff_t offset, const u8 *buffer,
141 size_t length);
142#define EFX_MCDI_NVRAM_LEN_MAX 128
143extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
144 loff_t offset, size_t length);
145extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
146 unsigned int type);
147extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); 287extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
148extern int efx_mcdi_handle_assertion(struct efx_nic *efx); 288extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
149extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 289extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
150extern int efx_mcdi_reset_port(struct efx_nic *efx);
151extern int efx_mcdi_reset_mc(struct efx_nic *efx);
152extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, 290extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
153 const u8 *mac, int *id_out); 291 const u8 *mac, int *id_out);
154extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 292extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
155extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); 293extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
156extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); 294extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
157extern int efx_mcdi_flush_rxqs(struct efx_nic *efx); 295extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
296extern int efx_mcdi_port_probe(struct efx_nic *efx);
297extern void efx_mcdi_port_remove(struct efx_nic *efx);
298extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
299extern int efx_mcdi_port_get_number(struct efx_nic *efx);
300extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
301extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
158extern int efx_mcdi_set_mac(struct efx_nic *efx); 302extern int efx_mcdi_set_mac(struct efx_nic *efx);
159extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 303#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
160 u32 dma_len, int enable, int clear); 304extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
161extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx); 305extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
162extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); 306extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
307extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
308extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
309extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
163 310
164#ifdef CONFIG_SFC_MCDI_MON 311#ifdef CONFIG_SFC_MCDI_MON
165extern int efx_mcdi_mon_probe(struct efx_nic *efx); 312extern int efx_mcdi_mon_probe(struct efx_nic *efx);
@@ -169,4 +316,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
169static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} 316static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
170#endif 317#endif
171 318
319#ifdef CONFIG_SFC_MTD
320extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
321 size_t len, size_t *retlen, u8 *buffer);
322extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
323extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
324 size_t len, size_t *retlen, const u8 *buffer);
325extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
326extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
327#endif
328
172#endif /* EFX_MCDI_H */ 329#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
deleted file mode 100644
index 1003f309cba7..000000000000
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ /dev/null
@@ -1,130 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "efx.h"
12#include "mcdi.h"
13#include "mcdi_pcol.h"
14
15int efx_mcdi_set_mac(struct efx_nic *efx)
16{
17 u32 reject, fcntl;
18 u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
19
20 memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
21 efx->net_dev->dev_addr, ETH_ALEN);
22
23 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
24 EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
25 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
26
27 /* The MCDI command provides for controlling accept/reject
28 * of broadcast packets too, but the driver doesn't currently
29 * expose this. */
30 reject = (efx->promiscuous) ? 0 :
31 (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
32 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
33
34 switch (efx->wanted_fc) {
35 case EFX_FC_RX | EFX_FC_TX:
36 fcntl = MC_CMD_FCNTL_BIDIR;
37 break;
38 case EFX_FC_RX:
39 fcntl = MC_CMD_FCNTL_RESPOND;
40 break;
41 default:
42 fcntl = MC_CMD_FCNTL_OFF;
43 break;
44 }
45 if (efx->wanted_fc & EFX_FC_AUTO)
46 fcntl = MC_CMD_FCNTL_AUTO;
47 if (efx->fc_disable)
48 fcntl = MC_CMD_FCNTL_OFF;
49
50 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
51
52 return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
53 NULL, 0, NULL);
54}
55
56bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
57{
58 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
59 size_t outlength;
60 int rc;
61
62 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
63
64 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
65 outbuf, sizeof(outbuf), &outlength);
66 if (rc) {
67 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
68 __func__, rc);
69 return true;
70 }
71
72 return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
73}
74
75int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
76 u32 dma_len, int enable, int clear)
77{
78 u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
79 int rc;
80 efx_dword_t *cmd_ptr;
81 int period = enable ? 1000 : 0;
82 u32 addr_hi;
83 u32 addr_lo;
84
85 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
86
87 addr_lo = ((u64)dma_addr) >> 0;
88 addr_hi = ((u64)dma_addr) >> 32;
89
90 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
91 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
92 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
93 EFX_POPULATE_DWORD_7(*cmd_ptr,
94 MC_CMD_MAC_STATS_IN_DMA, !!enable,
95 MC_CMD_MAC_STATS_IN_CLEAR, clear,
96 MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
97 MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
98 MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
99 MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
100 MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
101 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
102
103 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
104 NULL, 0, NULL);
105 if (rc)
106 goto fail;
107
108 return 0;
109
110fail:
111 netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
112 __func__, enable ? "enable" : "disable", rc);
113 return rc;
114}
115
116int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
117{
118 int rc;
119
120 WARN_ON(!mutex_is_locked(&efx->mac_lock));
121
122 rc = efx_mcdi_set_mac(efx);
123 if (rc != 0)
124 return rc;
125
126 return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
127 efx->multicast_hash.byte,
128 sizeof(efx->multicast_hash),
129 NULL, 0, NULL);
130}
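The deleted efx_mcdi_mac_stats() splits the 64-bit DMA address into low and high 32-bit halves (addr_lo/addr_hi above) before placing them in the request, since MCDI fields are dwords. A standalone round-trip of that split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dma_addr = 0x0000123456789abcULL;

        /* Split as in efx_mcdi_mac_stats(): low dword, then high dword. */
        uint32_t addr_lo = (uint32_t)(dma_addr >> 0);
        uint32_t addr_hi = (uint32_t)(dma_addr >> 32);

        /* Reassemble to verify the round trip. */
        uint64_t rejoined = ((uint64_t)addr_hi << 32) | addr_lo;

        printf("lo=0x%08x hi=0x%08x ok=%d\n",
               addr_lo, addr_hi, rejoined == dma_addr);
        return 0;
}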
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 1d552f0664d7..4cc5d95b2a5a 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc. 3 * Copyright 2011-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -21,31 +21,62 @@ enum efx_hwmon_type {
21 EFX_HWMON_UNKNOWN, 21 EFX_HWMON_UNKNOWN,
22 EFX_HWMON_TEMP, /* temperature */ 22 EFX_HWMON_TEMP, /* temperature */
23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */ 23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */
24 EFX_HWMON_IN /* input voltage */ 24 EFX_HWMON_IN, /* voltage */
25 EFX_HWMON_CURR, /* current */
26 EFX_HWMON_POWER, /* power */
25}; 27};
26 28
27static const struct { 29static const struct {
28 const char *label; 30 const char *label;
29 enum efx_hwmon_type hwmon_type; 31 enum efx_hwmon_type hwmon_type;
30 int port; 32 int port;
31} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = { 33} efx_mcdi_sensor_type[] = {
32#define SENSOR(name, label, hwmon_type, port) \ 34#define SENSOR(name, label, hwmon_type, port) \
33 [MC_CMD_SENSOR_##name] = { label, hwmon_type, port } 35 [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
34 SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1), 36 SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
35 SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1), 37 SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
36 SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1), 38 SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
37 SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0), 39 SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
38 SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0), 40 SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
39 SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1), 41 SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
40 SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1), 42 SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
41 SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1), 43 SENSOR(IN_1V0, "1.0V supply", IN, -1),
42 SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1), 44 SENSOR(IN_1V2, "1.2V supply", IN, -1),
43 SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1), 45 SENSOR(IN_1V8, "1.8V supply", IN, -1),
44 SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1), 46 SENSOR(IN_2V5, "2.5V supply", IN, -1),
45 SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1), 47 SENSOR(IN_3V3, "3.3V supply", IN, -1),
46 SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1), 48 SENSOR(IN_12V0, "12.0V supply", IN, -1),
47 SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1), 49 SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
48 SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1), 50 SENSOR(IN_VREF, "ref. voltage", IN, -1),
51 SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
52 SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
53 SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
54 SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
55 SENSOR(FAN_0, NULL, COOL, -1),
56 SENSOR(FAN_1, NULL, COOL, -1),
57 SENSOR(FAN_2, NULL, COOL, -1),
58 SENSOR(FAN_3, NULL, COOL, -1),
59 SENSOR(FAN_4, NULL, COOL, -1),
60 SENSOR(IN_VAOE, "AOE input supply", IN, -1),
61 SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
62 SENSOR(IN_IAOE, "AOE input current", CURR, -1),
63 SENSOR(NIC_POWER, "Board power use", POWER, -1),
64 SENSOR(IN_0V9, "0.9V supply", IN, -1),
65 SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
66 SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
67 SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
68 SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
69 SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
70 SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
71 SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
72 SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
73 SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
74 SENSOR(CONTROLLER_VPTAT_EXTADC,
75 "Controller int. temp. raw (at ADC)", IN, -1),
76 SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
77 "Controller int. temp. (via ADC)", TEMP, -1),
78 SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
79 SENSOR(AIRFLOW, "Air flow raw", IN, -1),
49#undef SENSOR 80#undef SENSOR
50}; 81};
51 82
@@ -54,6 +85,7 @@ static const char *const sensor_status_names[] = {
54 [MC_CMD_SENSOR_STATE_WARNING] = "Warning", 85 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
55 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", 86 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
56 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", 87 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
88 [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
57}; 89};
58 90
59void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) 91void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
@@ -85,6 +117,7 @@ struct efx_mcdi_mon_attribute {
85 struct device_attribute dev_attr; 117 struct device_attribute dev_attr;
86 unsigned int index; 118 unsigned int index;
87 unsigned int type; 119 unsigned int type;
120 enum efx_hwmon_type hwmon_type;
88 unsigned int limit_value; 121 unsigned int limit_value;
89 char name[12]; 122 char name[12];
90}; 123};
@@ -92,13 +125,12 @@ struct efx_mcdi_mon_attribute {
92static int efx_mcdi_mon_update(struct efx_nic *efx) 125static int efx_mcdi_mon_update(struct efx_nic *efx)
93{ 126{
94 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 127 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
95 u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN]; 128 MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
96 int rc; 129 int rc;
97 130
98 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO, 131 MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
99 hwmon->dma_buf.dma_addr & 0xffffffff); 132 hwmon->dma_buf.dma_addr);
100 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI, 133 MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
101 (u64)hwmon->dma_buf.dma_addr >> 32);
102 134
103 rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, 135 rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
104 inbuf, sizeof(inbuf), NULL, 0, NULL); 136 inbuf, sizeof(inbuf), NULL, 0, NULL);
@@ -146,18 +178,32 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
146 struct efx_mcdi_mon_attribute *mon_attr = 178 struct efx_mcdi_mon_attribute *mon_attr =
147 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); 179 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
148 efx_dword_t entry; 180 efx_dword_t entry;
149 unsigned int value; 181 unsigned int value, state;
150 int rc; 182 int rc;
151 183
152 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); 184 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
153 if (rc) 185 if (rc)
154 return rc; 186 return rc;
155 187
188 state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
189 if (state == MC_CMD_SENSOR_STATE_NO_READING)
190 return -EBUSY;
191
156 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); 192 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
157 193
158 /* Convert temperature from degrees to milli-degrees Celsius */ 194 switch (mon_attr->hwmon_type) {
159 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP) 195 case EFX_HWMON_TEMP:
196 /* Convert temperature from degrees to milli-degrees Celsius */
160 value *= 1000; 197 value *= 1000;
198 break;
199 case EFX_HWMON_POWER:
200 /* Convert power from watts to microwatts */
201 value *= 1000000;
202 break;
203 default:
204 /* No conversion needed */
205 break;
206 }
161 207
162 return sprintf(buf, "%u\n", value); 208 return sprintf(buf, "%u\n", value);
163} 209}
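The conversion switch added above follows the hwmon sysfs convention: temperatures are exported in millidegrees Celsius and power in microwatts, while this firmware's voltage and current readings are assumed to already be in hwmon units. A self-contained sketch of the same scaling:

#include <stdio.h>

enum hwmon_type { HWMON_TEMP, HWMON_IN, HWMON_CURR, HWMON_POWER };

/* Scale a raw firmware reading to the unit hwmon expects. */
static unsigned int hwmon_scale(enum hwmon_type type, unsigned int value)
{
        switch (type) {
        case HWMON_TEMP:
                return value * 1000;    /* degrees C -> millidegrees C */
        case HWMON_POWER:
                return value * 1000000; /* watts -> microwatts */
        default:
                return value;           /* assumed already in hwmon units */
        }
}

int main(void)
{
        printf("45 C -> %u\n", hwmon_scale(HWMON_TEMP, 45));
        printf("12 W -> %u\n", hwmon_scale(HWMON_POWER, 12));
        return 0;
}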
@@ -172,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
172 218
173 value = mon_attr->limit_value; 219 value = mon_attr->limit_value;
174 220
175 /* Convert temperature from degrees to milli-degrees Celsius */ 221 switch (mon_attr->hwmon_type) {
176 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP) 222 case EFX_HWMON_TEMP:
223 /* Convert temperature from degrees to milli-degrees Celsius */
177 value *= 1000; 224 value *= 1000;
225 break;
226 case EFX_HWMON_POWER:
227 /* Convert power from watts to microwatts */
228 value *= 1000000;
229 break;
230 default:
231 /* No conversion needed */
232 break;
233 }
178 234
179 return sprintf(buf, "%u\n", value); 235 return sprintf(buf, "%u\n", value);
180} 236}
@@ -221,6 +277,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
221 strlcpy(attr->name, name, sizeof(attr->name)); 277 strlcpy(attr->name, name, sizeof(attr->name));
222 attr->index = index; 278 attr->index = index;
223 attr->type = type; 279 attr->type = type;
280 if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
281 attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
282 else
283 attr->hwmon_type = EFX_HWMON_UNKNOWN;
224 attr->limit_value = limit_value; 284 attr->limit_value = limit_value;
225 sysfs_attr_init(&attr->dev_attr.attr); 285 sysfs_attr_init(&attr->dev_attr.attr);
226 attr->dev_attr.attr.name = attr->name; 286 attr->dev_attr.attr.name = attr->name;
@@ -234,36 +294,43 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
234 294
235int efx_mcdi_mon_probe(struct efx_nic *efx) 295int efx_mcdi_mon_probe(struct efx_nic *efx)
236{ 296{
297 unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
237 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 298 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
238 unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0; 299 MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
239 u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX]; 300 MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
301 unsigned int n_pages, n_sensors, n_attrs, page;
240 size_t outlen; 302 size_t outlen;
241 char name[12]; 303 char name[12];
242 u32 mask; 304 u32 mask;
243 int rc, i, type; 305 int rc, i, j, type;
244 306
245 BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0); 307 /* Find out how many sensors are present */
308 n_sensors = 0;
309 page = 0;
310 do {
311 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
246 312
247 rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0, 313 rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
248 outbuf, sizeof(outbuf), &outlen); 314 outbuf, sizeof(outbuf), &outlen);
249 if (rc) 315 if (rc)
250 return rc; 316 return rc;
251 if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) 317 if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
252 return -EIO; 318 return -EIO;
253 319
254 /* Find out which sensors are present. Don't create a device 320 mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
255 * if there are none. 321 n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
256 */ 322 ++page;
257 mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK); 323 } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
258 if (mask == 0) 324 n_pages = page;
325
326 /* Don't create a device if there are none */
327 if (n_sensors == 0)
259 return 0; 328 return 0;
260 329
261 /* Check again for short response */ 330 rc = efx_nic_alloc_buffer(
262 if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) 331 efx, &hwmon->dma_buf,
263 return -EIO; 332 n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
264 333 GFP_KERNEL);
265 rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
266 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
267 if (rc) 334 if (rc)
268 return rc; 335 return rc;
269 336
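The rewritten probe first sizes the sensor set by walking mask pages: each page's bit count (minus the PAGE0_NEXT continuation bit) is accumulated until a page arrives with the continuation bit clear. A userspace model of that loop; the bit position and page masks are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define SENSOR_PAGE_NEXT_BIT 31 /* illustrative stand-in for MC_CMD_SENSOR_PAGE0_NEXT */

/* Portable popcount, equivalent to the kernel's hweight32(). */
static unsigned int popcount32(uint32_t x)
{
        unsigned int n = 0;
        for (; x; x &= x - 1)
                n++;
        return n;
}

int main(void)
{
        /* Pretend firmware responses: the last page has NEXT clear. */
        const uint32_t pages[] = { 0x8000ffff, 0x800000f0, 0x00000003 };
        unsigned int n_sensors = 0, page = 0;
        uint32_t mask;

        do {
                mask = pages[page++];
                n_sensors += popcount32(mask & ~(1u << SENSOR_PAGE_NEXT_BIT));
        } while (mask & (1u << SENSOR_PAGE_NEXT_BIT));

        printf("pages=%u sensors=%u\n", page, n_sensors);
        return 0;
}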
@@ -274,7 +341,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
274 * attributes for this set of sensors: name of the driver plus 341 * attributes for this set of sensors: name of the driver plus
275 * value, min, max, crit, alarm and label for each sensor. 342 * value, min, max, crit, alarm and label for each sensor.
276 */ 343 */
277 n_attrs = 1 + 6 * hweight32(mask); 344 n_attrs = 1 + 6 * n_sensors;
278 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); 345 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
279 if (!hwmon->attrs) { 346 if (!hwmon->attrs) {
280 rc = -ENOMEM; 347 rc = -ENOMEM;
@@ -291,26 +358,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
291 if (rc) 358 if (rc)
292 goto fail; 359 goto fail;
293 360
294 for (i = 0, type = -1; ; i++) { 361 for (i = 0, j = -1, type = -1; ; i++) {
362 enum efx_hwmon_type hwmon_type;
295 const char *hwmon_prefix; 363 const char *hwmon_prefix;
296 unsigned hwmon_index; 364 unsigned hwmon_index;
297 u16 min1, max1, min2, max2; 365 u16 min1, max1, min2, max2;
298 366
299 /* Find next sensor type or exit if there is none */ 367 /* Find next sensor type or exit if there is none */
300 type++; 368 do {
301 while (!(mask & (1 << type))) {
302 type++; 369 type++;
303 if (type == 32)
304 return 0;
305 }
306 370
307 /* Skip sensors specific to a different port */ 371 if ((type % 32) == 0) {
308 if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN && 372 page = type / 32;
309 efx_mcdi_sensor_type[type].port >= 0 && 373 j = -1;
310 efx_mcdi_sensor_type[type].port != efx_port_num(efx)) 374 if (page == n_pages)
311 continue; 375 return 0;
376
377 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
378 page);
379 rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
380 inbuf, sizeof(inbuf),
381 outbuf, sizeof(outbuf),
382 &outlen);
383 if (rc)
384 goto fail;
385 if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
386 rc = -EIO;
387 goto fail;
388 }
389
390 mask = (MCDI_DWORD(outbuf,
391 SENSOR_INFO_OUT_MASK) &
392 ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
312 393
313 switch (efx_mcdi_sensor_type[type].hwmon_type) { 394 /* Check again for short response */
395 if (outlen <
396 MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
397 rc = -EIO;
398 goto fail;
399 }
400 }
401 } while (!(mask & (1 << type % 32)));
402 j++;
403
404 if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
405 hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
406
407 /* Skip sensors specific to a different port */
408 if (hwmon_type != EFX_HWMON_UNKNOWN &&
409 efx_mcdi_sensor_type[type].port >= 0 &&
410 efx_mcdi_sensor_type[type].port !=
411 efx_port_num(efx))
412 continue;
413 } else {
414 hwmon_type = EFX_HWMON_UNKNOWN;
415 }
416
417 switch (hwmon_type) {
314 case EFX_HWMON_TEMP: 418 case EFX_HWMON_TEMP:
315 hwmon_prefix = "temp"; 419 hwmon_prefix = "temp";
316 hwmon_index = ++n_temp; /* 1-based */ 420 hwmon_index = ++n_temp; /* 1-based */
@@ -327,16 +431,24 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
327 hwmon_prefix = "in"; 431 hwmon_prefix = "in";
328 hwmon_index = n_in++; /* 0-based */ 432 hwmon_index = n_in++; /* 0-based */
329 break; 433 break;
434 case EFX_HWMON_CURR:
435 hwmon_prefix = "curr";
436 hwmon_index = ++n_curr; /* 1-based */
437 break;
438 case EFX_HWMON_POWER:
439 hwmon_prefix = "power";
440 hwmon_index = ++n_power; /* 1-based */
441 break;
330 } 442 }
331 443
332 min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 444 min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
333 SENSOR_INFO_ENTRY, i, MIN1); 445 SENSOR_INFO_ENTRY, j, MIN1);
334 max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 446 max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
335 SENSOR_INFO_ENTRY, i, MAX1); 447 SENSOR_INFO_ENTRY, j, MAX1);
336 min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 448 min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
337 SENSOR_INFO_ENTRY, i, MIN2); 449 SENSOR_INFO_ENTRY, j, MIN2);
338 max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 450 max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
339 SENSOR_INFO_ENTRY, i, MAX2); 451 SENSOR_INFO_ENTRY, j, MAX2);
340 452
341 if (min1 != max1) { 453 if (min1 != max1) {
342 snprintf(name, sizeof(name), "%s%u_input", 454 snprintf(name, sizeof(name), "%s%u_input",
@@ -346,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
346 if (rc) 458 if (rc)
347 goto fail; 459 goto fail;
348 460
349 snprintf(name, sizeof(name), "%s%u_min", 461 if (hwmon_type != EFX_HWMON_POWER) {
350 hwmon_prefix, hwmon_index); 462 snprintf(name, sizeof(name), "%s%u_min",
351 rc = efx_mcdi_mon_add_attr( 463 hwmon_prefix, hwmon_index);
352 efx, name, efx_mcdi_mon_show_limit, 464 rc = efx_mcdi_mon_add_attr(
353 i, type, min1); 465 efx, name, efx_mcdi_mon_show_limit,
354 if (rc) 466 i, type, min1);
355 goto fail; 467 if (rc)
468 goto fail;
469 }
356 470
357 snprintf(name, sizeof(name), "%s%u_max", 471 snprintf(name, sizeof(name), "%s%u_max",
358 hwmon_prefix, hwmon_index); 472 hwmon_prefix, hwmon_index);
@@ -383,7 +497,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
383 if (rc) 497 if (rc)
384 goto fail; 498 goto fail;
385 499
386 if (efx_mcdi_sensor_type[type].label) { 500 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
501 efx_mcdi_sensor_type[type].label) {
387 snprintf(name, sizeof(name), "%s%u_label", 502 snprintf(name, sizeof(name), "%s%u_label",
388 hwmon_prefix, hwmon_index); 503 hwmon_prefix, hwmon_index);
389 rc = efx_mcdi_mon_add_attr( 504 rc = efx_mcdi_mon_add_attr(
@@ -400,8 +515,7 @@ fail:
400 515
401void efx_mcdi_mon_remove(struct efx_nic *efx) 516void efx_mcdi_mon_remove(struct efx_nic *efx)
402{ 517{
403 struct siena_nic_data *nic_data = efx->nic_data; 518 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
404 struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
405 unsigned int i; 519 unsigned int i;
406 520
407 for (i = 0; i < hwmon->n_attrs; i++) 521 for (i = 0; i < hwmon->n_attrs; i++)
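Several of the hunks above bound-check type against ARRAY_SIZE(efx_mcdi_sensor_type) and fall back to EFX_HWMON_UNKNOWN, so firmware reporting sensor numbers newer than the driver's table degrades gracefully instead of indexing out of bounds. The pattern in isolation:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum hwmon_type { HWMON_UNKNOWN, HWMON_TEMP, HWMON_IN };

static const enum hwmon_type sensor_table[] = {
        HWMON_TEMP, /* type 0 */
        HWMON_IN,   /* type 1 */
};

/* Unknown (future) sensor types degrade gracefully instead of
 * reading past the end of the table. */
static enum hwmon_type lookup(unsigned int type)
{
        return type < ARRAY_SIZE(sensor_table) ? sensor_table[type]
                                               : HWMON_UNKNOWN;
}

int main(void)
{
        printf("%d %d %d\n", lookup(0), lookup(1), lookup(99));
        return 0;
}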
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c5c9747861ba..b5cf62492f8e 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2009-2011 Solarflare Communications Inc. 3 * Copyright 2009-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,13 @@
21#define MC_FW_STATE_BOOTING (4) 21#define MC_FW_STATE_BOOTING (4)
22/* The Scheduler has started. */ 22/* The Scheduler has started. */
23#define MC_FW_STATE_SCHED (8) 23#define MC_FW_STATE_SCHED (8)
24/* If this is set in MC_RESET_STATE_REG then it should be
25 * possible to jump into IMEM without loading code from flash.
26 * Unlike a warm boot, DMEM is assumed to have been reloaded, so
27 * the MC persistent data must be reinitialised. */
28#define MC_FW_TEPID_BOOT_OK (16)
29/* BIST state has been initialized */
30#define MC_FW_BIST_INIT_OK (128)
24 31
25/* Siena MC shared memory offsets */ 32/* Siena MC shared memory offsets */
26/* The 'doorbell' addresses are hard-wired to alert the MC when written */ 33/* The 'doorbell' addresses are hard-wired to alert the MC when written */
@@ -39,18 +46,21 @@
39#define MC_STATUS_DWORD_REBOOT (0xb007b007) 46#define MC_STATUS_DWORD_REBOOT (0xb007b007)
40#define MC_STATUS_DWORD_ASSERT (0xdeaddead) 47#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
41 48
49/* Check whether an mcfw version (in host order) belongs to a bootloader */
50#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
51
42/* The current version of the MCDI protocol. 52/* The current version of the MCDI protocol.
43 * 53 *
44 * Note that the ROM burnt into the card only talks V0, so at the very 54 * Note that the ROM burnt into the card only talks V0, so at the very
45 * least every driver must support version 0 and MCDI_PCOL_VERSION 55 * least every driver must support version 0 and MCDI_PCOL_VERSION
46 */ 56 */
47#define MCDI_PCOL_VERSION 1 57#define MCDI_PCOL_VERSION 2
48 58
49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ 59/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
50 60
51/* MCDI version 1 61/* MCDI version 1
52 * 62 *
53 * Each MCDI request starts with an MCDI_HEADER, which is a 32byte 63 * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
54 * structure, filled in by the client. 64 * structure, filled in by the client.
55 * 65 *
56 * 0 7 8 16 20 22 23 24 31 66 * 0 7 8 16 20 22 23 24 31
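MC_FW_VERSION_IS_BOOTLOADER() earlier in this hunk keys off the 0xb007 ("boot") marker in the top 16 bits of a firmware version word, matching the 0xb007xxxx bootrom version values defined later in this file. A quick standalone check:

#include <stdint.h>
#include <stdio.h>

/* As defined above: bootloader versions carry 0xb007 in bits 31:16. */
#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)

int main(void)
{
        uint32_t versions[] = { 0xb0070000, 0xb0070001, 0x040a0123 };
        for (unsigned int i = 0; i < 3; i++)
                printf("0x%08x -> %s\n", (unsigned)versions[i],
                       MC_FW_VERSION_IS_BOOTLOADER(versions[i]) ?
                       "bootloader" : "application");
        return 0;
}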
@@ -87,9 +97,11 @@
87#define MCDI_HEADER_DATALEN_LBN 8 97#define MCDI_HEADER_DATALEN_LBN 8
88#define MCDI_HEADER_DATALEN_WIDTH 8 98#define MCDI_HEADER_DATALEN_WIDTH 8
89#define MCDI_HEADER_SEQ_LBN 16 99#define MCDI_HEADER_SEQ_LBN 16
90#define MCDI_HEADER_RSVD_LBN 20
91#define MCDI_HEADER_RSVD_WIDTH 2
92#define MCDI_HEADER_SEQ_WIDTH 4 100#define MCDI_HEADER_SEQ_WIDTH 4
101#define MCDI_HEADER_RSVD_LBN 20
102#define MCDI_HEADER_RSVD_WIDTH 1
103#define MCDI_HEADER_NOT_EPOCH_LBN 21
104#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
93#define MCDI_HEADER_ERROR_LBN 22 105#define MCDI_HEADER_ERROR_LBN 22
94#define MCDI_HEADER_ERROR_WIDTH 1 106#define MCDI_HEADER_ERROR_WIDTH 1
95#define MCDI_HEADER_RESPONSE_LBN 23 107#define MCDI_HEADER_RESPONSE_LBN 23
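All MCDI header fields above are specified as (LBN, WIDTH) pairs: the least-significant bit number plus the field width within the 32-bit header dword. A generic pack/extract sketch over that convention; the example field values are arbitrary:

#include <stdint.h>
#include <stdio.h>

/* Insert or extract an (LBN, WIDTH) field in a 32-bit MCDI dword. */
static uint32_t field_set(uint32_t dword, int lbn, int width, uint32_t val)
{
        uint32_t mask = ((width == 32) ? ~0u : ((1u << width) - 1)) << lbn;
        return (dword & ~mask) | ((val << lbn) & mask);
}

static uint32_t field_get(uint32_t dword, int lbn, int width)
{
        return (dword >> lbn) & ((width == 32) ? ~0u : ((1u << width) - 1));
}

int main(void)
{
        /* LBN/WIDTH values taken from the definitions above. */
        uint32_t hdr = 0;
        hdr = field_set(hdr, 8, 8, 16); /* MCDI_HEADER_DATALEN = 16 */
        hdr = field_set(hdr, 16, 4, 3); /* MCDI_HEADER_SEQ = 3 */
        hdr = field_set(hdr, 23, 1, 1); /* MCDI_HEADER_RESPONSE */

        printf("hdr=0x%08x seq=%u\n", hdr, (unsigned)field_get(hdr, 16, 4));
        return 0;
}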
@@ -100,7 +112,11 @@
100#define MCDI_HEADER_XFLAGS_EVREQ 0x01 112#define MCDI_HEADER_XFLAGS_EVREQ 0x01
101 113
102/* Maximum number of payload bytes */ 114/* Maximum number of payload bytes */
103#define MCDI_CTL_SDU_LEN_MAX 0xfc 115#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
116#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
117
118#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
119
104 120
105/* The MC can generate events for two reasons: 121/* The MC can generate events for two reasons:
106 * - To complete a shared memory request if XFLAGS_EVREQ was set 122 * - To complete a shared memory request if XFLAGS_EVREQ was set
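Since MCDI v2 raises the maximum SDU from 0xfc to 0x400 bytes, any host-side length check has to pick the limit for the protocol version actually in use. A small sketch, assuming the negotiated version is already known from elsewhere:

#include <stddef.h>
#include <stdio.h>

#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
#define MCDI_CTL_SDU_LEN_MAX_V2 0x400

/* Payload limit for the negotiated MCDI protocol version. */
static size_t mcdi_sdu_len_max(int pcol_version)
{
        return pcol_version >= 2 ? MCDI_CTL_SDU_LEN_MAX_V2
                                 : MCDI_CTL_SDU_LEN_MAX_V1;
}

int main(void)
{
        printf("v1 max=%zu v2 max=%zu\n",
               mcdi_sdu_len_max(1), mcdi_sdu_len_max(2));
        return 0;
}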
@@ -145,22 +161,69 @@
145#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc 161#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
146 162
147 163
164/* Operation not permitted. */
165#define MC_CMD_ERR_EPERM 1
148/* Non-existent command target */ 166/* Non-existent command target */
149#define MC_CMD_ERR_ENOENT 2 167#define MC_CMD_ERR_ENOENT 2
150/* assert() has killed the MC */ 168/* assert() has killed the MC */
151#define MC_CMD_ERR_EINTR 4 169#define MC_CMD_ERR_EINTR 4
170/* I/O failure */
171#define MC_CMD_ERR_EIO 5
172/* Try again */
173#define MC_CMD_ERR_EAGAIN 11
174/* Out of memory */
175#define MC_CMD_ERR_ENOMEM 12
152/* Caller does not hold required locks */ 176/* Caller does not hold required locks */
153#define MC_CMD_ERR_EACCES 13 177#define MC_CMD_ERR_EACCES 13
154/* Resource is currently unavailable (e.g. lock contention) */ 178/* Resource is currently unavailable (e.g. lock contention) */
155#define MC_CMD_ERR_EBUSY 16 179#define MC_CMD_ERR_EBUSY 16
180/* No such device */
181#define MC_CMD_ERR_ENODEV 19
156/* Invalid argument to target */ 182/* Invalid argument to target */
157#define MC_CMD_ERR_EINVAL 22 183#define MC_CMD_ERR_EINVAL 22
184/* Out of range */
185#define MC_CMD_ERR_ERANGE 34
158/* Non-recursive resource is already acquired */ 186/* Non-recursive resource is already acquired */
159#define MC_CMD_ERR_EDEADLK 35 187#define MC_CMD_ERR_EDEADLK 35
160/* Operation not implemented */ 188/* Operation not implemented */
161#define MC_CMD_ERR_ENOSYS 38 189#define MC_CMD_ERR_ENOSYS 38
162/* Operation timed out */ 190/* Operation timed out */
163#define MC_CMD_ERR_ETIME 62 191#define MC_CMD_ERR_ETIME 62
192/* Link has been severed */
193#define MC_CMD_ERR_ENOLINK 67
194/* Protocol error */
195#define MC_CMD_ERR_EPROTO 71
196/* Operation not supported */
197#define MC_CMD_ERR_ENOTSUP 95
198/* Address not available */
199#define MC_CMD_ERR_EADDRNOTAVAIL 99
200/* Not connected */
201#define MC_CMD_ERR_ENOTCONN 107
202/* Operation already in progress */
203#define MC_CMD_ERR_EALREADY 114
204
205/* Resource allocation failed. */
206#define MC_CMD_ERR_ALLOC_FAIL 0x1000
207/* V-adaptor not found. */
208#define MC_CMD_ERR_NO_VADAPTOR 0x1001
209/* EVB port not found. */
210#define MC_CMD_ERR_NO_EVB_PORT 0x1002
211/* V-switch not found. */
212#define MC_CMD_ERR_NO_VSWITCH 0x1003
213/* Too many VLAN tags. */
214#define MC_CMD_ERR_VLAN_LIMIT 0x1004
215/* Bad PCI function number. */
216#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
217/* Invalid VLAN mode. */
218#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
219/* Invalid v-switch type. */
220#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
221/* Invalid v-port type. */
222#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
223/* MAC address exists. */
224#define MC_CMD_ERR_MAC_EXIST 0x1009
225/* Slave core not present */
226#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
164 227
165#define MC_CMD_ERR_CODE_OFST 0 228#define MC_CMD_ERR_CODE_OFST 0
166 229
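The codes above track Linux errno numbers up to 0x1000, where the vendor-specific range begins, so a host can pass the small codes through almost directly but must pick some policy for the rest. A hedged sketch of such a translation; the -EIO fallback is an assumption, not necessarily the driver's policy:

#include <errno.h>
#include <stdio.h>

#define MC_CMD_ERR_ENOENT 2
#define MC_CMD_ERR_EINVAL 22
#define MC_CMD_ERR_ALLOC_FAIL 0x1000 /* first vendor-specific code */

/* Map an MCDI error code to a negative host errno. */
static int mcdi_err_to_errno(unsigned int mcdi_err)
{
        if (mcdi_err == 0)
                return 0;
        if (mcdi_err >= MC_CMD_ERR_ALLOC_FAIL)
                return -EIO;            /* assumed fallback for vendor codes */
        return -(int)mcdi_err;          /* codes below 0x1000 track errno */
}

int main(void)
{
        printf("%d %d %d\n",
               mcdi_err_to_errno(MC_CMD_ERR_ENOENT),
               mcdi_err_to_errno(MC_CMD_ERR_EINVAL),
               mcdi_err_to_errno(0x1003));
        return 0;
}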
@@ -178,9 +241,11 @@
178 241
179/* Vectors in the boot ROM */ 242/* Vectors in the boot ROM */
180/* Point to the copycode entry point. */ 243/* Point to the copycode entry point. */
181#define MC_BOOTROM_COPYCODE_VEC (0x7f4) 244#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
245#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
182/* Points to the recovery mode entry point. */ 246/* Points to the recovery mode entry point. */
183#define MC_BOOTROM_NOFLASH_VEC (0x7f8) 247#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
248#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
184 249
185/* The command set exported by the boot ROM (MCDI v0) */ 250/* The command set exported by the boot ROM (MCDI v0) */
186#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ 251#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -209,16 +274,29 @@
209 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) 274 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
210 275
211 276
277/* Version 2 adds an optional argument to error returns: the errno value
278 * may be followed by the (0-based) number of the first argument that
279 * could not be processed.
280 */
281#define MC_CMD_ERR_ARG_OFST 4
282
283/* No space */
284#define MC_CMD_ERR_ENOSPC 28
285
212/* MCDI_EVENT structuredef */ 286/* MCDI_EVENT structuredef */
213#define MCDI_EVENT_LEN 8 287#define MCDI_EVENT_LEN 8
214#define MCDI_EVENT_CONT_LBN 32 288#define MCDI_EVENT_CONT_LBN 32
215#define MCDI_EVENT_CONT_WIDTH 1 289#define MCDI_EVENT_CONT_WIDTH 1
216#define MCDI_EVENT_LEVEL_LBN 33 290#define MCDI_EVENT_LEVEL_LBN 33
217#define MCDI_EVENT_LEVEL_WIDTH 3 291#define MCDI_EVENT_LEVEL_WIDTH 3
218#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */ 292/* enum: Info. */
219#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */ 293#define MCDI_EVENT_LEVEL_INFO 0x0
220#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */ 294/* enum: Warning. */
221#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */ 295#define MCDI_EVENT_LEVEL_WARN 0x1
296/* enum: Error. */
297#define MCDI_EVENT_LEVEL_ERR 0x2
298/* enum: Fatal. */
299#define MCDI_EVENT_LEVEL_FATAL 0x3
222#define MCDI_EVENT_DATA_OFST 0 300#define MCDI_EVENT_DATA_OFST 0
223#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 301#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
224#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 302#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
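As the MC_CMD_ERR_ARG_OFST note above says, a v2 error return may append the 0-based index of the first offending argument after the errno dword. A sketch of parsing both fields from a raw response (MCDI is little-endian; the example bytes are synthetic):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MC_CMD_ERR_CODE_OFST 0
#define MC_CMD_ERR_ARG_OFST  4

/* Read a little-endian dword at a byte offset in the response. */
static uint32_t le_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

int main(void)
{
        /* Example error response: EINVAL (22), argument #1 was bad. */
        const uint8_t resp[8] = { 22, 0, 0, 0, 1, 0, 0, 0 };
        size_t resp_len = sizeof(resp);

        uint32_t err = le_dword(resp, MC_CMD_ERR_CODE_OFST);
        if (resp_len >= MC_CMD_ERR_ARG_OFST + 4) /* arg index is optional */
                printf("err=%u bad_arg=%u\n", (unsigned)err,
                       (unsigned)le_dword(resp, MC_CMD_ERR_ARG_OFST));
        else
                printf("err=%u\n", (unsigned)err);
        return 0;
}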
@@ -230,9 +308,14 @@
230#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 308#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
231#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 309#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
232#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 310#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
233#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */ 311/* enum: 100Mb/s */
234#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */ 312#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
235#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */ 313/* enum: 1Gb/s */
314#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
315/* enum: 10Gb/s */
316#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
317/* enum: 40Gb/s */
318#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
236#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 319#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
237#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 320#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
238#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 321#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -247,26 +330,80 @@
247#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 330#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
248#define MCDI_EVENT_FWALERT_REASON_LBN 0 331#define MCDI_EVENT_FWALERT_REASON_LBN 0
249#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 332#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
250#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */ 333/* enum: SRAM Access. */
334#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
251#define MCDI_EVENT_FLR_VF_LBN 0 335#define MCDI_EVENT_FLR_VF_LBN 0
252#define MCDI_EVENT_FLR_VF_WIDTH 8 336#define MCDI_EVENT_FLR_VF_WIDTH 8
253#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 337#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
254#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 338#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
255#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 339#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
256#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 340#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
257#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */ 341/* enum: Descriptor loader reported failure */
258#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */ 342#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
259#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */ 343/* enum: Descriptor ring empty and no EOP seen for packet */
344#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
345/* enum: Overlength packet */
346#define MCDI_EVENT_TX_ERR_2BIG 0x3
347/* enum: Malformed option descriptor */
348#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
349/* enum: Option descriptor part way through a packet */
350#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
351/* enum: DMA or PIO data access error */
352#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
260#define MCDI_EVENT_TX_ERR_INFO_LBN 16 353#define MCDI_EVENT_TX_ERR_INFO_LBN 16
261#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 354#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
355#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
356#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
262#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 357#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
263#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 358#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
264#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 359#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
265#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 360#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
266#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */ 361/* enum: PLL lost lock */
267#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */ 362#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
268#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */ 363/* enum: Filter overflow (PDMA) */
269#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */ 364#define MCDI_EVENT_PTP_ERR_FILTER 0x2
365/* enum: FIFO overflow (FPGA) */
366#define MCDI_EVENT_PTP_ERR_FIFO 0x3
367/* enum: Merge queue overflow */
368#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
369#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
370#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
371/* enum: AOE failed to load - no valid image? */
372#define MCDI_EVENT_AOE_NO_LOAD 0x1
373/* enum: AOE FC reported an exception */
374#define MCDI_EVENT_AOE_FC_ASSERT 0x2
375/* enum: AOE FC watchdogged */
376#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
377/* enum: AOE FC failed to start */
378#define MCDI_EVENT_AOE_FC_NO_START 0x4
379/* enum: Generic AOE fault - likely to have been reported via other means too
380 * but intended for use by aoex driver.
381 */
382#define MCDI_EVENT_AOE_FAULT 0x5
383/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
384#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
385/* enum: AOE loaded successfully */
386#define MCDI_EVENT_AOE_LOAD 0x7
387/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
388#define MCDI_EVENT_AOE_DMA 0x8
389/* enum: AOE byteblaster connected/disconnected (Connection status in
390 * AOE_ERR_DATA)
391 */
392#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
393#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
394#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
395#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
396#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
397#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
398#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
399#define MCDI_EVENT_RX_ERR_INFO_LBN 16
400#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
401#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
402#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
403#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
404#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
405#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
406#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
270#define MCDI_EVENT_DATA_LBN 0 407#define MCDI_EVENT_DATA_LBN 0
271#define MCDI_EVENT_DATA_WIDTH 32 408#define MCDI_EVENT_DATA_WIDTH 32
272#define MCDI_EVENT_SRC_LBN 36 409#define MCDI_EVENT_SRC_LBN 36
@@ -275,21 +412,60 @@
275#define MCDI_EVENT_EV_CODE_WIDTH 4 412#define MCDI_EVENT_EV_CODE_WIDTH 4
276#define MCDI_EVENT_CODE_LBN 44 413#define MCDI_EVENT_CODE_LBN 44
277#define MCDI_EVENT_CODE_WIDTH 8 414#define MCDI_EVENT_CODE_WIDTH 8
278#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */ 415/* enum: Bad assert. */
279#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */ 416#define MCDI_EVENT_CODE_BADSSERT 0x1
280#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */ 417/* enum: PM Notice. */
281#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */ 418#define MCDI_EVENT_CODE_PMNOTICE 0x2
282#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */ 419/* enum: Command done. */
283#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */ 420#define MCDI_EVENT_CODE_CMDDONE 0x3
284#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */ 421/* enum: Link change. */
285#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */ 422#define MCDI_EVENT_CODE_LINKCHANGE 0x4
286#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */ 423/* enum: Sensor Event. */
287#define MCDI_EVENT_CODE_FLR 0xa /* enum */ 424#define MCDI_EVENT_CODE_SENSOREVT 0x5
288#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */ 425/* enum: Schedule error. */
289#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */ 426#define MCDI_EVENT_CODE_SCHEDERR 0x6
290#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */ 427/* enum: Reboot. */
291#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */ 428#define MCDI_EVENT_CODE_REBOOT 0x7
292#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */ 429/* enum: Mac stats DMA. */
430#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
431/* enum: Firmware alert. */
432#define MCDI_EVENT_CODE_FWALERT 0x9
433/* enum: Function level reset. */
434#define MCDI_EVENT_CODE_FLR 0xa
435/* enum: Transmit error */
436#define MCDI_EVENT_CODE_TX_ERR 0xb
437/* enum: Tx flush has completed */
438#define MCDI_EVENT_CODE_TX_FLUSH 0xc
439/* enum: PTP packet received timestamp */
440#define MCDI_EVENT_CODE_PTP_RX 0xd
441/* enum: PTP NIC failure */
442#define MCDI_EVENT_CODE_PTP_FAULT 0xe
443/* enum: PTP PPS event */
444#define MCDI_EVENT_CODE_PTP_PPS 0xf
445/* enum: Rx flush has completed */
446#define MCDI_EVENT_CODE_RX_FLUSH 0x10
447/* enum: Receive error */
448#define MCDI_EVENT_CODE_RX_ERR 0x11
449/* enum: AOE fault */
450#define MCDI_EVENT_CODE_AOE 0x12
451/* enum: Network port calibration failed (VCAL). */
452#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
453/* enum: HW PPS event */
454#define MCDI_EVENT_CODE_HW_PPS 0x14
455/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
456 * a different format)
457 */
458#define MCDI_EVENT_CODE_MC_REBOOT 0x15
459/* enum: the MC has detected a parity error */
460#define MCDI_EVENT_CODE_PAR_ERR 0x16
461/* enum: the MC has detected a correctable error */
462#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
463/* enum: the MC has detected an uncorrectable error */
464#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
465/* enum: Artificial event generated by host and posted via MC for test
466 * purposes.
467 */
468#define MCDI_EVENT_CODE_TESTGEN 0xfa
293#define MCDI_EVENT_CMDDONE_DATA_OFST 0 469#define MCDI_EVENT_CMDDONE_DATA_OFST 0
294#define MCDI_EVENT_CMDDONE_DATA_LBN 0 470#define MCDI_EVENT_CMDDONE_DATA_LBN 0
295#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 471#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
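MCDI events are 64-bit values whose fields again use the (LBN, WIDTH) convention, with the event code at bits 51:44 and the 32-bit payload at bits 31:0, so a host dispatcher extracts CODE and switches on the enum values listed above. A compact model:

#include <stdint.h>
#include <stdio.h>

#define MCDI_EVENT_CODE_LBN   44 /* from the definitions above */
#define MCDI_EVENT_CODE_WIDTH 8
#define MCDI_EVENT_DATA_LBN   0
#define MCDI_EVENT_DATA_WIDTH 32

#define MCDI_EVENT_CODE_LINKCHANGE 0x4
#define MCDI_EVENT_CODE_REBOOT     0x7

/* Extract an (LBN, WIDTH) field from a 64-bit MCDI event. */
static uint32_t ev_field(uint64_t ev, int lbn, int width)
{
        return (uint32_t)((ev >> lbn) &
                          ((width == 64) ? ~0ull : ((1ull << width) - 1)));
}

int main(void)
{
        /* Synthetic link-change event with some payload in DATA. */
        uint64_t ev = ((uint64_t)MCDI_EVENT_CODE_LINKCHANGE <<
                       MCDI_EVENT_CODE_LBN) | 0x1234;

        switch (ev_field(ev, MCDI_EVENT_CODE_LBN, MCDI_EVENT_CODE_WIDTH)) {
        case MCDI_EVENT_CODE_LINKCHANGE:
                printf("link change, data=0x%x\n",
                       (unsigned)ev_field(ev, MCDI_EVENT_DATA_LBN,
                                          MCDI_EVENT_DATA_WIDTH));
                break;
        case MCDI_EVENT_CODE_REBOOT:
                printf("MC rebooted\n");
                break;
        default:
                printf("unhandled event\n");
        }
        return 0;
}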
@@ -305,15 +481,114 @@
305#define MCDI_EVENT_TX_ERR_DATA_OFST 0 481#define MCDI_EVENT_TX_ERR_DATA_OFST 0
306#define MCDI_EVENT_TX_ERR_DATA_LBN 0 482#define MCDI_EVENT_TX_ERR_DATA_LBN 0
307#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 483#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
484/* Seconds field of timestamp */
308#define MCDI_EVENT_PTP_SECONDS_OFST 0 485#define MCDI_EVENT_PTP_SECONDS_OFST 0
309#define MCDI_EVENT_PTP_SECONDS_LBN 0 486#define MCDI_EVENT_PTP_SECONDS_LBN 0
310#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 487#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
488/* Nanoseconds field of timestamp */
311#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 489#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
312#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 490#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
313#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 491#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
492/* Lowest four bytes of sourceUUID from PTP packet */
314#define MCDI_EVENT_PTP_UUID_OFST 0 493#define MCDI_EVENT_PTP_UUID_OFST 0
315#define MCDI_EVENT_PTP_UUID_LBN 0 494#define MCDI_EVENT_PTP_UUID_LBN 0
316#define MCDI_EVENT_PTP_UUID_WIDTH 32 495#define MCDI_EVENT_PTP_UUID_WIDTH 32
496#define MCDI_EVENT_RX_ERR_DATA_OFST 0
497#define MCDI_EVENT_RX_ERR_DATA_LBN 0
498#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
499#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
500#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
501#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
502#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
503#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
504#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
505#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
506#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
507#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
508
509/* FCDI_EVENT structuredef */
510#define FCDI_EVENT_LEN 8
511#define FCDI_EVENT_CONT_LBN 32
512#define FCDI_EVENT_CONT_WIDTH 1
513#define FCDI_EVENT_LEVEL_LBN 33
514#define FCDI_EVENT_LEVEL_WIDTH 3
515/* enum: Info. */
516#define FCDI_EVENT_LEVEL_INFO 0x0
517/* enum: Warning. */
518#define FCDI_EVENT_LEVEL_WARN 0x1
519/* enum: Error. */
520#define FCDI_EVENT_LEVEL_ERR 0x2
521/* enum: Fatal. */
522#define FCDI_EVENT_LEVEL_FATAL 0x3
523#define FCDI_EVENT_DATA_OFST 0
524#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
525#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
526#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
527#define FCDI_EVENT_LINK_UP 0x1 /* enum */
528#define FCDI_EVENT_DATA_LBN 0
529#define FCDI_EVENT_DATA_WIDTH 32
530#define FCDI_EVENT_SRC_LBN 36
531#define FCDI_EVENT_SRC_WIDTH 8
532#define FCDI_EVENT_EV_CODE_LBN 60
533#define FCDI_EVENT_EV_CODE_WIDTH 4
534#define FCDI_EVENT_CODE_LBN 44
535#define FCDI_EVENT_CODE_WIDTH 8
536/* enum: The FC was rebooted. */
537#define FCDI_EVENT_CODE_REBOOT 0x1
538/* enum: Bad assert. */
539#define FCDI_EVENT_CODE_ASSERT 0x2
540/* enum: DDR3 test result. */
541#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
542/* enum: Link status. */
543#define FCDI_EVENT_CODE_LINK_STATE 0x4
544/* enum: A timed read is ready to be serviced. */
545#define FCDI_EVENT_CODE_TIMED_READ 0x5
546/* enum: One or more PPS IN events */
547#define FCDI_EVENT_CODE_PPS_IN 0x6
548/* enum: One or more PPS OUT events */
549#define FCDI_EVENT_CODE_PPS_OUT 0x7
550#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
551#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
552#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
553#define FCDI_EVENT_ASSERT_TYPE_LBN 36
554#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
555#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
556#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
557#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
558#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
559#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
560#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
561#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
562#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
563#define FCDI_EVENT_PPS_COUNT_OFST 0
564#define FCDI_EVENT_PPS_COUNT_LBN 0
565#define FCDI_EVENT_PPS_COUNT_WIDTH 32
566
567/* FCDI_EXTENDED_EVENT structuredef */
568#define FCDI_EXTENDED_EVENT_LENMIN 16
569#define FCDI_EXTENDED_EVENT_LENMAX 248
570#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
571/* Number of timestamps following */
572#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
573#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
574#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
575/* Seconds field of a timestamp record */
576#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
577#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
578#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
579/* Nanoseconds field of a timestamp record */
580#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
581#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
582#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
583/* Timestamp records comprising the event */
584#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
585#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
586#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
587#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
588#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
589#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
590#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
591#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
317 592
318 593
319/***********************************/ 594/***********************************/
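FCDI_EXTENDED_EVENT above is a variable-length message: an 8-byte header carrying PPS_COUNT, followed by PPS_COUNT 8-byte (seconds, nanoseconds) records, which is exactly what LEN(num) = 8+8*(num) and the MINNUM/MAXNUM bounds encode. A sketch of sizing and walking one:

#include <stdint.h>
#include <stdio.h>

#define FCDI_EXTENDED_EVENT_LEN(num) (8 + 8 * (num)) /* as defined above */

struct pps_record { /* one 8-byte timestamp record */
        uint32_t seconds;
        uint32_t nanoseconds;
};

int main(void)
{
        /* Synthetic event payload with three timestamp records. */
        struct pps_record recs[3] = {
                { 100, 1000 }, { 101, 2000 }, { 102, 3000 },
        };
        uint32_t pps_count = 3;

        printf("event length = %d bytes\n",
               FCDI_EXTENDED_EVENT_LEN(pps_count));
        for (uint32_t i = 0; i < pps_count; i++)
                printf("pps[%u] = %u.%09u\n", (unsigned)i,
                       (unsigned)recs[i].seconds,
                       (unsigned)recs[i].nanoseconds);
        return 0;
}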
@@ -365,11 +640,27 @@
365 640
366/* MC_CMD_COPYCODE_IN msgrequest */ 641/* MC_CMD_COPYCODE_IN msgrequest */
367#define MC_CMD_COPYCODE_IN_LEN 16 642#define MC_CMD_COPYCODE_IN_LEN 16
643/* Source address */
368#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 644#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
645/* enum: Entering the main image via a copy of a single word from and to this
646 * address indicates that it should not attempt to start the datapath CPUs.
647 * This is useful for certain soft rebooting scenarios. (Huntington only)
648 */
649#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
650/* enum: Entering the main image via a copy of a single word from and to this
651 * address indicates that it should not attempt to parse any configuration from
652 * flash. (In addition, the datapath CPUs will not be started, as for
653 * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
654 * certain soft rebooting scenarios. (Huntington only)
655 */
656#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
657/* Destination address */
369#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 658#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
370#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 659#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
660/* Address of where to jump after copy. */
371#define MC_CMD_COPYCODE_IN_JUMP_OFST 12 661#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
372#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */ 662/* enum: Control should return to the caller rather than jumping */
663#define MC_CMD_COPYCODE_JUMP_NONE 0x1
373 664
374/* MC_CMD_COPYCODE_OUT msgresponse */ 665/* MC_CMD_COPYCODE_OUT msgresponse */
375#define MC_CMD_COPYCODE_OUT_LEN 0 666#define MC_CMD_COPYCODE_OUT_LEN 0
@@ -377,11 +668,13 @@
377 668
378/***********************************/ 669/***********************************/
379/* MC_CMD_SET_FUNC 670/* MC_CMD_SET_FUNC
671 * Select function for function-specific commands.
380 */ 672 */
381#define MC_CMD_SET_FUNC 0x4 673#define MC_CMD_SET_FUNC 0x4
382 674
383/* MC_CMD_SET_FUNC_IN msgrequest */ 675/* MC_CMD_SET_FUNC_IN msgrequest */
384#define MC_CMD_SET_FUNC_IN_LEN 4 676#define MC_CMD_SET_FUNC_IN_LEN 4
677/* Set function */
385#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 678#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
386 679
387/* MC_CMD_SET_FUNC_OUT msgresponse */ 680/* MC_CMD_SET_FUNC_OUT msgresponse */
@@ -390,6 +683,7 @@
390 683
391/***********************************/ 684/***********************************/
392/* MC_CMD_GET_BOOT_STATUS 685/* MC_CMD_GET_BOOT_STATUS
686 * Get the instruction address from which the MC booted.
393 */ 687 */
394#define MC_CMD_GET_BOOT_STATUS 0x5 688#define MC_CMD_GET_BOOT_STATUS 0x5
395 689
@@ -398,7 +692,10 @@
398 692
399/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */ 693/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
400#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 694#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
695/* ?? */
401#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 696#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
697/* enum: indicates that the MC wasn't flash booted */
698#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
402#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 699#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
403#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 700#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
404#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 701#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
@@ -410,25 +707,38 @@
410 707
411/***********************************/ 708/***********************************/
412/* MC_CMD_GET_ASSERTS 709/* MC_CMD_GET_ASSERTS
413 * Get and clear any assertion status. 710 * Get (and optionally clear) the current assertion status. Only
711 * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
712 * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
414 */ 713 */
415#define MC_CMD_GET_ASSERTS 0x6 714#define MC_CMD_GET_ASSERTS 0x6
416 715
417/* MC_CMD_GET_ASSERTS_IN msgrequest */ 716/* MC_CMD_GET_ASSERTS_IN msgrequest */
418#define MC_CMD_GET_ASSERTS_IN_LEN 4 717#define MC_CMD_GET_ASSERTS_IN_LEN 4
718/* Set to clear assertion */
419#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 719#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
420 720
421/* MC_CMD_GET_ASSERTS_OUT msgresponse */ 721/* MC_CMD_GET_ASSERTS_OUT msgresponse */
422#define MC_CMD_GET_ASSERTS_OUT_LEN 140 722#define MC_CMD_GET_ASSERTS_OUT_LEN 140
723/* Assertion status flag. */
423#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 724#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
424#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */ 725/* enum: No assertions have failed. */
425#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */ 726#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
426#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */ 727/* enum: A system-level assertion has failed. */
427#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */ 728#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
729/* enum: A thread-level assertion has failed. */
730#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
731/* enum: The system was reset by the watchdog. */
732#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
733/* enum: An illegal address trap stopped the system (huntington and later) */
734#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
735/* Failing PC value */
428#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 736#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
737/* Saved GP regs */
429#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 738#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
430#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 739#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
431#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 740#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
741/* Failing thread address */
432#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 742#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
433#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 743#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
434 744
@@ -441,9 +751,12 @@
441 751
442/* MC_CMD_LOG_CTRL_IN msgrequest */ 752/* MC_CMD_LOG_CTRL_IN msgrequest */
443#define MC_CMD_LOG_CTRL_IN_LEN 8 753#define MC_CMD_LOG_CTRL_IN_LEN 8
754/* Log destination */
444#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 755#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
445#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */ 756/* enum: UART. */
446#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */ 757#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
758/* enum: Event queue. */
759#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
447#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 760#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
448 761
449/* MC_CMD_LOG_CTRL_OUT msgresponse */ 762/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -459,11 +772,20 @@
459/* MC_CMD_GET_VERSION_IN msgrequest */ 772/* MC_CMD_GET_VERSION_IN msgrequest */
460#define MC_CMD_GET_VERSION_IN_LEN 0 773#define MC_CMD_GET_VERSION_IN_LEN 0
461 774
462/* MC_CMD_GET_VERSION_V0_OUT msgresponse */ 775/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
776#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
777/* placeholder, set to 0 */
778#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
779
780/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
463#define MC_CMD_GET_VERSION_V0_OUT_LEN 4 781#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
464#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 782#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
465#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */ 783/* enum: Reserved version number to indicate "any" version. */
466#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */ 784#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
785/* enum: Bootrom version value for Siena. */
786#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
787/* enum: Bootrom version value for Huntington. */
788#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
467 789
468/* MC_CMD_GET_VERSION_OUT msgresponse */ 790/* MC_CMD_GET_VERSION_OUT msgresponse */
469#define MC_CMD_GET_VERSION_OUT_LEN 32 791#define MC_CMD_GET_VERSION_OUT_LEN 32
@@ -471,6 +793,7 @@
471/* Enum values, see field(s): */ 793/* Enum values, see field(s): */
472/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ 794/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
473#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 795#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
796/* 128bit mask of functions supported by the current firmware */
474#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 797#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
475#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 798#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
476#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 799#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
@@ -478,46 +801,22 @@
478#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 801#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
479#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 802#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
480 803
481 804/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
482/***********************************/ 805#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
483/* MC_CMD_GET_FPGAREG 806/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
484 * Read multiple bytes from PTP FPGA. 807/* Enum values, see field(s): */
485 */ 808/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
486#define MC_CMD_GET_FPGAREG 0x9 809#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
487 810/* 128bit mask of functions supported by the current firmware */
488/* MC_CMD_GET_FPGAREG_IN msgrequest */ 811#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
489#define MC_CMD_GET_FPGAREG_IN_LEN 8 812#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
490#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0 813#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
491#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4 814#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
492 815#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
493/* MC_CMD_GET_FPGAREG_OUT msgresponse */ 816#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
494#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1 817/* extra info */
495#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252 818#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
496#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num)) 819#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
500#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
501
502
503/***********************************/
504/* MC_CMD_PUT_FPGAREG
505 * Write multiple bytes to PTP FPGA.
506 */
507#define MC_CMD_PUT_FPGAREG 0xa
508
509/* MC_CMD_PUT_FPGAREG_IN msgrequest */
510#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
511#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
512#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
513#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
517#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
518
519/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
520#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
521 820
522 821
523/***********************************/ 822/***********************************/
@@ -528,32 +827,74 @@
528 827
529/* MC_CMD_PTP_IN msgrequest */ 828/* MC_CMD_PTP_IN msgrequest */
530#define MC_CMD_PTP_IN_LEN 1 829#define MC_CMD_PTP_IN_LEN 1
830/* PTP operation code */
531#define MC_CMD_PTP_IN_OP_OFST 0 831#define MC_CMD_PTP_IN_OP_OFST 0
532#define MC_CMD_PTP_IN_OP_LEN 1 832#define MC_CMD_PTP_IN_OP_LEN 1
533#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */ 833/* enum: Enable PTP packet timestamping operation. */
534#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */ 834#define MC_CMD_PTP_OP_ENABLE 0x1
535#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */ 835/* enum: Disable PTP packet timestamping operation. */
536#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */ 836#define MC_CMD_PTP_OP_DISABLE 0x2
537#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */ 837/* enum: Send a PTP packet. */
538#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */ 838#define MC_CMD_PTP_OP_TRANSMIT 0x3
539#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */ 839/* enum: Read the current NIC time. */
540#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */ 840#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
541#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */ 841/* enum: Get the current PTP status. */
542#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */ 842#define MC_CMD_PTP_OP_STATUS 0x5
543#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */ 843/* enum: Adjust the PTP NIC's time. */
544#define MC_CMD_PTP_OP_MAX 0xc /* enum */ 844#define MC_CMD_PTP_OP_ADJUST 0x6
845/* enum: Synchronize host and NIC time. */
846#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
847/* enum: Basic manufacturing tests. */
848#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
849/* enum: Packet based manufacturing tests. */
850#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
851/* enum: Reset some of the PTP related statistics */
852#define MC_CMD_PTP_OP_RESET_STATS 0xa
853/* enum: Debug operations to MC. */
854#define MC_CMD_PTP_OP_DEBUG 0xb
855/* enum: Read an FPGA register */
856#define MC_CMD_PTP_OP_FPGAREAD 0xc
857/* enum: Write an FPGA register */
858#define MC_CMD_PTP_OP_FPGAWRITE 0xd
859/* enum: Apply an offset to the NIC clock */
860#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
861/* enum: Change the frequency adjustment applied to the NIC clock */
862#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
863/* enum: Set the MC packet filter VLAN tags for received PTP packets */
864#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
865/* enum: Set the MC packet filter UUID for received PTP packets */
866#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
867/* enum: Set the MC packet filter Domain for received PTP packets */
868#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
869/* enum: Set the clock source */
870#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
871/* enum: Reset value of Timer Reg. */
872#define MC_CMD_PTP_OP_RST_CLK 0x14
873/* enum: Enable the forwarding of PPS events to the host */
874#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
875/* enum: Values above this are reserved for future use. */
876#define MC_CMD_PTP_OP_MAX 0x16
545 877
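For orientation: every MC_CMD_PTP sub-operation above shares one framing, with the operation code in the first dword and, for most sub-requests, a peripheral ID in the second. A minimal sketch of a READ_NIC_TIME round trip, assuming a hypothetical mcdi_rpc() transport helper and a little-endian host (MCDI payloads are little-endian dwords); ptp_read_nic_time() is illustrative, not part of this header:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical MCDI transport: sends the payload for `cmd`, fills in the
 * response, returns 0 on success. */
int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
             void *outbuf, size_t outlen);

int ptp_read_nic_time(uint32_t *seconds, uint32_t *nanoseconds)
{
        uint32_t in[2], out[2];

        in[0] = MC_CMD_PTP_OP_READ_NIC_TIME; /* MC_CMD_PTP_IN_OP_OFST 0 */
        in[1] = 0;                           /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */

        int rc = mcdi_rpc(MC_CMD_PTP, in, sizeof(in), out, sizeof(out));
        if (rc)
                return rc;
        *seconds = out[0];     /* MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 */
        *nanoseconds = out[1]; /* ..._NANOSECONDS_OFST 4 */
        return 0;
}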
546/* MC_CMD_PTP_IN_ENABLE msgrequest */ 878/* MC_CMD_PTP_IN_ENABLE msgrequest */
547#define MC_CMD_PTP_IN_ENABLE_LEN 16 879#define MC_CMD_PTP_IN_ENABLE_LEN 16
548#define MC_CMD_PTP_IN_CMD_OFST 0 880#define MC_CMD_PTP_IN_CMD_OFST 0
549#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 881#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
882/* Event queue for PTP events */
550#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 883#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
884/* PTP timestamping mode */
551#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 885#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
552#define MC_CMD_PTP_MODE_V1 0x0 /* enum */ 886/* enum: PTP, version 1 */
553#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */ 887#define MC_CMD_PTP_MODE_V1 0x0
554#define MC_CMD_PTP_MODE_V2 0x2 /* enum */ 888/* enum: PTP, version 1, with VLAN headers - deprecated */
555#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */ 889#define MC_CMD_PTP_MODE_V1_VLAN 0x1
556#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */ 890/* enum: PTP, version 2 */
891#define MC_CMD_PTP_MODE_V2 0x2
892/* enum: PTP, version 2, with VLAN headers - deprecated */
893#define MC_CMD_PTP_MODE_V2_VLAN 0x3
894/* enum: PTP, version 2, with improved UUID filtering */
895#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
896/* enum: FCoE (seconds and microseconds) */
897#define MC_CMD_PTP_MODE_FCOE 0x5
557 898
558/* MC_CMD_PTP_IN_DISABLE msgrequest */ 899/* MC_CMD_PTP_IN_DISABLE msgrequest */
559#define MC_CMD_PTP_IN_DISABLE_LEN 8 900#define MC_CMD_PTP_IN_DISABLE_LEN 8
@@ -566,7 +907,9 @@
566#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) 907#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
567/* MC_CMD_PTP_IN_CMD_OFST 0 */ 908/* MC_CMD_PTP_IN_CMD_OFST 0 */
568/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 909/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
910/* Transmit packet length */
569#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 911#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
912/* Transmit packet data */
570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 913#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 914#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
572#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 915#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
@@ -586,19 +929,27 @@
586#define MC_CMD_PTP_IN_ADJUST_LEN 24 929#define MC_CMD_PTP_IN_ADJUST_LEN 24
587/* MC_CMD_PTP_IN_CMD_OFST 0 */ 930/* MC_CMD_PTP_IN_CMD_OFST 0 */
588/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 931/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
932/* Frequency adjustment 40 bit fixed point ns */
589#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 933#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
590#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 934#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
591#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 935#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
592#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 936#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
593#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */ 937/* enum: Number of fractional bits in frequency adjustment */
938#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
939/* Time adjustment in seconds */
594#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 940#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
941/* Time adjustment in nanoseconds */
595#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 942#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
596 943
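The FREQ field is a signed 64-bit fixed-point quantity with MC_CMD_PTP_IN_ADJUST_BITS (0x28, i.e. 40) fractional bits. On the natural reading of "40 bit fixed point ns", one nanosecond of drift per second (1 ppb) maps to 2^40/10^9, so a parts-per-billion error converts as sketched below; ppb_to_mcdi_freq() is illustrative, and a production driver would guard the shift against overflow for |ppb| beyond roughly 2^23. The SECONDS/NANOSECONDS pair above, by contrast, is a one-off step applied to the clock rather than a rate.

#include <stdint.h>

/* FREQ = ppb * 2^MC_CMD_PTP_IN_ADJUST_BITS / 1e9: 1 ppb is 1 ns of
 * drift per second, carried with 40 fractional bits. */
static int64_t ppb_to_mcdi_freq(int64_t ppb)
{
        return (ppb << MC_CMD_PTP_IN_ADJUST_BITS) / 1000000000LL;
}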
597/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ 944/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
598#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 945#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
599/* MC_CMD_PTP_IN_CMD_OFST 0 */ 946/* MC_CMD_PTP_IN_CMD_OFST 0 */
600/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 947/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
948/* Number of time readings to capture */
601#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 949#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
950/* Host address in which to write "synchronization started" indication (64
951 * bits)
952 */
602#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 953#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
603#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 954#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
604#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 955#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
@@ -613,86 +964,240 @@
613#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 964#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
614/* MC_CMD_PTP_IN_CMD_OFST 0 */ 965/* MC_CMD_PTP_IN_CMD_OFST 0 */
615/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 966/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
967/* Enable or disable packet testing */
616#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 968#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
617 969
618/* MC_CMD_PTP_IN_RESET_STATS msgrequest */ 970/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
619#define MC_CMD_PTP_IN_RESET_STATS_LEN 8 971#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
620/* MC_CMD_PTP_IN_CMD_OFST 0 */ 972/* MC_CMD_PTP_IN_CMD_OFST 0 */
973/* Reset PTP statistics */
621/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 974/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
622 975
623/* MC_CMD_PTP_IN_DEBUG msgrequest */ 976/* MC_CMD_PTP_IN_DEBUG msgrequest */
624#define MC_CMD_PTP_IN_DEBUG_LEN 12 977#define MC_CMD_PTP_IN_DEBUG_LEN 12
625/* MC_CMD_PTP_IN_CMD_OFST 0 */ 978/* MC_CMD_PTP_IN_CMD_OFST 0 */
626/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 979/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
980/* Debug operations */
627#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 981#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
628 982
983/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
984#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
985/* MC_CMD_PTP_IN_CMD_OFST 0 */
986/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
987#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
988#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
989
990/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
991#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
992#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
993#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
994/* MC_CMD_PTP_IN_CMD_OFST 0 */
995/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
996#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
997#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
998#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
999#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
1000#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
1001
1002/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
1003#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
1004/* MC_CMD_PTP_IN_CMD_OFST 0 */
1005/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1006/* Time adjustment in seconds */
1007#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
1008/* Time adjustment in nanoseconds */
1009#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
1010
1011/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
1012#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
1013/* MC_CMD_PTP_IN_CMD_OFST 0 */
1014/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1015/* Frequency adjustment 40 bit fixed point ns */
1016#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
1017#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
1018#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
1019#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
1020/* enum: Number of fractional bits in frequency adjustment */
1021/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
1022
1023/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
1024#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
1025/* MC_CMD_PTP_IN_CMD_OFST 0 */
1026/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1027/* Number of VLAN tags, 0 if not VLAN */
1028#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
1029/* Set of VLAN tags to filter against */
1030#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
1031#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
1032#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
1033
1034/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
1035#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
1036/* MC_CMD_PTP_IN_CMD_OFST 0 */
1037/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1038/* 1 to enable UUID filtering, 0 to disable */
1039#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
1040/* UUID to filter against */
1041#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
1042#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
1043#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
1044#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
1045
1046/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
1047#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
1048/* MC_CMD_PTP_IN_CMD_OFST 0 */
1049/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1050/* 1 to enable Domain filtering, 0 to disable */
1051#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
1052/* Domain number to filter against */
1053#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
1054
1055/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
1056#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
1057/* MC_CMD_PTP_IN_CMD_OFST 0 */
1058/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1059/* Set the clock source. */
1060#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
1061/* enum: Internal. */
1062#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
1063/* enum: External. */
1064#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
1065
1066/* MC_CMD_PTP_IN_RST_CLK msgrequest */
1067#define MC_CMD_PTP_IN_RST_CLK_LEN 8
1068/* MC_CMD_PTP_IN_CMD_OFST 0 */
1069/* Reset value of Timer Reg. */
1070/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1071
1072/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
1073#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
1074/* MC_CMD_PTP_IN_CMD_OFST 0 */
1075/* Enable or disable */
1076#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
1077/* enum: Enable */
1078#define MC_CMD_PTP_ENABLE_PPS 0x0
1079/* enum: Disable */
1080#define MC_CMD_PTP_DISABLE_PPS 0x1
1081/* Queueid to send events back */
1082#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
1083
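Note the layout quirk in this sub-request: the enable/disable word sits at offset 4, where most other PTP sub-requests carry PERIPH_ID. A sketch, assuming the same hypothetical mcdi_rpc() helper and little-endian host as the earlier examples; pps_enable() is illustrative:

#include <stdint.h>
#include <stddef.h>

int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
             void *outbuf, size_t outlen); /* hypothetical transport */

int pps_enable(uint32_t queue_id)
{
        uint32_t in[3];

        in[0] = MC_CMD_PTP_OP_PPS_ENABLE; /* MC_CMD_PTP_IN_CMD_OFST 0 */
        in[1] = MC_CMD_PTP_ENABLE_PPS;    /* MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 */
        in[2] = queue_id;                 /* ..._QUEUE_ID_OFST 8 */

        return mcdi_rpc(MC_CMD_PTP, in, sizeof(in), NULL, 0);
}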
629/* MC_CMD_PTP_OUT msgresponse */ 1084/* MC_CMD_PTP_OUT msgresponse */
630#define MC_CMD_PTP_OUT_LEN 0 1085#define MC_CMD_PTP_OUT_LEN 0
631 1086
632/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ 1087/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
633#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 1088#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
1089/* Value of seconds timestamp */
634#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 1090#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
1091/* Value of nanoseconds timestamp */
635#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 1092#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
636 1093
637/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ 1094/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
638#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 1095#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
1096/* Value of seconds timestamp */
639#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 1097#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
1098/* Value of nanoseconds timestamp */
640#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 1099#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
641 1100
642/* MC_CMD_PTP_OUT_STATUS msgresponse */ 1101/* MC_CMD_PTP_OUT_STATUS msgresponse */
643#define MC_CMD_PTP_OUT_STATUS_LEN 64 1102#define MC_CMD_PTP_OUT_STATUS_LEN 64
1103/* Frequency of NIC's hardware clock */
644#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 1104#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
1105/* Number of packets transmitted and timestamped */
645#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 1106#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
1107/* Number of packets received and timestamped */
646#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 1108#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
1109/* Number of packets timestamped by the FPGA */
647#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 1110#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
1111/* Number of packets filter matched */
648#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 1112#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
1113/* Number of packets not filter matched */
649#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 1114#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
1115/* Number of PPS overflows (noise on input?) */
650#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 1116#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
1117/* Number of PPS bad periods */
651#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 1118#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
1119/* Minimum period of PPS pulse */
652#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 1120#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
1121/* Maximum period of PPS pulse */
653#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 1122#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
1123/* Last period of PPS pulse */
654#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 1124#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
1125/* Mean period of PPS pulse */
655#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 1126#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
1127/* Minimum offset of PPS pulse (signed) */
656#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 1128#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
1129/* Maximum offset of PPS pulse (signed) */
657#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 1130#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
1131/* Last offset of PPS pulse (signed) */
658#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 1132#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
1133/* Mean offset of PPS pulse (signed) */
659#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 1134#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
660 1135
661/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ 1136/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
662#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 1137#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
663#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 1138#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
664#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) 1139#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
1140/* A set of host and NIC times */
665#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 1141#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
666#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 1142#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
667#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 1143#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
668#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 1144#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
1145/* Host time immediately before NIC's hardware clock read */
669#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 1146#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
1147/* Value of seconds timestamp */
670#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 1148#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
1149/* Value of nanoseconds timestamp */
671#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 1150#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
1151/* Host time immediately after NIC's hardware clock read */
672#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 1152#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
1153/* Number of nanoseconds waited after reading NIC's hardware clock */
673#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 1154#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
674 1155
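HOSTSTART and HOSTEND are host-time samples taken either side of the NIC clock read, so each TIMESET brackets the NIC timestamp within a window of host time; the narrower the window, the tighter the bound on the host/NIC offset. A sketch of selecting the best entry (the struct layout mirrors the offsets above; the interpretation of the host samples is our reading, not spelled out by the header):

#include <stdint.h>

struct ptp_timeset {
        uint32_t hoststart;   /* ..._HOSTSTART_OFST 0 */
        uint32_t seconds;     /* ..._SECONDS_OFST 4 */
        uint32_t nanoseconds; /* ..._NANOSECONDS_OFST 8 */
        uint32_t hostend;     /* ..._HOSTEND_OFST 12 */
        uint32_t waitns;      /* ..._WAITNS_OFST 16 */
};

/* Pick the tightest of `n` entries (n <= TIMESET_MAXNUM, i.e. 12). */
static const struct ptp_timeset *best_timeset(const struct ptp_timeset *ts,
                                              unsigned int n)
{
        const struct ptp_timeset *best = &ts[0];

        for (unsigned int i = 1; i < n; i++)
                if (ts[i].hostend - ts[i].hoststart <
                    best->hostend - best->hoststart)
                        best = &ts[i];
        return best;
}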
675/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ 1156/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
676#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 1157#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
1158/* Results of testing */
677#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 1159#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
678#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */ 1160/* enum: Successful test */
679#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */ 1161#define MC_CMD_PTP_MANF_SUCCESS 0x0
680#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */ 1162/* enum: FPGA load failed */
681#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */ 1163#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
682#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */ 1164/* enum: FPGA version invalid */
683#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */ 1165#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
684#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */ 1166/* enum: FPGA registers incorrect */
685#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */ 1167#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
686#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */ 1168/* enum: Oscillator possibly not working? */
687#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */ 1169#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
1170/* enum: Timestamps not increasing */
1171#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
1172/* enum: Mismatched packet count */
1173#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
1174/* enum: Mismatched packet count (Siena filter and FPGA) */
1175#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
1176/* enum: Not enough packets to perform timestamp check */
1177#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
1178/* enum: Timestamp trigger GPIO not working */
1179#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
1180/* Presence of external oscillator */
688#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 1181#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
689 1182
690/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ 1183/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
691#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 1184#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
1185/* Results of testing */
692#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 1186#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
1187/* Number of packets received by FPGA */
693#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 1188#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
1189/* Number of packets received by Siena filters */
694#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 1190#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
695 1191
1192/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
1193#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
1194#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
1195#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
1196#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
1197#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
1198#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
1199#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
1200
696 1201
697/***********************************/ 1202/***********************************/
698/* MC_CMD_CSR_READ32 1203/* MC_CMD_CSR_READ32
@@ -702,6 +1207,7 @@
702 1207
703/* MC_CMD_CSR_READ32_IN msgrequest */ 1208/* MC_CMD_CSR_READ32_IN msgrequest */
704#define MC_CMD_CSR_READ32_IN_LEN 12 1209#define MC_CMD_CSR_READ32_IN_LEN 12
1210/* Address */
705#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 1211#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
706#define MC_CMD_CSR_READ32_IN_STEP_OFST 4 1212#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
707#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 1213#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
@@ -710,6 +1216,7 @@
710#define MC_CMD_CSR_READ32_OUT_LENMIN 4 1216#define MC_CMD_CSR_READ32_OUT_LENMIN 4
711#define MC_CMD_CSR_READ32_OUT_LENMAX 252 1217#define MC_CMD_CSR_READ32_OUT_LENMAX 252
712#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) 1218#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
1219/* The last dword is the status, not a value read */
713#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 1220#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
714#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 1221#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
715#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 1222#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
@@ -726,6 +1233,7 @@
726#define MC_CMD_CSR_WRITE32_IN_LENMIN 12 1233#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
727#define MC_CMD_CSR_WRITE32_IN_LENMAX 252 1234#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
728#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) 1235#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
1236/* Address */
729#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 1237#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
730#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 1238#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
731#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 1239#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
@@ -739,6 +1247,48 @@
739 1247
740 1248
741/***********************************/ 1249/***********************************/
1250/* MC_CMD_HP
1251 * These commands are used for HP related features. They are grouped under one
1252 * MCDI command to avoid creating too many MCDI commands.
1253 */
1254#define MC_CMD_HP 0x54
1255
1256/* MC_CMD_HP_IN msgrequest */
1257#define MC_CMD_HP_IN_LEN 16
1258/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
1259 * the specified address with the specified interval. When address is NULL,
1260 * INTERVAL is interpreted as a command: 0: stop OCSD / 1: report OCSD current
1261 * state / 2: (debug) show temperature reported by one of the supported
1262 * sensors.
1263 */
1264#define MC_CMD_HP_IN_SUBCMD_OFST 0
1265/* enum: OCSD (Option Card Sensor Data) sub-command. */
1266#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
1267/* enum: Last known valid HP sub-command. */
1268#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
1269/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
1270 */
1271#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
1272#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
1273#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
1274#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
1275/* The requested update interval, in seconds. (Or the sub-command if ADDR is
1276 * NULL.)
1277 */
1278#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
1279
1280/* MC_CMD_HP_OUT msgresponse */
1281#define MC_CMD_HP_OUT_LEN 4
1282#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
1283/* enum: OCSD stopped for this card. */
1284#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
1285/* enum: OCSD was successfully started with the address provided. */
1286#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
1287/* enum: OCSD was already started for this card. */
1288#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
1289
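To make the ADDR/INTERVAL overloading concrete: with OCSD_ADDR left as NULL, the INTERVAL word becomes a sub-command, 1 meaning report the current OCSD state. A sketch assuming the hypothetical mcdi_rpc() helper and a little-endian host; hp_ocsd_report() is illustrative:

#include <stdint.h>
#include <stddef.h>

int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
             void *outbuf, size_t outlen); /* hypothetical transport */

int hp_ocsd_report(uint32_t *status)
{
        uint32_t in[4] = {
                MC_CMD_HP_IN_OCSD_SUBCMD, /* MC_CMD_HP_IN_SUBCMD_OFST 0 */
                0, 0,                     /* OCSD_ADDR LO/HI: NULL */
                1,                        /* INTERVAL_OFST 12: report state */
        };
        uint32_t out;

        int rc = mcdi_rpc(MC_CMD_HP, in, sizeof(in), &out, sizeof(out));
        if (rc == 0)
                *status = out; /* e.g. MC_CMD_HP_OUT_OCSD_STOPPED */
        return rc;
}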
1290
1291/***********************************/
742/* MC_CMD_STACKINFO 1292/* MC_CMD_STACKINFO
743 * Get stack information. 1293 * Get stack information.
744 */ 1294 */
@@ -751,6 +1301,7 @@
751#define MC_CMD_STACKINFO_OUT_LENMIN 12 1301#define MC_CMD_STACKINFO_OUT_LENMIN 12
752#define MC_CMD_STACKINFO_OUT_LENMAX 252 1302#define MC_CMD_STACKINFO_OUT_LENMAX 252
753#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) 1303#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
1304/* (thread ptr, stack size, free space) for each thread in system */
754#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 1305#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
755#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 1306#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
756#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 1307#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
@@ -765,19 +1316,35 @@
765 1316
766/* MC_CMD_MDIO_READ_IN msgrequest */ 1317/* MC_CMD_MDIO_READ_IN msgrequest */
767#define MC_CMD_MDIO_READ_IN_LEN 16 1318#define MC_CMD_MDIO_READ_IN_LEN 16
1319/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
1320 * external devices.
1321 */
768#define MC_CMD_MDIO_READ_IN_BUS_OFST 0 1322#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
769#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */ 1323/* enum: Internal. */
770#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */ 1324#define MC_CMD_MDIO_BUS_INTERNAL 0x0
1325/* enum: External. */
1326#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
1327/* Port address */
771#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 1328#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
1329/* Device Address or clause 22. */
772#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 1330#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
773#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */ 1331/* enum: By default all MCDI MDIO operations use clause 45 mode. To use
1332 * clause 22 instead, set DEVAD = MC_CMD_MDIO_CLAUSE22.
1333 */
1334#define MC_CMD_MDIO_CLAUSE22 0x20
1335/* Address */
774#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 1336#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
775 1337
776/* MC_CMD_MDIO_READ_OUT msgresponse */ 1338/* MC_CMD_MDIO_READ_OUT msgresponse */
777#define MC_CMD_MDIO_READ_OUT_LEN 8 1339#define MC_CMD_MDIO_READ_OUT_LEN 8
1340/* Value */
778#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 1341#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
1342/* Status; the MDIO commands return the raw status bits from the MDIO block. A
1343 * "good" transaction should have the DONE bit set and all other bits clear.
1344 */
779#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 1345#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
780#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */ 1346/* enum: Good. */
1347#define MC_CMD_MDIO_STATUS_GOOD 0x8
781 1348
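A sketch of a clause 45 read using these fields, assuming the hypothetical mcdi_rpc() helper and a little-endian host; mdio_read45() is illustrative:

#include <stdint.h>
#include <stddef.h>

int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
             void *outbuf, size_t outlen); /* hypothetical transport */

int mdio_read45(uint32_t prtad, uint32_t devad, uint32_t addr,
                uint16_t *value)
{
        uint32_t in[4], out[2];

        in[0] = MC_CMD_MDIO_BUS_EXTERNAL; /* MC_CMD_MDIO_READ_IN_BUS_OFST 0 */
        in[1] = prtad;                    /* ..._PRTAD_OFST 4 */
        in[2] = devad;                    /* ..._DEVAD_OFST 8 */
        in[3] = addr;                     /* ..._ADDR_OFST 12 */

        int rc = mcdi_rpc(MC_CMD_MDIO_READ, in, sizeof(in), out, sizeof(out));
        if (rc)
                return rc;
        if (out[1] != MC_CMD_MDIO_STATUS_GOOD) /* STATUS at offset 4 */
                return -1;                     /* DONE not set, or error bits */
        *value = (uint16_t)out[0];             /* VALUE at offset 0 */
        return 0;
}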
782 1349
783/***********************************/ 1350/***********************************/
@@ -788,18 +1355,34 @@
788 1355
789/* MC_CMD_MDIO_WRITE_IN msgrequest */ 1356/* MC_CMD_MDIO_WRITE_IN msgrequest */
790#define MC_CMD_MDIO_WRITE_IN_LEN 20 1357#define MC_CMD_MDIO_WRITE_IN_LEN 20
1358/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
1359 * external devices.
1360 */
791#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 1361#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
1362/* enum: Internal. */
792/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ 1363/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
1364/* enum: External. */
793/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ 1365/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
1366/* Port address */
794#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 1367#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
1368/* Device Address or clause 22. */
795#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 1369#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
1370/* enum: By default all MCDI MDIO operations use clause 45 mode. To use
1371 * clause 22 instead, set DEVAD = MC_CMD_MDIO_CLAUSE22.
1372 */
796/* MC_CMD_MDIO_CLAUSE22 0x20 */ 1373/* MC_CMD_MDIO_CLAUSE22 0x20 */
1374/* Address */
797#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 1375#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
1376/* Value */
798#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 1377#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
799 1378
800/* MC_CMD_MDIO_WRITE_OUT msgresponse */ 1379/* MC_CMD_MDIO_WRITE_OUT msgresponse */
801#define MC_CMD_MDIO_WRITE_OUT_LEN 4 1380#define MC_CMD_MDIO_WRITE_OUT_LEN 4
1381/* Status; the MDIO commands return the raw status bits from the MDIO block. A
1382 * "good" transaction should have the DONE bit set and all other bits clear.
1383 */
802#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 1384#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
1385/* enum: Good. */
803/* MC_CMD_MDIO_STATUS_GOOD 0x8 */ 1386/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
804 1387
805 1388
@@ -813,6 +1396,9 @@
813#define MC_CMD_DBI_WRITE_IN_LENMIN 12 1396#define MC_CMD_DBI_WRITE_IN_LENMIN 12
814#define MC_CMD_DBI_WRITE_IN_LENMAX 252 1397#define MC_CMD_DBI_WRITE_IN_LENMAX 252
815#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) 1398#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
1399/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
1400 * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
1401 */
816#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 1402#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
817#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 1403#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
818#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 1404#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
@@ -826,9 +1412,15 @@
826#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 1412#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
827#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 1413#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
828#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 1414#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
829#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4 1415#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
830#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32 1416#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
831#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32 1417#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
1418#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
1419#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
1420#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
1421#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
1422#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
1423#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
832#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 1424#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
833#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 1425#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
834#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 1426#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
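The PARMS dword packs its sub-fields by LBN (lowest bit number) and WIDTH. A packing sketch, on the assumption that the VF_NUM/VF_ACTIVE/CS2 LBNs are bit positions within the PARMS dword at offset 4 rather than within the whole 12-byte structure:

#include <stdint.h>

static uint32_t dbiwrop_parms(uint16_t vf_num, int vf_active, int cs2)
{
        uint32_t parms = 0;

        /* bits 16..31 */
        parms |= (uint32_t)vf_num << MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN;
        /* bit 15 */
        parms |= (uint32_t)(vf_active & 1) << MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN;
        /* bit 14 */
        parms |= (uint32_t)(cs2 & 1) << MC_CMD_DBIWROP_TYPEDEF_CS2_LBN;
        return parms;
}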
@@ -836,69 +1428,111 @@
836 1428
837/***********************************/ 1429/***********************************/
838/* MC_CMD_PORT_READ32 1430/* MC_CMD_PORT_READ32
839 * Read a 32-bit register from the indirect port register map. 1431 * Read a 32-bit register from the indirect port register map. The port to
1432 * access is implied by the Shared memory channel used.
840 */ 1433 */
841#define MC_CMD_PORT_READ32 0x14 1434#define MC_CMD_PORT_READ32 0x14
842 1435
843/* MC_CMD_PORT_READ32_IN msgrequest */ 1436/* MC_CMD_PORT_READ32_IN msgrequest */
844#define MC_CMD_PORT_READ32_IN_LEN 4 1437#define MC_CMD_PORT_READ32_IN_LEN 4
1438/* Address */
845#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 1439#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
846 1440
847/* MC_CMD_PORT_READ32_OUT msgresponse */ 1441/* MC_CMD_PORT_READ32_OUT msgresponse */
848#define MC_CMD_PORT_READ32_OUT_LEN 8 1442#define MC_CMD_PORT_READ32_OUT_LEN 8
1443/* Value */
849#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 1444#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
1445/* Status */
850#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 1446#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
851 1447
852 1448
853/***********************************/ 1449/***********************************/
854/* MC_CMD_PORT_WRITE32 1450/* MC_CMD_PORT_WRITE32
855 * Write a 32-bit register to the indirect port register map. 1451 * Write a 32-bit register to the indirect port register map. The port to
1452 * access is implied by the Shared memory channel used.
856 */ 1453 */
857#define MC_CMD_PORT_WRITE32 0x15 1454#define MC_CMD_PORT_WRITE32 0x15
858 1455
859/* MC_CMD_PORT_WRITE32_IN msgrequest */ 1456/* MC_CMD_PORT_WRITE32_IN msgrequest */
860#define MC_CMD_PORT_WRITE32_IN_LEN 8 1457#define MC_CMD_PORT_WRITE32_IN_LEN 8
1458/* Address */
861#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 1459#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
1460/* Value */
862#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 1461#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
863 1462
864/* MC_CMD_PORT_WRITE32_OUT msgresponse */ 1463/* MC_CMD_PORT_WRITE32_OUT msgresponse */
865#define MC_CMD_PORT_WRITE32_OUT_LEN 4 1464#define MC_CMD_PORT_WRITE32_OUT_LEN 4
1465/* Status */
866#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 1466#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
867 1467
868 1468
869/***********************************/ 1469/***********************************/
870/* MC_CMD_PORT_READ128 1470/* MC_CMD_PORT_READ128
871 * Read a 128-bit register from the indirect port register map. 1471 * Read a 128-bit register from the indirect port register map. The port to
1472 * access is implied by the Shared memory channel used.
872 */ 1473 */
873#define MC_CMD_PORT_READ128 0x16 1474#define MC_CMD_PORT_READ128 0x16
874 1475
875/* MC_CMD_PORT_READ128_IN msgrequest */ 1476/* MC_CMD_PORT_READ128_IN msgrequest */
876#define MC_CMD_PORT_READ128_IN_LEN 4 1477#define MC_CMD_PORT_READ128_IN_LEN 4
1478/* Address */
877#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 1479#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
878 1480
879/* MC_CMD_PORT_READ128_OUT msgresponse */ 1481/* MC_CMD_PORT_READ128_OUT msgresponse */
880#define MC_CMD_PORT_READ128_OUT_LEN 20 1482#define MC_CMD_PORT_READ128_OUT_LEN 20
1483/* Value */
881#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 1484#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
882#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 1485#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
1486/* Status */
883#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 1487#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
884 1488
885 1489
886/***********************************/ 1490/***********************************/
887/* MC_CMD_PORT_WRITE128 1491/* MC_CMD_PORT_WRITE128
888 * Write a 128-bit register to the indirect port register map. 1492 * Write a 128-bit register to the indirect port register map. The port to
1493 * access is implied by the Shared memory channel used.
889 */ 1494 */
890#define MC_CMD_PORT_WRITE128 0x17 1495#define MC_CMD_PORT_WRITE128 0x17
891 1496
892/* MC_CMD_PORT_WRITE128_IN msgrequest */ 1497/* MC_CMD_PORT_WRITE128_IN msgrequest */
893#define MC_CMD_PORT_WRITE128_IN_LEN 20 1498#define MC_CMD_PORT_WRITE128_IN_LEN 20
1499/* Address */
894#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 1500#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
1501/* Value */
895#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 1502#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
896#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 1503#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
897 1504
898/* MC_CMD_PORT_WRITE128_OUT msgresponse */ 1505/* MC_CMD_PORT_WRITE128_OUT msgresponse */
899#define MC_CMD_PORT_WRITE128_OUT_LEN 4 1506#define MC_CMD_PORT_WRITE128_OUT_LEN 4
1507/* Status */
900#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 1508#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
901 1509
1510/* MC_CMD_CAPABILITIES structuredef */
1511#define MC_CMD_CAPABILITIES_LEN 4
1512/* Small buf table. */
1513#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
1514#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
1515/* Turbo mode (for Maranello). */
1516#define MC_CMD_CAPABILITIES_TURBO_LBN 1
1517#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
1518/* Turbo mode active (for Maranello). */
1519#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
1520#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
1521/* PTP offload. */
1522#define MC_CMD_CAPABILITIES_PTP_LBN 3
1523#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
1524/* AOE mode. */
1525#define MC_CMD_CAPABILITIES_AOE_LBN 4
1526#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
1527/* AOE mode active. */
1528#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
1529#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
1530/* FC mode active. */
1531#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
1532#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
1533#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
1534#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
1535
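Fields in this structure are again described by LBN/WIDTH pairs; a small helper makes the decode explicit (cap_field() is illustrative, valid for widths below 32):

#include <stdint.h>

static uint32_t cap_field(uint32_t caps, unsigned int lbn, unsigned int width)
{
        return (caps >> lbn) & ((1u << width) - 1);
}

/* e.g. whether PTP offload is advertised:
 *   cap_field(caps, MC_CMD_CAPABILITIES_PTP_LBN,
 *             MC_CMD_CAPABILITIES_PTP_WIDTH)
 */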
902 1536
903/***********************************/ 1537/***********************************/
904/* MC_CMD_GET_BOARD_CFG 1538/* MC_CMD_GET_BOARD_CFG
@@ -916,18 +1550,10 @@
916#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 1550#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
917#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 1551#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
918#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 1552#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
1553/* See MC_CMD_CAPABILITIES */
919#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 1554#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
920#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */ 1555/* See MC_CMD_CAPABILITIES */
921#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
922#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
923#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
924#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
925#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
926#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
927#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
928#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 1556#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
929/* Enum values, see field(s): */
930/* CAPABILITIES_PORT0 */
931#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 1557#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
932#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 1558#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
933#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 1559#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
@@ -936,6 +1562,11 @@
936#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 1562#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
937#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 1563#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
938#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 1564#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
1565/* This field contains a 16-bit value for each of the types of NVRAM area. The
1566 * values are defined in the firmware/mc/platform/.c file for a specific board
1567 * type, but otherwise have no meaning to the MC; they are used by the driver
1568 * to manage selection of appropriate firmware updates.
1569 */
939#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 1570#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
940#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 1571#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
941#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12 1572#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
@@ -944,7 +1575,7 @@
944 1575
945/***********************************/ 1576/***********************************/
946/* MC_CMD_DBI_READX 1577/* MC_CMD_DBI_READX
947 * Read DBI register(s). 1578 * Read DBI register(s) -- extended functionality
948 */ 1579 */
949#define MC_CMD_DBI_READX 0x19 1580#define MC_CMD_DBI_READX 0x19
950 1581
@@ -952,6 +1583,7 @@
952#define MC_CMD_DBI_READX_IN_LENMIN 8 1583#define MC_CMD_DBI_READX_IN_LENMIN 8
953#define MC_CMD_DBI_READX_IN_LENMAX 248 1584#define MC_CMD_DBI_READX_IN_LENMAX 248
954#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) 1585#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
1586/* Each read op consists of an address (offset 0) and VF/CS2 parameters (offset 32). See MC_CMD_DBIRDOP_TYPEDEF. */
955#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 1587#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
956#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 1588#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
957#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0 1589#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
@@ -963,11 +1595,27 @@
963#define MC_CMD_DBI_READX_OUT_LENMIN 4 1595#define MC_CMD_DBI_READX_OUT_LENMIN 4
964#define MC_CMD_DBI_READX_OUT_LENMAX 252 1596#define MC_CMD_DBI_READX_OUT_LENMAX 252
965#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) 1597#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
1598/* Value */
966#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 1599#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
967#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 1600#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
968#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 1601#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
969#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 1602#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
970 1603
1604/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
1605#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
1606#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
1607#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
1608#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
1609#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
1610#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
1611#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
1612#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
1613#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
1614#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
1615#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
1616#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
1617#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
1618
971 1619
972/***********************************/ 1620/***********************************/
973/* MC_CMD_SET_RAND_SEED 1621/* MC_CMD_SET_RAND_SEED
@@ -977,6 +1625,7 @@
977 1625
978/* MC_CMD_SET_RAND_SEED_IN msgrequest */ 1626/* MC_CMD_SET_RAND_SEED_IN msgrequest */
979#define MC_CMD_SET_RAND_SEED_IN_LEN 16 1627#define MC_CMD_SET_RAND_SEED_IN_LEN 16
1628/* Seed value. */
980#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 1629#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
981#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16 1630#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
982 1631
@@ -986,7 +1635,7 @@
986 1635
987/***********************************/ 1636/***********************************/
988/* MC_CMD_LTSSM_HIST 1637/* MC_CMD_LTSSM_HIST
989 * Retrieve the history of the PCIE LTSSM. 1638 * Retrieve the history of the LTSSM, if the build supports it.
990 */ 1639 */
991#define MC_CMD_LTSSM_HIST 0x1b 1640#define MC_CMD_LTSSM_HIST 0x1b
992 1641
@@ -997,6 +1646,7 @@
997#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 1646#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
998#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 1647#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
999#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) 1648#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
1649/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
1000#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 1650#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
1001#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 1651#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
1002#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 1652#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
@@ -1005,41 +1655,47 @@
1005 1655
1006/***********************************/ 1656/***********************************/
1007/* MC_CMD_DRV_ATTACH 1657/* MC_CMD_DRV_ATTACH
1008 * Inform MCPU that this port is managed on the host. 1658 * Inform MCPU that this port is managed on the host (i.e. driver active). For
1659 * Huntington, also request the preferred datapath firmware to use if possible
1660 * (it may not be possible for this request to be fulfilled; the driver must
1661 * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
1662 * features are actually available). The FIRMWARE_ID field is ignored by older
1663 * platforms.
1009 */ 1664 */
1010#define MC_CMD_DRV_ATTACH 0x1c 1665#define MC_CMD_DRV_ATTACH 0x1c
1011 1666
1012/* MC_CMD_DRV_ATTACH_IN msgrequest */ 1667/* MC_CMD_DRV_ATTACH_IN msgrequest */
1013#define MC_CMD_DRV_ATTACH_IN_LEN 8 1668#define MC_CMD_DRV_ATTACH_IN_LEN 12
1669/* new state (0=detached, 1=attached) to set if UPDATE=1 */
1014#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 1670#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
1671/* 1 to set new state, or 0 to just report the existing state */
1015#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 1672#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
1673/* preferred datapath firmware (for Huntington; ignored for Siena) */
1674#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
1675/* enum: Prefer to use full featured firmware */
1676#define MC_CMD_FW_FULL_FEATURED 0x0
1677/* enum: Prefer to use firmware with fewer features but lower latency */
1678#define MC_CMD_FW_LOW_LATENCY 0x1
1016 1679
1017/* MC_CMD_DRV_ATTACH_OUT msgresponse */ 1680/* MC_CMD_DRV_ATTACH_OUT msgresponse */
1018#define MC_CMD_DRV_ATTACH_OUT_LEN 4 1681#define MC_CMD_DRV_ATTACH_OUT_LEN 4
1682/* previous or existing state (0=detached, 1=attached) */
1019#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 1683#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
1020 1684
1021 1685/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
1022/***********************************/ 1686#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
1023/* MC_CMD_NCSI_PROD 1687/* previous or existing state (0=detached, 1=attached) */
1024 * Trigger an NC-SI event. 1688#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
1689/* Flags associated with this function */
1690#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
1691/* enum: Labels the lowest-numbered function visible to the OS */
1692#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
1693/* enum: The function can control the link state of the physical port it is
1694 * bound to.
1025 */ 1695 */
1026#define MC_CMD_NCSI_PROD 0x1d 1696#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
1027 1697/* enum: The function can perform privileged operations */
1028/* MC_CMD_NCSI_PROD_IN msgrequest */ 1698#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
1029#define MC_CMD_NCSI_PROD_IN_LEN 4
1030#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
1031#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
1032#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
1033#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
1034#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
1035#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
1036#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
1037#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
1038#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
1039#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
1040
1041/* MC_CMD_NCSI_PROD_OUT msgresponse */
1042#define MC_CMD_NCSI_PROD_OUT_LEN 0
1043 1699
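Putting the attach flow together: the driver sets NEW_STATE/UPDATE, states a firmware preference, and must cope with older firmware that returns only the 4-byte MC_CMD_DRV_ATTACH_OUT. A sketch assuming the hypothetical mcdi_rpc() helper, a little-endian host, and (our reading) that the FLAG_* values are bit numbers within FUNC_FLAGS; drv_attach() is illustrative:

#include <stdint.h>
#include <stddef.h>

int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
             void *outbuf, size_t outlen); /* hypothetical transport */

int drv_attach(uint32_t *func_flags)
{
        uint32_t in[3], out[2] = { 0, 0 };

        in[0] = 1;                       /* NEW_STATE: attached */
        in[1] = 1;                       /* UPDATE: apply NEW_STATE */
        in[2] = MC_CMD_FW_FULL_FEATURED; /* FIRMWARE_ID; ignored by Siena */

        int rc = mcdi_rpc(MC_CMD_DRV_ATTACH, in, sizeof(in), out, sizeof(out));
        if (rc)
                return rc;
        /* out[1] stays 0 if only the short response was returned */
        *func_flags = out[1];
        return 0;
}

/* e.g. link control permission, assuming FLAG_* are bit numbers:
 *   func_flags & (1u << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)
 */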
1044 1700
1045/***********************************/ 1701/***********************************/
@@ -1050,6 +1706,7 @@
1050 1706
1051/* MC_CMD_SHMUART_IN msgrequest */ 1707/* MC_CMD_SHMUART_IN msgrequest */
1052#define MC_CMD_SHMUART_IN_LEN 4 1708#define MC_CMD_SHMUART_IN_LEN 4
1709/* ??? */
1053#define MC_CMD_SHMUART_IN_FLAG_OFST 0 1710#define MC_CMD_SHMUART_IN_FLAG_OFST 0
1054 1711
1055/* MC_CMD_SHMUART_OUT msgresponse */ 1712/* MC_CMD_SHMUART_OUT msgresponse */
@@ -1057,13 +1714,33 @@
1057 1714
1058 1715
1059/***********************************/ 1716/***********************************/
1717/* MC_CMD_PORT_RESET
1718 * Generic per-port reset. There is no equivalent for per-board reset. Locks
1719 * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
1720 * use MC_CMD_ENTITY_RESET instead.
1721 */
1722#define MC_CMD_PORT_RESET 0x20
1723
1724/* MC_CMD_PORT_RESET_IN msgrequest */
1725#define MC_CMD_PORT_RESET_IN_LEN 0
1726
1727/* MC_CMD_PORT_RESET_OUT msgresponse */
1728#define MC_CMD_PORT_RESET_OUT_LEN 0
1729
1730
1731/***********************************/
1060/* MC_CMD_ENTITY_RESET 1732/* MC_CMD_ENTITY_RESET
1061 * Generic per-port reset. 1733 * Generic per-resource reset. There is no equivalent for per-board reset.
1734 * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
1735 * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
1062 */ 1736 */
1063#define MC_CMD_ENTITY_RESET 0x20 1737#define MC_CMD_ENTITY_RESET 0x20
1064 1738
1065/* MC_CMD_ENTITY_RESET_IN msgrequest */ 1739/* MC_CMD_ENTITY_RESET_IN msgrequest */
1066#define MC_CMD_ENTITY_RESET_IN_LEN 4 1740#define MC_CMD_ENTITY_RESET_IN_LEN 4
1741/* Optional flags field. Omitting this will perform a "legacy" reset action
1742 * (TBD).
1743 */
1067#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 1744#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
1068#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 1745#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
1069#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 1746#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -1080,7 +1757,9 @@
1080 1757
1081/* MC_CMD_PCIE_CREDITS_IN msgrequest */ 1758/* MC_CMD_PCIE_CREDITS_IN msgrequest */
1082#define MC_CMD_PCIE_CREDITS_IN_LEN 8 1759#define MC_CMD_PCIE_CREDITS_IN_LEN 8
1760/* poll period. 0 is disabled */
1083#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 1761#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
1762/* wipe statistics */
1084#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 1763#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
1085 1764
1086/* MC_CMD_PCIE_CREDITS_OUT msgresponse */ 1765/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
@@ -1141,7 +1820,7 @@
1141 1820
1142/***********************************/ 1821/***********************************/
1143/* MC_CMD_PUTS 1822/* MC_CMD_PUTS
1144 * puts(3) implementation over MCDI 1823 * Copy the given ASCII string out onto UART and/or out of the network port.
1145 */ 1824 */
1146#define MC_CMD_PUTS 0x23 1825#define MC_CMD_PUTS 0x23
1147 1826
@@ -1167,7 +1846,8 @@
1167 1846
1168/***********************************/ 1847/***********************************/
1169/* MC_CMD_GET_PHY_CFG 1848/* MC_CMD_GET_PHY_CFG
1170 * Report PHY configuration. 1849 * Report PHY configuration. This guarantees to succeed even if the PHY is in a
1850 * 'zombie' state. Locks required: None
1171 */ 1851 */
1172#define MC_CMD_GET_PHY_CFG 0x24 1852#define MC_CMD_GET_PHY_CFG 0x24
1173 1853
@@ -1176,6 +1856,7 @@
1176 1856
1177/* MC_CMD_GET_PHY_CFG_OUT msgresponse */ 1857/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
1178#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 1858#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
1859/* flags */
1179#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 1860#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
1180#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 1861#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
1181#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 1862#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
@@ -1191,7 +1872,9 @@
1191#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 1872#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
1192#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 1873#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
1193#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 1874#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
1875/* ?? */
1194#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 1876#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
1877/* Bitmask of supported capabilities */
1195#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 1878#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
1196#define MC_CMD_PHY_CAP_10HDX_LBN 1 1879#define MC_CMD_PHY_CAP_10HDX_LBN 1
1197#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 1880#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
@@ -1213,20 +1896,36 @@
1213#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 1896#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
1214#define MC_CMD_PHY_CAP_AN_LBN 10 1897#define MC_CMD_PHY_CAP_AN_LBN 10
1215#define MC_CMD_PHY_CAP_AN_WIDTH 1 1898#define MC_CMD_PHY_CAP_AN_WIDTH 1
1899#define MC_CMD_PHY_CAP_40000FDX_LBN 11
1900#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
1901#define MC_CMD_PHY_CAP_DDM_LBN 12
1902#define MC_CMD_PHY_CAP_DDM_WIDTH 1
1903/* ?? */
1216#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 1904#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
1905/* ?? */
1217#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 1906#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
1907/* ?? */
1218#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 1908#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
1909/* ?? */
1219#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 1910#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
1220#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 1911#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
1912/* ?? */
1221#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 1913#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
1222#define MC_CMD_MEDIA_XAUI 0x1 /* enum */ 1914/* enum: XAUI. */
1223#define MC_CMD_MEDIA_CX4 0x2 /* enum */ 1915#define MC_CMD_MEDIA_XAUI 0x1
1224#define MC_CMD_MEDIA_KX4 0x3 /* enum */ 1916/* enum: CX4. */
1225#define MC_CMD_MEDIA_XFP 0x4 /* enum */ 1917#define MC_CMD_MEDIA_CX4 0x2
1226#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */ 1918/* enum: KX4. */
1227#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */ 1919#define MC_CMD_MEDIA_KX4 0x3
1920/* enum: XFP Far. */
1921#define MC_CMD_MEDIA_XFP 0x4
1922/* enum: SFP+. */
1923#define MC_CMD_MEDIA_SFP_PLUS 0x5
1924/* enum: 10GBaseT. */
1925#define MC_CMD_MEDIA_BASE_T 0x6
1228#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 1926#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
1229#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */ 1927/* enum: Native clause 22 */
1928#define MC_CMD_MMD_CLAUSE22 0x0
1230#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ 1929#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
1231#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */ 1930#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
1232#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */ 1931#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
@@ -1234,7 +1933,8 @@
1234#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */ 1933#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
1235#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */ 1934#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
1236#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */ 1935#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
1237#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */ 1936/* enum: Clause22 proxied over clause45 by PHY. */
1937#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
1238#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */ 1938#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
1239#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */ 1939#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
1240#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 1940#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
@@ -1243,18 +1943,31 @@
1243 1943
1244/***********************************/ 1944/***********************************/
1245/* MC_CMD_START_BIST 1945/* MC_CMD_START_BIST
1246 * Start a BIST test on the PHY. 1946 * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
1947 * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
1247 */ 1948 */
1248#define MC_CMD_START_BIST 0x25 1949#define MC_CMD_START_BIST 0x25
1249 1950
1250/* MC_CMD_START_BIST_IN msgrequest */ 1951/* MC_CMD_START_BIST_IN msgrequest */
1251#define MC_CMD_START_BIST_IN_LEN 4 1952#define MC_CMD_START_BIST_IN_LEN 4
1953/* Type of test. */
1252#define MC_CMD_START_BIST_IN_TYPE_OFST 0 1954#define MC_CMD_START_BIST_IN_TYPE_OFST 0
1253#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */ 1955/* enum: Run the PHY's short cable BIST. */
1254#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */ 1956#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
1255#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */ 1957/* enum: Run the PHY's long cable BIST. */
1256#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */ 1958#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
1257#define MC_CMD_PHY_BIST 0x5 /* enum */ 1959/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */
1255#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */ 1959/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
1961/* enum: Run the MC loopback tests. */
1962#define MC_CMD_MC_LOOPBACK_BIST 0x4
1963/* enum: Run the PHY's standard BIST. */
1964#define MC_CMD_PHY_BIST 0x5
1965/* enum: Run MC RAM test. */
1966#define MC_CMD_MC_MEM_BIST 0x6
1967/* enum: Run Port RAM test. */
1968#define MC_CMD_PORT_MEM_BIST 0x7
1969/* enum: Run register test. */
1970#define MC_CMD_REG_BIST 0x8
1258 1971
1259/* MC_CMD_START_BIST_OUT msgresponse */ 1972/* MC_CMD_START_BIST_OUT msgresponse */
1260#define MC_CMD_START_BIST_OUT_LEN 0 1973#define MC_CMD_START_BIST_OUT_LEN 0
@@ -1262,7 +1975,12 @@
1262 1975
1263/***********************************/ 1976/***********************************/
1264/* MC_CMD_POLL_BIST 1977/* MC_CMD_POLL_BIST
1265 * Poll for BIST completion. 1978 * Poll for BIST completion. Returns a single status code, and optionally some
1979 * PHY-specific BIST output. The driver should only consume the BIST output
1980 * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
1981 * successfully parse the BIST output, it should still respect the pass/fail in
1982 * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
1983 * EACCES (if PHY_LOCK is not held).
1266 */ 1984 */
1267#define MC_CMD_POLL_BIST 0x26 1985#define MC_CMD_POLL_BIST 0x26
1268 1986
@@ -1271,15 +1989,21 @@
1271 1989
1272/* MC_CMD_POLL_BIST_OUT msgresponse */ 1990/* MC_CMD_POLL_BIST_OUT msgresponse */
1273#define MC_CMD_POLL_BIST_OUT_LEN 8 1991#define MC_CMD_POLL_BIST_OUT_LEN 8
1992/* result */
1274#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 1993#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
1275#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */ 1994/* enum: Running. */
1276#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */ 1995#define MC_CMD_POLL_BIST_RUNNING 0x1
1277#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */ 1996/* enum: Passed. */
1278#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */ 1997#define MC_CMD_POLL_BIST_PASSED 0x2
1998/* enum: Failed. */
1999#define MC_CMD_POLL_BIST_FAILED 0x3
2000/* enum: Timed-out. */
2001#define MC_CMD_POLL_BIST_TIMEOUT 0x4
1279#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 2002#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
1280 2003
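The polling contract above reduces to a small decode step. A hedged sketch, under the same little-endian dword assumption as before, which honours the pass/fail RESULT even when the PHY-specific output at PRIVATE_OFST cannot be parsed:

#include <stdint.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Returns 1 on pass, 0 on fail/timeout, -1 while the BIST is running. */
static int poll_bist_result(const uint8_t *outbuf)
{
        switch (mcdi_get_dword(outbuf, 0 /* ..._OUT_RESULT_OFST */)) {
        case 0x1: /* MC_CMD_POLL_BIST_RUNNING */
                return -1;
        case 0x2: /* MC_CMD_POLL_BIST_PASSED */
                return 1;
        default:  /* FAILED (0x3) or TIMEOUT (0x4) */
                return 0;
        }
}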
1281/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ 2004/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
1282#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 2005#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
2006/* result */
1283/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ 2007/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
1284/* Enum values, see field(s): */ 2008/* Enum values, see field(s): */
1285/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ 2009/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
@@ -1287,42 +2011,116 @@
1287#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 2011#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
1288#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 2012#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
1289#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 2013#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
2014/* Status of each channel A */
1290#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 2015#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
1291#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */ 2016/* enum: Ok. */
1292#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */ 2017#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
1293#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */ 2018/* enum: Open. */
1294#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */ 2019#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
1295#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */ 2020/* enum: Intra-pair short. */
2021#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
2022/* enum: Inter-pair short. */
2023#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
2024/* enum: Busy. */
2025#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
2026/* Status of each channel B */
1296#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 2027#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
1297/* Enum values, see field(s): */ 2028/* Enum values, see field(s): */
1298/* CABLE_STATUS_A */ 2029/* CABLE_STATUS_A */
2030/* Status of each channel C */
1299#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 2031#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
1300/* Enum values, see field(s): */ 2032/* Enum values, see field(s): */
1301/* CABLE_STATUS_A */ 2033/* CABLE_STATUS_A */
2034/* Status of each channel D */
1302#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 2035#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
1303/* Enum values, see field(s): */ 2036/* Enum values, see field(s): */
1304/* CABLE_STATUS_A */ 2037/* CABLE_STATUS_A */
1305 2038
1306/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */ 2039/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
1307#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 2040#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
2041/* result */
1308/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ 2042/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
1309/* Enum values, see field(s): */ 2043/* Enum values, see field(s): */
1310/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ 2044/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
1311#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 2045#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
1312#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */ 2046/* enum: Complete. */
1313#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */ 2047#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
1314#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */ 2048/* enum: Bus switch off I2C write. */
1315#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */ 2049#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
1316#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */ 2050/* enum: Bus switch off I2C no access IO exp. */
1317#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */ 2051#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
1318#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */ 2052/* enum: Bus switch off I2C no access module. */
1319#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */ 2053#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
1320#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */ 2054/* enum: IO exp I2C configure. */
2055#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
2056/* enum: Bus switch I2C no cross talk. */
2057#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
2058/* enum: Module presence. */
2059#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
2060/* enum: Module ID I2C access. */
2061#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
2062/* enum: Module ID sane value. */
2063#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
2064
2065/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
2066#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
2067/* result */
2068/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
2069/* Enum values, see field(s): */
2070/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
2071#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
2072/* enum: Test has completed. */
2073#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
2074/* enum: RAM test - walk ones. */
2075#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
2076/* enum: RAM test - walk zeros. */
2077#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
2078/* enum: RAM test - walking inversions zeros/ones. */
2079#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
2080/* enum: RAM test - walking inversions checkerboard. */
2081#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
2082/* enum: Register test - set / clear individual bits. */
2083#define MC_CMD_POLL_BIST_MEM_REG 0x5
2084/* enum: ECC error detected. */
2085#define MC_CMD_POLL_BIST_MEM_ECC 0x6
2086/* Failure address, only valid if result is POLL_BIST_FAILED */
2087#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
2088/* Bus or address space to which the failure address corresponds */
2089#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
2090/* enum: MC MIPS bus. */
2091#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
2092/* enum: CSR IREG bus. */
2093#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
2094/* enum: RX DPCPU bus. */
2095#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
2096/* enum: TX0 DPCPU bus. */
2097#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
2098/* enum: TX1 DPCPU bus. */
2099#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
2100/* enum: RX DICPU bus. */
2101#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
2102/* enum: TX DICPU bus. */
2103#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
2104/* Pattern written to RAM / register */
2105#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
2106/* Actual value read from RAM / register */
2107#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
2108/* ECC error mask */
2109#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
2110/* ECC parity error mask */
2111#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
2112/* ECC fatal error mask */
2113#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
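
For the RAM/register tests added above, the failure report can be decoded directly from the fixed offsets. A sketch under the same little-endian assumption; the printf formatting is only for display:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Print a memory-BIST failure report from a 36-byte
 * MC_CMD_POLL_BIST_OUT_MEM response; ADDR/EXPECT/ACTUAL are only
 * meaningful when RESULT is MC_CMD_POLL_BIST_FAILED (0x3). */
static void report_mem_bist(const uint8_t *outbuf)
{
        if (mcdi_get_dword(outbuf, 0) != 0x3 /* MC_CMD_POLL_BIST_FAILED */)
                return;
        printf("subtest %" PRIu32 ": bus %" PRIu32 " addr %#" PRIx32
               " expected %#" PRIx32 " actual %#" PRIx32 "\n",
               mcdi_get_dword(outbuf, 4),   /* ..._OUT_MEM_TEST_OFST */
               mcdi_get_dword(outbuf, 12),  /* ..._OUT_MEM_BUS_OFST */
               mcdi_get_dword(outbuf, 8),   /* ..._OUT_MEM_ADDR_OFST */
               mcdi_get_dword(outbuf, 16),  /* ..._OUT_MEM_EXPECT_OFST */
               mcdi_get_dword(outbuf, 20)); /* ..._OUT_MEM_ACTUAL_OFST */
}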
1321 2114
1322 2115
1323/***********************************/ 2116/***********************************/
1324/* MC_CMD_FLUSH_RX_QUEUES 2117/* MC_CMD_FLUSH_RX_QUEUES
1325 * Flush receive queue(s). 2118 * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
2119 * flushes should be initiated via this MCDI operation, rather than by
2120 * directly writing FLUSH_CMD.
2121 *
2122 * The flush is completed (either done/fail) asynchronously (after this command
2123 * returns). The driver must still wait for flush done/failure events as usual.
1326 */ 2124 */
1327#define MC_CMD_FLUSH_RX_QUEUES 0x27 2125#define MC_CMD_FLUSH_RX_QUEUES 0x27
1328 2126
@@ -1341,7 +2139,7 @@
1341 2139
1342/***********************************/ 2140/***********************************/
1343/* MC_CMD_GET_LOOPBACK_MODES 2141/* MC_CMD_GET_LOOPBACK_MODES
1344 * Get port's loopback modes. 2142 * Returns a bitmask of loopback modes available at each speed.
1345 */ 2143 */
1346#define MC_CMD_GET_LOOPBACK_MODES 0x28 2144#define MC_CMD_GET_LOOPBACK_MODES 0x28
1347 2145
@@ -1349,61 +2147,116 @@
1349#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 2147#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
1350 2148
1351/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */ 2149/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
1352#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32 2150#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
2151/* Supported loopbacks. */
1353#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0 2152#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
1354#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8 2153#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
1355#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 2154#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
1356#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 2155#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
1357#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */ 2156/* enum: None. */
1358#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */ 2157#define MC_CMD_LOOPBACK_NONE 0x0
1359#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */ 2158/* enum: Data. */
1360#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */ 2159#define MC_CMD_LOOPBACK_DATA 0x1
1361#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */ 2160/* enum: GMAC. */
1362#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */ 2161#define MC_CMD_LOOPBACK_GMAC 0x2
1363#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */ 2162/* enum: XGMII. */
1364#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */ 2163#define MC_CMD_LOOPBACK_XGMII 0x3
1365#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */ 2164/* enum: XGXS. */
1366#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */ 2165#define MC_CMD_LOOPBACK_XGXS 0x4
1367#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */ 2166/* enum: XAUI. */
1368#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */ 2167#define MC_CMD_LOOPBACK_XAUI 0x5
1369#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */ 2168/* enum: GMII. */
1370#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */ 2169#define MC_CMD_LOOPBACK_GMII 0x6
1371#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */ 2170/* enum: SGMII. */
1372#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */ 2171#define MC_CMD_LOOPBACK_SGMII 0x7
1373#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */ 2172/* enum: XGBR. */
1374#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */ 2173#define MC_CMD_LOOPBACK_XGBR 0x8
1375#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */ 2174/* enum: XFI. */
1376#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */ 2175#define MC_CMD_LOOPBACK_XFI 0x9
1377#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */ 2176/* enum: XAUI Far. */
1378#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */ 2177#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
1379#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */ 2178/* enum: GMII Far. */
1380#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */ 2179#define MC_CMD_LOOPBACK_GMII_FAR 0xb
1381#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */ 2180/* enum: SGMII Far. */
1382#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */ 2181#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
1383#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */ 2182/* enum: XFI Far. */
2183#define MC_CMD_LOOPBACK_XFI_FAR 0xd
2184/* enum: GPhy. */
2185#define MC_CMD_LOOPBACK_GPHY 0xe
2186/* enum: PhyXS. */
2187#define MC_CMD_LOOPBACK_PHYXS 0xf
2188/* enum: PCS. */
2189#define MC_CMD_LOOPBACK_PCS 0x10
2190/* enum: PMA-PMD. */
2191#define MC_CMD_LOOPBACK_PMAPMD 0x11
2192/* enum: Cross-Port. */
2193#define MC_CMD_LOOPBACK_XPORT 0x12
2194/* enum: XGMII-Wireside. */
2195#define MC_CMD_LOOPBACK_XGMII_WS 0x13
2196/* enum: XAUI Wireside. */
2197#define MC_CMD_LOOPBACK_XAUI_WS 0x14
2198/* enum: XAUI Wireside Far. */
2199#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
2200/* enum: XAUI Wireside Near. */
2201#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
2202/* enum: GMII Wireside. */
2203#define MC_CMD_LOOPBACK_GMII_WS 0x17
2204/* enum: XFI Wireside. */
2205#define MC_CMD_LOOPBACK_XFI_WS 0x18
2206/* enum: XFI Wireside Far. */
2207#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
2208/* enum: PhyXS Wireside. */
2209#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
2210/* enum: PMA lanes MAC-Serdes. */
2211#define MC_CMD_LOOPBACK_PMA_INT 0x1b
2212/* enum: KR Serdes Parallel (Encoder). */
2213#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
2214/* enum: KR Serdes Serial. */
2215#define MC_CMD_LOOPBACK_SD_FAR 0x1d
2216/* enum: PMA lanes MAC-Serdes Wireside. */
2217#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
2218/* enum: KR Serdes Parallel Wireside (Full PCS). */
2219#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
2220/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
2221#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
2222/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
2223#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
2224/* enum: KR Serdes Serial Wireside. */
2225#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
2226/* Supported loopbacks. */
1384#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 2227#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
1385#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 2228#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
1386#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8 2229#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
1387#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12 2230#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
1388/* Enum values, see field(s): */ 2231/* Enum values, see field(s): */
1389/* 100M */ 2232/* 100M */
2233/* Supported loopbacks. */
1390#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16 2234#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
1391#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8 2235#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
1392#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16 2236#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
1393#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20 2237#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
1394/* Enum values, see field(s): */ 2238/* Enum values, see field(s): */
1395/* 100M */ 2239/* 100M */
2240/* Supported loopbacks. */
1396#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24 2241#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
1397#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8 2242#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
1398#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24 2243#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
1399#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28 2244#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
1400/* Enum values, see field(s): */ 2245/* Enum values, see field(s): */
1401/* 100M */ 2246/* 100M */
2247/* Supported loopbacks. */
2248#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
2249#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
2250#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
2251#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
2252/* Enum values, see field(s): */
2253/* 100M */
1402 2254
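Each of these 64-bit fields reads naturally as a bitmask in which bit n corresponds to loopback enum n. That bit-per-enum encoding is an assumption of the sketch below (it matches how the sfc driver appears to consume the field) rather than something this hunk states outright:

#include <stdint.h>

static uint64_t mcdi_get_qword(const uint8_t *buf, unsigned int ofst)
{
        uint64_t v = 0;

        for (int i = 7; i >= 0; i--)
                v = (v << 8) | buf[ofst + i]; /* little-endian 64-bit field */
        return v;
}

/* Test whether a given loopback enum value is offered at 10G. */
static int loopback_10g_supports(const uint8_t *outbuf, unsigned int mode)
{
        /* MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST == 16 */
        return (mcdi_get_qword(outbuf, 16) >> mode) & 1;
}

So loopback_10g_supports(outbuf, 0x11) would test for MC_CMD_LOOPBACK_PMAPMD availability at 10G; the other speeds follow the same pattern at their respective offsets.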
1403 2255
1404/***********************************/ 2256/***********************************/
1405/* MC_CMD_GET_LINK 2257/* MC_CMD_GET_LINK
1406 * Read the unified MAC/PHY link state. 2258 * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
2259 * ETIME.
1407 */ 2260 */
1408#define MC_CMD_GET_LINK 0x29 2261#define MC_CMD_GET_LINK 0x29
1409 2262
@@ -1412,9 +2265,15 @@
1412 2265
1413/* MC_CMD_GET_LINK_OUT msgresponse */ 2266/* MC_CMD_GET_LINK_OUT msgresponse */
1414#define MC_CMD_GET_LINK_OUT_LEN 28 2267#define MC_CMD_GET_LINK_OUT_LEN 28
2268/* near-side advertised capabilities */
1415#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 2269#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
2270/* link-partner advertised capabilities */
1416#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 2271#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
2272/* Autonegotiated speed in mbit/s. The link may still be down even if this
2273 * reads non-zero.
2274 */
1417#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 2275#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
2276/* Current loopback setting. */
1418#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 2277#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
1419/* Enum values, see field(s): */ 2278/* Enum values, see field(s): */
1420/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ 2279/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
@@ -1427,10 +2286,14 @@
1427#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 2286#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
1428#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 2287#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
1429#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 2288#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
2289/* This returns the negotiated flow control value. */
1430#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 2290#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
1431#define MC_CMD_FCNTL_OFF 0x0 /* enum */ 2291/* enum: Flow control is off. */
1432#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */ 2292#define MC_CMD_FCNTL_OFF 0x0
1433#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */ 2293/* enum: Respond to flow control. */
2294#define MC_CMD_FCNTL_RESPOND 0x1
2295/* enum: Respond to and Issue flow control. */
2296#define MC_CMD_FCNTL_BIDIR 0x2
1434#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 2297#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
1435#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 2298#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
1436#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 2299#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
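
A small decode sketch for the fixed-offset fields annotated above; note the field comment's warning that a non-zero speed does not by itself mean the link is up (the link state proper lives in the flags word's *_LINK bits). Little-endian dword encoding is assumed:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

static void show_link(const uint8_t *outbuf /* MC_CMD_GET_LINK_OUT, 28 B */)
{
        uint32_t speed = mcdi_get_dword(outbuf, 8);  /* ..._LINK_SPEED_OFST */
        uint32_t fcntl = mcdi_get_dword(outbuf, 20); /* ..._FCNTL_OFST */

        printf("speed %" PRIu32 " Mb/s, negotiated flow control mode %" PRIu32
               "\n", speed, fcntl); /* 0=off, 1=respond, 2=bidir */
}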
@@ -1444,13 +2307,16 @@
1444 2307
1445/***********************************/ 2308/***********************************/
1446/* MC_CMD_SET_LINK 2309/* MC_CMD_SET_LINK
1447 * Write the unified MAC/PHY link configuration. 2310 * Write the unified MAC/PHY link configuration. Locks required: None. Return
2311 * code: 0, EINVAL, ETIME
1448 */ 2312 */
1449#define MC_CMD_SET_LINK 0x2a 2313#define MC_CMD_SET_LINK 0x2a
1450 2314
1451/* MC_CMD_SET_LINK_IN msgrequest */ 2315/* MC_CMD_SET_LINK_IN msgrequest */
1452#define MC_CMD_SET_LINK_IN_LEN 16 2316#define MC_CMD_SET_LINK_IN_LEN 16
2317/* ??? */
1453#define MC_CMD_SET_LINK_IN_CAP_OFST 0 2318#define MC_CMD_SET_LINK_IN_CAP_OFST 0
2319/* Flags */
1454#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 2320#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
1455#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 2321#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
1456#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 2322#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
@@ -1458,9 +2324,13 @@
1458#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1 2324#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
1459#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2 2325#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
1460#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 2326#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
2327/* Loopback mode. */
1461#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 2328#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
1462/* Enum values, see field(s): */ 2329/* Enum values, see field(s): */
1463/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ 2330/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
2331/* A loopback speed of "0" is supported, and means "choose any available
2332 * speed".
2333 */
1464#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 2334#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
1465 2335
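Putting the two loopback fields together, a sketch of a request asking for XAUI loopback at any available speed; CAP and FLAGS handling is elided, and the dword encoding is assumed as before:

#include <stdint.h>
#include <string.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
        buf[ofst + 0] = v & 0xff;
        buf[ofst + 1] = (v >> 8) & 0xff;
        buf[ofst + 2] = (v >> 16) & 0xff;
        buf[ofst + 3] = (v >> 24) & 0xff;
}

static void build_set_link_loopback(uint8_t inbuf[16 /* ..._IN_LEN */])
{
        memset(inbuf, 0, 16);
        mcdi_put_dword(inbuf, 8, 0x5); /* LOOPBACK_MODE = MC_CMD_LOOPBACK_XAUI */
        mcdi_put_dword(inbuf, 12, 0);  /* LOOPBACK_SPEED = any available */
}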
1466/* MC_CMD_SET_LINK_OUT msgresponse */ 2336/* MC_CMD_SET_LINK_OUT msgresponse */
@@ -1469,12 +2339,13 @@
1469 2339
1470/***********************************/ 2340/***********************************/
1471/* MC_CMD_SET_ID_LED 2341/* MC_CMD_SET_ID_LED
1472 * Set indentification LED state. 2342 * Set identification LED state. Locks required: None. Return code: 0, EINVAL
1473 */ 2343 */
1474#define MC_CMD_SET_ID_LED 0x2b 2344#define MC_CMD_SET_ID_LED 0x2b
1475 2345
1476/* MC_CMD_SET_ID_LED_IN msgrequest */ 2346/* MC_CMD_SET_ID_LED_IN msgrequest */
1477#define MC_CMD_SET_ID_LED_IN_LEN 4 2347#define MC_CMD_SET_ID_LED_IN_LEN 4
2348/* Set LED state. */
1478#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 2349#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
1479#define MC_CMD_LED_OFF 0x0 /* enum */ 2350#define MC_CMD_LED_OFF 0x0 /* enum */
1480#define MC_CMD_LED_ON 0x1 /* enum */ 2351#define MC_CMD_LED_ON 0x1 /* enum */
@@ -1486,12 +2357,15 @@
1486 2357
1487/***********************************/ 2358/***********************************/
1488/* MC_CMD_SET_MAC 2359/* MC_CMD_SET_MAC
1489 * Set MAC configuration. 2360 * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
1490 */ 2361 */
1491#define MC_CMD_SET_MAC 0x2c 2362#define MC_CMD_SET_MAC 0x2c
1492 2363
1493/* MC_CMD_SET_MAC_IN msgrequest */ 2364/* MC_CMD_SET_MAC_IN msgrequest */
1494#define MC_CMD_SET_MAC_IN_LEN 24 2365#define MC_CMD_SET_MAC_IN_LEN 24
2366/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
2367 * EtherII, VLAN, bug16011 padding).
2368 */
1495#define MC_CMD_SET_MAC_IN_MTU_OFST 0 2369#define MC_CMD_SET_MAC_IN_MTU_OFST 0
1496#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 2370#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
1497#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 2371#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
@@ -1504,10 +2378,14 @@
1504#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 2378#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
1505#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 2379#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
1506#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 2380#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
2381/* enum: Flow control is off. */
1507/* MC_CMD_FCNTL_OFF 0x0 */ 2382/* MC_CMD_FCNTL_OFF 0x0 */
2383/* enum: Respond to flow control. */
1508/* MC_CMD_FCNTL_RESPOND 0x1 */ 2384/* MC_CMD_FCNTL_RESPOND 0x1 */
2385/* enum: Respond to and Issue flow control. */
1509/* MC_CMD_FCNTL_BIDIR 0x2 */ 2386/* MC_CMD_FCNTL_BIDIR 0x2 */
1510#define MC_CMD_FCNTL_AUTO 0x3 /* enum */ 2387/* enum: Auto neg flow control. */
2388#define MC_CMD_FCNTL_AUTO 0x3
1511 2389
1512/* MC_CMD_SET_MAC_OUT msgresponse */ 2390/* MC_CMD_SET_MAC_OUT msgresponse */
1513#define MC_CMD_SET_MAC_OUT_LEN 0 2391#define MC_CMD_SET_MAC_OUT_LEN 0
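
Because the MTU here is the raw XMAC/GMAC frame limit rather than an IP-layer MTU, callers add the L2 framing themselves. A plausible sketch of the conversion; the exact bug16011 padding is firmware-specific and deliberately not modelled:

/* Convert an IP-layer MTU to a value for SET_MAC_IN_MTU: 14 bytes of
 * Ethernet II header plus 4 bytes of 802.1Q tag, since the field comment
 * says the programmed value is inclusive of both. Any bug16011 padding
 * would be applied on top and is not modelled here. */
static unsigned int set_mac_mtu(unsigned int ip_mtu)
{
        return ip_mtu + 14 /* EtherII */ + 4 /* VLAN */;
}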
@@ -1515,12 +2393,18 @@
1515 2393
1516/***********************************/ 2394/***********************************/
1517/* MC_CMD_PHY_STATS 2395/* MC_CMD_PHY_STATS
1518 * Get generic PHY statistics. 2396 * Get generic PHY statistics. This call returns the statistics for a generic
2397 * PHY in a sparse array (indexed by the enumeration). Each value is represented
2398 * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
2399 * statistics may be read from the message response. If DMA_ADDR != 0, then the
2400 * statistics are DMAed to that (page-aligned) location. Locks required: None.
2401 * Returns: 0, ETIME
1519 */ 2402 */
1520#define MC_CMD_PHY_STATS 0x2d 2403#define MC_CMD_PHY_STATS 0x2d
1521 2404
1522/* MC_CMD_PHY_STATS_IN msgrequest */ 2405/* MC_CMD_PHY_STATS_IN msgrequest */
1523#define MC_CMD_PHY_STATS_IN_LEN 8 2406#define MC_CMD_PHY_STATS_IN_LEN 8
2407/* ??? */
1524#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0 2408#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
1525#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8 2409#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
1526#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 2410#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1534,40 +2418,71 @@
1534#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0 2418#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
1535#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 2419#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
1536#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS 2420#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
1537#define MC_CMD_OUI 0x0 /* enum */ 2421/* enum: OUI. */
1538#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */ 2422#define MC_CMD_OUI 0x0
1539#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */ 2423/* enum: PMA-PMD Link Up. */
1540#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */ 2424#define MC_CMD_PMA_PMD_LINK_UP 0x1
1541#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */ 2425/* enum: PMA-PMD RX Fault. */
1542#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */ 2426#define MC_CMD_PMA_PMD_RX_FAULT 0x2
1543#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */ 2427/* enum: PMA-PMD TX Fault. */
1544#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */ 2428#define MC_CMD_PMA_PMD_TX_FAULT 0x3
1545#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */ 2429/* enum: PMA-PMD Signal */
1546#define MC_CMD_PCS_LINK_UP 0x9 /* enum */ 2430#define MC_CMD_PMA_PMD_SIGNAL 0x4
1547#define MC_CMD_PCS_RX_FAULT 0xa /* enum */ 2431/* enum: PMA-PMD SNR A. */
1548#define MC_CMD_PCS_TX_FAULT 0xb /* enum */ 2432#define MC_CMD_PMA_PMD_SNR_A 0x5
1549#define MC_CMD_PCS_BER 0xc /* enum */ 2433/* enum: PMA-PMD SNR B. */
1550#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */ 2434#define MC_CMD_PMA_PMD_SNR_B 0x6
1551#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */ 2435/* enum: PMA-PMD SNR C. */
1552#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */ 2436#define MC_CMD_PMA_PMD_SNR_C 0x7
1553#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */ 2437/* enum: PMA-PMD SNR D. */
1554#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */ 2438#define MC_CMD_PMA_PMD_SNR_D 0x8
1555#define MC_CMD_PHYXS_SYNC 0x12 /* enum */ 2439/* enum: PCS Link Up. */
1556#define MC_CMD_AN_LINK_UP 0x13 /* enum */ 2440#define MC_CMD_PCS_LINK_UP 0x9
1557#define MC_CMD_AN_COMPLETE 0x14 /* enum */ 2441/* enum: PCS RX Fault. */
1558#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */ 2442#define MC_CMD_PCS_RX_FAULT 0xa
1559#define MC_CMD_CL22_LINK_UP 0x16 /* enum */ 2443/* enum: PCS TX Fault. */
1560#define MC_CMD_PHY_NSTATS 0x17 /* enum */ 2444#define MC_CMD_PCS_TX_FAULT 0xb
2445/* enum: PCS BER. */
2446#define MC_CMD_PCS_BER 0xc
2447/* enum: PCS Block Errors. */
2448#define MC_CMD_PCS_BLOCK_ERRORS 0xd
2449/* enum: PhyXS Link Up. */
2450#define MC_CMD_PHYXS_LINK_UP 0xe
2451/* enum: PhyXS RX Fault. */
2452#define MC_CMD_PHYXS_RX_FAULT 0xf
2453/* enum: PhyXS TX Fault. */
2454#define MC_CMD_PHYXS_TX_FAULT 0x10
2455/* enum: PhyXS Align. */
2456#define MC_CMD_PHYXS_ALIGN 0x11
2457/* enum: PhyXS Sync. */
2458#define MC_CMD_PHYXS_SYNC 0x12
2459/* enum: AN link-up. */
2460#define MC_CMD_AN_LINK_UP 0x13
2461/* enum: AN Complete. */
2462#define MC_CMD_AN_COMPLETE 0x14
2463/* enum: AN 10GBaseT Status. */
2464#define MC_CMD_AN_10GBT_STATUS 0x15
2465/* enum: Clause 22 Link-Up. */
2466#define MC_CMD_CL22_LINK_UP 0x16
2467/* enum: (Last entry) */
2468#define MC_CMD_PHY_NSTATS 0x17
1561 2469
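In the no-DMA case (DMA_ADDR == 0) the response body is simply the sparse statistics array itself: MC_CMD_PHY_NSTATS (0x17) dwords indexed by the stat enums above. A sketch of indexing it, with the little-endian encoding assumed:

#include <stdint.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Fetch one statistic from a no-DMA MC_CMD_PHY_STATS response.
 * STATISTICS_OFST is 0 and each entry is 4 bytes, per the fields above. */
static uint32_t phy_stat(const uint8_t *outbuf, unsigned int stat_index)
{
        return mcdi_get_dword(outbuf, 0 + 4 * stat_index);
}

For example, phy_stat(outbuf, 0x1) reads the MC_CMD_PMA_PMD_LINK_UP entry.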
1562 2470
1563/***********************************/ 2471/***********************************/
1564/* MC_CMD_MAC_STATS 2472/* MC_CMD_MAC_STATS
1565 * Get generic MAC statistics. 2473 * Get generic MAC statistics. This call returns unified statistics maintained
2474 * by the MC as it switches between the GMAC and XMAC. The MC will write out
2475 * all supported stats. The driver should zero initialise the buffer to
2476 * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
2477 * performed, and the statistics may be read from the message response. If
2478 * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
2479 * Locks required: None. Returns: 0, ETIME
1566 */ 2480 */
1567#define MC_CMD_MAC_STATS 0x2e 2481#define MC_CMD_MAC_STATS 0x2e
1568 2482
1569/* MC_CMD_MAC_STATS_IN msgrequest */ 2483/* MC_CMD_MAC_STATS_IN msgrequest */
1570#define MC_CMD_MAC_STATS_IN_LEN 16 2484#define MC_CMD_MAC_STATS_IN_LEN 16
2485/* ??? */
1571#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 2486#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
1572#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 2487#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
1573#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 2488#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1684,6 +2599,7 @@
1684 2599
1685/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ 2600/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
1686#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 2601#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
2602/* this is only used for the first record */
1687#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 2603#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
1688#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 2604#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
1689#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 2605#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
@@ -1713,7 +2629,23 @@
1713 2629
1714/***********************************/ 2630/***********************************/
1715/* MC_CMD_MEMCPY 2631/* MC_CMD_MEMCPY
1716 * Perform memory copy operation. 2632 * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
2633 * embedded directly in the command.
2634 *
2635 * A common pattern is for a client to use generation counts to signal a dma
2636 * update of a datastructure. To facilitate this, this MCDI operation can
2637 * contain multiple requests which are executed in strict order. Requests take
2638 * the form of duplicating the entire MCDI request continuously (including the
2639 * requests record, which is ignored in all but the first structure)
2640 *
2641 * The source data can either come from a DMA from the host, or it can be
2642 * embedded within the request directly, thereby eliminating a DMA read. To
2643 * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
2644 * ADDR_LO=offset, and inserts the data at %offset from the start of the
2645 * payload. It's the caller's responsibility to ensure that the embedded data
2646 * doesn't overlap the records.
2647 *
2648 * Returns: 0, EINVAL (invalid RID)
1717 */ 2649 */
1718#define MC_CMD_MEMCPY 0x31 2650#define MC_CMD_MEMCPY 0x31
1719 2651
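One practical consequence of the inline-data rule is computing the first offset at which embedded data cannot overlap the records. Since records are 32 bytes each (MC_CMD_MEMCPY_IN_RECORD_LEN) and MC_CMD_MEMCPY_IN_LEN(num) is 32*num, a small helper suffices:

/* First safe FROM_ADDR_LO offset for inline data: immediately after the
 * last record, i.e. MC_CMD_MEMCPY_IN_LEN(num_records). Data placed at or
 * beyond this offset cannot overlap the record array. */
static unsigned int memcpy_inline_data_ofst(unsigned int num_records)
{
        return 0 + 32 * num_records;
}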
@@ -1721,6 +2653,7 @@
1721#define MC_CMD_MEMCPY_IN_LENMIN 32 2653#define MC_CMD_MEMCPY_IN_LENMIN 32
1722#define MC_CMD_MEMCPY_IN_LENMAX 224 2654#define MC_CMD_MEMCPY_IN_LENMAX 224
1723#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) 2655#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
2656/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
1724#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 2657#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
1725#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 2658#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
1726#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 2659#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
@@ -1741,14 +2674,22 @@
1741#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 2674#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
1742#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ 2675#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
1743#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ 2676#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
2677/* A type value of 1 is unused. */
1744#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 2678#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
1745#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */ 2679/* enum: Magic */
1746#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */ 2680#define MC_CMD_WOL_TYPE_MAGIC 0x0
1747#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */ 2681/* enum: MS Windows Magic */
1748#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */ 2682#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
1749#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */ 2683/* enum: IPv4 Syn */
1750#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */ 2684#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
1751#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */ 2685/* enum: IPv6 Syn */
2686#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
2687/* enum: Bitmap */
2688#define MC_CMD_WOL_TYPE_BITMAP 0x5
2689/* enum: Link */
2690#define MC_CMD_WOL_TYPE_LINK 0x6
2691/* enum: (Above this for future use) */
2692#define MC_CMD_WOL_TYPE_MAX 0x7
1752#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 2693#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
1753#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 2694#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
1754#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 2695#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -1818,7 +2759,7 @@
1818 2759
1819/***********************************/ 2760/***********************************/
1820/* MC_CMD_WOL_FILTER_REMOVE 2761/* MC_CMD_WOL_FILTER_REMOVE
1821 * Remove a WoL filter. 2762 * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
1822 */ 2763 */
1823#define MC_CMD_WOL_FILTER_REMOVE 0x33 2764#define MC_CMD_WOL_FILTER_REMOVE 0x33
1824 2765
@@ -1832,7 +2773,8 @@
1832 2773
1833/***********************************/ 2774/***********************************/
1834/* MC_CMD_WOL_FILTER_RESET 2775/* MC_CMD_WOL_FILTER_RESET
1835 * Reset (i.e. remove all) WoL filters. 2776 * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
2777 * ENOSYS
1836 */ 2778 */
1837#define MC_CMD_WOL_FILTER_RESET 0x34 2779#define MC_CMD_WOL_FILTER_RESET 0x34
1838 2780
@@ -1848,7 +2790,7 @@
1848 2790
1849/***********************************/ 2791/***********************************/
1850/* MC_CMD_SET_MCAST_HASH 2792/* MC_CMD_SET_MCAST_HASH
1851 * Set the MCASH hash value. 2793 * Set the MCAST hash value without otherwise reconfiguring the MAC
1852 */ 2794 */
1853#define MC_CMD_SET_MCAST_HASH 0x35 2795#define MC_CMD_SET_MCAST_HASH 0x35
1854 2796
@@ -1865,7 +2807,8 @@
1865 2807
1866/***********************************/ 2808/***********************************/
1867/* MC_CMD_NVRAM_TYPES 2809/* MC_CMD_NVRAM_TYPES
1868 * Get virtual NVRAM partitions information. 2810 * Return bitfield indicating available types of virtual NVRAM partitions.
2811 * Locks required: none. Returns: 0
1869 */ 2812 */
1870#define MC_CMD_NVRAM_TYPES 0x36 2813#define MC_CMD_NVRAM_TYPES 0x36
1871 2814
@@ -1874,26 +2817,54 @@
1874 2817
1875/* MC_CMD_NVRAM_TYPES_OUT msgresponse */ 2818/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
1876#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 2819#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
2820/* Bit mask of supported types. */
1877#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 2821#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
1878#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */ 2822/* enum: Disabled callisto. */
1879#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */ 2823#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
1880#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */ 2824/* enum: MC firmware. */
1881#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */ 2825#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
1882#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */ 2826/* enum: MC backup firmware. */
1883#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */ 2827#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
1884#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */ 2828/* enum: Static configuration Port0. */
1885#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */ 2829#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
1886#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */ 2830/* enum: Static configuration Port1. */
1887#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */ 2831#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
1888#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */ 2832/* enum: Dynamic configuration Port0. */
1889#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */ 2833#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
1890#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */ 2834/* enum: Dynamic configuration Port1. */
1891#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */ 2835#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
2836/* enum: Expansion Rom. */
2837#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
2838/* enum: Expansion Rom Configuration Port0. */
2839#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
2840/* enum: Expansion Rom Configuration Port1. */
2841#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
2842/* enum: Phy Configuration Port0. */
2843#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
2844/* enum: Phy Configuration Port1. */
2845#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
2846/* enum: Log. */
2847#define MC_CMD_NVRAM_TYPE_LOG 0xc
2848/* enum: FPGA image. */
2849#define MC_CMD_NVRAM_TYPE_FPGA 0xd
2850/* enum: FPGA backup image */
2851#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
2852/* enum: FC firmware. */
2853#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
2854/* enum: FC backup firmware. */
2855#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
2856/* enum: CPLD image. */
2857#define MC_CMD_NVRAM_TYPE_CPLD 0x11
2858/* enum: Licensing information. */
2859#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
2860/* enum: FC Log. */
2861#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
1892 2862
1893 2863
1894/***********************************/ 2864/***********************************/
1895/* MC_CMD_NVRAM_INFO 2865/* MC_CMD_NVRAM_INFO
1896 * Read info about a virtual NVRAM partition. 2866 * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
2867 * EINVAL (bad type).
1897 */ 2868 */
1898#define MC_CMD_NVRAM_INFO 0x37 2869#define MC_CMD_NVRAM_INFO 0x37
1899 2870
@@ -1913,13 +2884,19 @@
1913#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 2884#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
1914#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 2885#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
1915#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 2886#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
2887#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
2888#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
2889#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
2890#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
1916#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 2891#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
1917#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 2892#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
1918 2893
1919 2894
1920/***********************************/ 2895/***********************************/
1921/* MC_CMD_NVRAM_UPDATE_START 2896/* MC_CMD_NVRAM_UPDATE_START
1922 * Start a group of update operations on a virtual NVRAM partition. 2897 * Start a group of update operations on a virtual NVRAM partition. Locks
2898 * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
2899 * PHY_LOCK required and not held).
1923 */ 2900 */
1924#define MC_CMD_NVRAM_UPDATE_START 0x38 2901#define MC_CMD_NVRAM_UPDATE_START 0x38
1925 2902
@@ -1935,7 +2912,9 @@
1935 2912
1936/***********************************/ 2913/***********************************/
1937/* MC_CMD_NVRAM_READ 2914/* MC_CMD_NVRAM_READ
1938 * Read data from a virtual NVRAM partition. 2915 * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
2916 * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
2917 * PHY_LOCK required and not held)
1939 */ 2918 */
1940#define MC_CMD_NVRAM_READ 0x39 2919#define MC_CMD_NVRAM_READ 0x39
1941 2920
@@ -1945,6 +2924,7 @@
1945/* Enum values, see field(s): */ 2924/* Enum values, see field(s): */
1946/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ 2925/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1947#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 2926#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
2927/* amount to read in bytes */
1948#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 2928#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
1949 2929
1950/* MC_CMD_NVRAM_READ_OUT msgresponse */ 2930/* MC_CMD_NVRAM_READ_OUT msgresponse */
@@ -1959,7 +2939,9 @@
1959 2939
1960/***********************************/ 2940/***********************************/
1961/* MC_CMD_NVRAM_WRITE 2941/* MC_CMD_NVRAM_WRITE
1962 * Write data to a virtual NVRAM partition. 2942 * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
2943 * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
2944 * PHY_LOCK required and not held)
1963 */ 2945 */
1964#define MC_CMD_NVRAM_WRITE 0x3a 2946#define MC_CMD_NVRAM_WRITE 0x3a
1965 2947
@@ -1983,7 +2965,9 @@
1983 2965
1984/***********************************/ 2966/***********************************/
1985/* MC_CMD_NVRAM_ERASE 2967/* MC_CMD_NVRAM_ERASE
1986 * Erase sector(s) from a virtual NVRAM partition. 2968 * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
2969 * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
2970 * PHY_LOCK required and not held)
1987 */ 2971 */
1988#define MC_CMD_NVRAM_ERASE 0x3b 2972#define MC_CMD_NVRAM_ERASE 0x3b
1989 2973
@@ -2001,7 +2985,9 @@
2001 2985
2002/***********************************/ 2986/***********************************/
2003/* MC_CMD_NVRAM_UPDATE_FINISH 2987/* MC_CMD_NVRAM_UPDATE_FINISH
2004 * Finish a group of update operations on a virtual NVRAM partition. 2988 * Finish a group of update operations on a virtual NVRAM partition. Locks
2989 * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
2990 * type/offset/length), EACCES (if PHY_LOCK required and not held)
2005 */ 2991 */
2006#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c 2992#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
2007 2993
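The START and FINISH commands above bracket a whole group of erase/write operations. A hedged outline of the usual sequence follows; it assumes the START and FINISH requests are a single TYPE dword (their IN definitions are not shown in this hunk), and mcdi_rpc() is a stand-in transport, not an API defined here:

#include <stdint.h>

/* Stand-in for the real MCDI transport; assumed to return 0 or -errno.
 * This stub always succeeds so the sketch is self-contained. */
static int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, unsigned int inlen)
{
        (void)cmd; (void)inbuf; (void)inlen;
        return 0;
}

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
        buf[ofst + 0] = v & 0xff;
        buf[ofst + 1] = (v >> 8) & 0xff;
        buf[ofst + 2] = (v >> 16) & 0xff;
        buf[ofst + 3] = (v >> 24) & 0xff;
}

/* Bracket an update: START, then MC_CMD_NVRAM_ERASE (0x3b) and
 * MC_CMD_NVRAM_WRITE (0x3a) chunks (elided here), then FINISH. */
static int nvram_update(uint32_t type)
{
        uint8_t inbuf[4];
        int rc;

        mcdi_put_dword(inbuf, 0, type);
        rc = mcdi_rpc(0x38 /* MC_CMD_NVRAM_UPDATE_START */, inbuf, 4);
        if (rc)
                return rc;

        /* ... erase and write the partition contents here ... */

        mcdi_put_dword(inbuf, 0, type);
        return mcdi_rpc(0x3c /* MC_CMD_NVRAM_UPDATE_FINISH */, inbuf, 4);
}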
@@ -2019,6 +3005,20 @@
2019/***********************************/ 3005/***********************************/
2020/* MC_CMD_REBOOT 3006/* MC_CMD_REBOOT
2021 * Reboot the MC. 3007 * Reboot the MC.
3008 *
3009 * The AFTER_ASSERTION flag is intended to be used when the driver notices an
3010 * assertion failure (at which point it is expected to perform a complete tear
3011 * down and reinitialise), to allow both ports to reset the MC once in an
3012 * atomic fashion.
3013 *
3014 * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
3015 * which means that they will automatically reboot out of the assertion
3016 * handler, so this is in practice an optional operation. It is still
3017 * recommended that drivers execute this to support custom firmwares with
3018 * REBOOT_ON_ASSERT=0.
3019 *
3020 * Locks required: NONE. Returns: Nothing. You get back a response with ERR=1,
3021 * DATALEN=0.
2022 */ 3022 */
2023#define MC_CMD_REBOOT 0x3d 3023#define MC_CMD_REBOOT 0x3d
2024 3024
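Since the response always carries ERR=1 and DATALEN=0, a caller has to invert the usual error handling. A sketch, with the mcdi_rpc callback as a hypothetical stand-in returning 0 on success and a negative errno on error:

/* MC_CMD_REBOOT never completes normally: the MC always answers with
 * ERR=1 and no payload, so an error return is the expected, successful
 * outcome here. */
static int reboot_mc(int (*mcdi_rpc)(unsigned int cmd))
{
        int rc = mcdi_rpc(0x3d /* MC_CMD_REBOOT */);

        return rc ? 0 : -1; /* success looks like failure for this command */
}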
@@ -2033,7 +3033,9 @@
2033 3033
2034/***********************************/ 3034/***********************************/
2035/* MC_CMD_SCHEDINFO 3035/* MC_CMD_SCHEDINFO
2036 * Request scheduler info. 3036 * Request scheduler info. Locks required: NONE. Returns: An array of
3037 * (timeslice, maximum overrun), one for each thread, in ascending order of
3038 * thread address.
2037 */ 3039 */
2038#define MC_CMD_SCHEDINFO 0x3e 3040#define MC_CMD_SCHEDINFO 0x3e
2039 3041
@@ -2052,14 +3054,24 @@
2052 3054
2053/***********************************/ 3055/***********************************/
2054/* MC_CMD_REBOOT_MODE 3056/* MC_CMD_REBOOT_MODE
3057 * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
3058 * mode to the specified value. Returns the old mode.
2055 */ 3059 */
2056#define MC_CMD_REBOOT_MODE 0x3f 3060#define MC_CMD_REBOOT_MODE 0x3f
2057 3061
2058/* MC_CMD_REBOOT_MODE_IN msgrequest */ 3062/* MC_CMD_REBOOT_MODE_IN msgrequest */
2059#define MC_CMD_REBOOT_MODE_IN_LEN 4 3063#define MC_CMD_REBOOT_MODE_IN_LEN 4
2060#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 3064#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
2061#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */ 3065/* enum: Normal. */
2062#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */ 3066#define MC_CMD_REBOOT_MODE_NORMAL 0x0
3067/* enum: Power-on Reset. */
3068#define MC_CMD_REBOOT_MODE_POR 0x2
3069/* enum: Snapper. */
3070#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
3071/* enum: snapper fake POR */
3072#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
3073#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
3074#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
2063 3075
2064/* MC_CMD_REBOOT_MODE_OUT msgresponse */ 3076/* MC_CMD_REBOOT_MODE_OUT msgresponse */
2065#define MC_CMD_REBOOT_MODE_OUT_LEN 4 3077#define MC_CMD_REBOOT_MODE_OUT_LEN 4
@@ -2069,32 +3081,145 @@
2069/***********************************/ 3081/***********************************/
2070/* MC_CMD_SENSOR_INFO 3082/* MC_CMD_SENSOR_INFO
2071 * Returns information about every available sensor. 3083 * Returns information about every available sensor.
3084 *
3085 * Each sensor has a single (16bit) value, and a corresponding state. The
3086 * mapping between value and state is nominally determined by the MC, but may
3087 * be implemented using up to 2 ranges per sensor.
3088 *
3089 * This call returns a mask (32bit) of the sensors that are supported by this
3090 * platform, then an array of sensor information structures, in order of sensor
3091 * type (but without gaps for unimplemented sensors). Each structure defines
3092 * the ranges for the corresponding sensor. An unused range is indicated by
3093 * equal limit values. If one range is used, a value outside that range results
3094 * in STATE_FATAL. If two ranges are used, a value outside the second range
3095 * results in STATE_FATAL while a value outside the first and inside the second
3096 * range results in STATE_WARNING.
3097 *
3098 * Sensor masks and sensor information arrays are organised into pages. For
3099 * backward compatibility, older host software can only use sensors in page 0.
3100 * Bit 32 in the sensor mask was previously unused, and is no reserved for use
3101 * as the next page flag.
3102 *
3103 * If the request does not contain a PAGE value then firmware will only return
3104 * page 0 of sensor information, with bit 31 in the sensor mask cleared.
3105 *
3106 * If the request contains a PAGE value then firmware responds with the sensor
3107 * mask and sensor information array for that page of sensors. In this case bit
3108 * 31 in the mask is set if another page exists.
3109 *
3110 * Locks required: None. Returns: 0
2072 */ 3111 */
2073#define MC_CMD_SENSOR_INFO 0x41 3112#define MC_CMD_SENSOR_INFO 0x41
2074 3113
2075/* MC_CMD_SENSOR_INFO_IN msgrequest */ 3114/* MC_CMD_SENSOR_INFO_IN msgrequest */
2076#define MC_CMD_SENSOR_INFO_IN_LEN 0 3115#define MC_CMD_SENSOR_INFO_IN_LEN 0
2077 3116
3117/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
3118#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
3119/* Which page of sensors to report.
3120 *
3121 * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
3122 *
3123 * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
3124 */
3125#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
3126
2078/* MC_CMD_SENSOR_INFO_OUT msgresponse */ 3127/* MC_CMD_SENSOR_INFO_OUT msgresponse */
2079#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12 3128#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
2080#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 3129#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
2081#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) 3130#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
2082#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 3131#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
2083#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */ 3132/* enum: Controller temperature: degC */
2084#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */ 3133#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
2085#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */ 3134/* enum: Phy common temperature: degC */
2086#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */ 3135#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
2087#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */ 3136/* enum: Controller cooling: bool */
2088#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */ 3137#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
2089#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */ 3138/* enum: Phy 0 temperature: degC */
2090#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */ 3139#define MC_CMD_SENSOR_PHY0_TEMP 0x3
2091#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */ 3140/* enum: Phy 0 cooling: bool */
2092#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */ 3141#define MC_CMD_SENSOR_PHY0_COOLING 0x4
2093#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */ 3142/* enum: Phy 1 temperature: degC */
2094#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */ 3143#define MC_CMD_SENSOR_PHY1_TEMP 0x5
2095#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */ 3144/* enum: Phy 1 cooling: bool */
2096#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */ 3145#define MC_CMD_SENSOR_PHY1_COOLING 0x6
2097#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */ 3146/* enum: 1.0v power: mV */
3147#define MC_CMD_SENSOR_IN_1V0 0x7
3148/* enum: 1.2v power: mV */
3149#define MC_CMD_SENSOR_IN_1V2 0x8
3150/* enum: 1.8v power: mV */
3151#define MC_CMD_SENSOR_IN_1V8 0x9
3152/* enum: 2.5v power: mV */
3153#define MC_CMD_SENSOR_IN_2V5 0xa
3154/* enum: 3.3v power: mV */
3155#define MC_CMD_SENSOR_IN_3V3 0xb
3156/* enum: 12v power: mV */
3157#define MC_CMD_SENSOR_IN_12V0 0xc
3158/* enum: 1.2v analogue power: mV */
3159#define MC_CMD_SENSOR_IN_1V2A 0xd
3160/* enum: reference voltage: mV */
3161#define MC_CMD_SENSOR_IN_VREF 0xe
3162/* enum: AOE FPGA power: mV */
3163#define MC_CMD_SENSOR_OUT_VAOE 0xf
3164/* enum: AOE FPGA temperature: degC */
3165#define MC_CMD_SENSOR_AOE_TEMP 0x10
3166/* enum: AOE FPGA PSU temperature: degC */
3167#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
3168/* enum: AOE PSU temperature: degC */
3169#define MC_CMD_SENSOR_PSU_TEMP 0x12
3170/* enum: Fan 0 speed: RPM */
3171#define MC_CMD_SENSOR_FAN_0 0x13
3172/* enum: Fan 1 speed: RPM */
3173#define MC_CMD_SENSOR_FAN_1 0x14
3174/* enum: Fan 2 speed: RPM */
3175#define MC_CMD_SENSOR_FAN_2 0x15
3176/* enum: Fan 3 speed: RPM */
3177#define MC_CMD_SENSOR_FAN_3 0x16
3178/* enum: Fan 4 speed: RPM */
3179#define MC_CMD_SENSOR_FAN_4 0x17
3180/* enum: AOE FPGA input power: mV */
3181#define MC_CMD_SENSOR_IN_VAOE 0x18
3182/* enum: AOE FPGA current: mA */
3183#define MC_CMD_SENSOR_OUT_IAOE 0x19
3184/* enum: AOE FPGA input current: mA */
3185#define MC_CMD_SENSOR_IN_IAOE 0x1a
3186/* enum: NIC power consumption: W */
3187#define MC_CMD_SENSOR_NIC_POWER 0x1b
3188/* enum: 0.9v power voltage: mV */
3189#define MC_CMD_SENSOR_IN_0V9 0x1c
3190/* enum: 0.9v power current: mA */
3191#define MC_CMD_SENSOR_IN_I0V9 0x1d
3192/* enum: 1.2v power current: mA */
3193#define MC_CMD_SENSOR_IN_I1V2 0x1e
3194/* enum: Not a sensor: reserved for the next page flag */
3195#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
3196/* enum: 0.9v power voltage (at ADC): mV */
3197#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
3198/* enum: Controller temperature 2: degC */
3199#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
3200/* enum: Voltage regulator internal temperature: degC */
3201#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
3202/* enum: 0.9V voltage regulator temperature: degC */
3203#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
3204/* enum: 1.2V voltage regulator temperature: degC */
3205#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
3206/* enum: controller internal temperature sensor voltage (internal ADC): mV */
3207#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
3208/* enum: controller internal temperature (internal ADC): degC */
3209#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
3210/* enum: controller internal temperature sensor voltage (external ADC): mV */
3211#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
3212/* enum: controller internal temperature (external ADC): degC */
3213#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
3214/* enum: ambient temperature: degC */
3215#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
3216/* enum: air flow: bool */
3217#define MC_CMD_SENSOR_AIRFLOW 0x2a
3218/* enum: voltage between VDD08D and VSS08D at CSR: mV */
3219#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
3220/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
3221#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
3222/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
2098#define MC_CMD_SENSOR_ENTRY_OFST 4 3223#define MC_CMD_SENSOR_ENTRY_OFST 4
2099#define MC_CMD_SENSOR_ENTRY_LEN 8 3224#define MC_CMD_SENSOR_ENTRY_LEN 8
2100#define MC_CMD_SENSOR_ENTRY_LO_OFST 4 3225#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
@@ -2102,6 +3227,23 @@
2102#define MC_CMD_SENSOR_ENTRY_MINNUM 1 3227#define MC_CMD_SENSOR_ENTRY_MINNUM 1
2103#define MC_CMD_SENSOR_ENTRY_MAXNUM 31 3228#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
2104 3229
3230/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
3231#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
3232#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
3233#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
3234#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
3235/* Enum values, see field(s): */
3236/* MC_CMD_SENSOR_INFO_OUT */
3237#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
3238#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
3239/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
3240/* MC_CMD_SENSOR_ENTRY_OFST 4 */
3241/* MC_CMD_SENSOR_ENTRY_LEN 8 */
3242/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
3243/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
3244/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
3245/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
3246
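Enumerating every sensor therefore means looping until the NEXT_PAGE bit (LBN 31 of the mask) clears. A sketch, with the get_sensor_page callback standing in for an MC_CMD_SENSOR_INFO round trip with MC_CMD_SENSOR_INFO_EXT_IN_PAGE set:

#include <stdint.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

static void walk_sensor_pages(int (*get_sensor_page)(uint32_t page,
                                                     uint8_t *outbuf))
{
        uint8_t outbuf[252 /* MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX */];
        uint32_t page = 0;

        do {
                if (get_sensor_page(page++, outbuf))
                        break;
        } while (mcdi_get_dword(outbuf, 0) & (1u << 31)); /* NEXT_PAGE */
}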
2105/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ 3247/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
2106#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8 3248#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
2107#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0 3249#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
@@ -2124,39 +3266,80 @@
2124 3266
2125/***********************************/ 3267/***********************************/
2126/* MC_CMD_READ_SENSORS 3268/* MC_CMD_READ_SENSORS
2127 * Returns the current reading from each sensor. 3269 * Returns the current reading from each sensor. DMAs an array of sensor
3270 * readings, in order of sensor type (but without gaps for unimplemented
3271 * sensors), into host memory. Each array element is a
3272 * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
3273 *
3274 * If the request does not contain the LENGTH field then only sensors 0 to 30
3275 * are reported, to avoid DMA buffer overflow in older host software. If the
3276 * sensor readings require more space than the LENGTH allows, then EINVAL is
3277 * returned.
3278 *
3279 * The MC will send a SENSOREVT event every time any sensor changes state. The
3280 * driver is responsible for ensuring that it doesn't miss any events. The
3281 * board will function normally if all sensors are in STATE_OK or
3282 * STATE_WARNING. Otherwise the board should not be expected to function.
2128 */ 3283 */
2129#define MC_CMD_READ_SENSORS 0x42 3284#define MC_CMD_READ_SENSORS 0x42
2130 3285
2131/* MC_CMD_READ_SENSORS_IN msgrequest */ 3286/* MC_CMD_READ_SENSORS_IN msgrequest */
2132#define MC_CMD_READ_SENSORS_IN_LEN 8 3287#define MC_CMD_READ_SENSORS_IN_LEN 8
3288/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
2133#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 3289#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
2134#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 3290#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
2135#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 3291#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
2136#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 3292#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
2137 3293
3294/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
3295#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
3296/* DMA address of host buffer for sensor readings */
3297#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
3298#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
3299#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
3300#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
3301/* Size in bytes of host buffer. */
3302#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
3303
2138/* MC_CMD_READ_SENSORS_OUT msgresponse */ 3304/* MC_CMD_READ_SENSORS_OUT msgresponse */
2139#define MC_CMD_READ_SENSORS_OUT_LEN 0 3305#define MC_CMD_READ_SENSORS_OUT_LEN 0
2140 3306
3307/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
3308#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
3309
2141/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */ 3310/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
2142#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3 3311#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
2143#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0 3312#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
2144#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2 3313#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
2145#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0 3314#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
2146#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16 3315#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
2147#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 3316#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
2148#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 3317#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
2149#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */ 3318/* enum: Ok. */
2150#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */ 3319#define MC_CMD_SENSOR_STATE_OK 0x0
2151#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */ 3320/* enum: Breached warning threshold. */
2152#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */ 3321#define MC_CMD_SENSOR_STATE_WARNING 0x1
3322/* enum: Breached fatal threshold. */
3323#define MC_CMD_SENSOR_STATE_FATAL 0x2
3324/* enum: Fault with sensor. */
3325#define MC_CMD_SENSOR_STATE_BROKEN 0x3
3326/* enum: Sensor is working but does not currently have a reading. */
3327#define MC_CMD_SENSOR_STATE_NO_READING 0x4
2153#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 3328#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
2154#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 3329#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
3330#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
3331#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
3332/* Enum values, see field(s): */
3333/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
3334#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
3335#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
2155 3336
2156 3337
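Each element of the DMA'd readings array is a single little-endian dword: VALUE in bits 0-15, STATE in bits 16-23 and TYPE in bits 24-31, per the LBN/WIDTH definitions above. A minimal host-side decode sketch in plain C (the struct and function names are illustrative, not part of this header or the sfc driver):

    #include <stdint.h>

    struct sensor_reading {
        uint16_t value; /* VALUE field, bits 0-15 */
        uint8_t state;  /* STATE field, bits 16-23: MC_CMD_SENSOR_STATE_* */
        uint8_t type;   /* TYPE field, bits 24-31: sensor type enum */
    };

    /* Decode one MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword; 'p' points at a
     * 4-byte little-endian entry in the host DMA buffer.
     */
    static struct sensor_reading decode_sensor_entry(const uint8_t *p)
    {
        uint32_t dword = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
        struct sensor_reading r = {
            .value = dword & 0xffff,       /* VALUE_LBN 0,  WIDTH 16 */
            .state = (dword >> 16) & 0xff, /* STATE_LBN 16, WIDTH 8  */
            .type  = (dword >> 24) & 0xff, /* TYPE_LBN 24,  WIDTH 8  */
        };
        return r;
    }

A STATE of MC_CMD_SENSOR_STATE_NO_READING means the VALUE field carries no useful data and should be ignored.
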
 /***********************************/
 /* MC_CMD_GET_PHY_STATE
- * Report current state of PHY.
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
  */
 #define MC_CMD_GET_PHY_STATE 0x43

@@ -2166,13 +3349,16 @@
 /* MC_CMD_GET_PHY_STATE_OUT msgresponse */
 #define MC_CMD_GET_PHY_STATE_OUT_LEN 4
 #define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
-#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
-#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2


 /***********************************/
 /* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control.
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
  */
 #define MC_CMD_SETUP_8021QBB 0x44

@@ -2187,7 +3373,7 @@

 /***********************************/
 /* MC_CMD_WOL_FILTER_GET
- * Retrieve ID of any WoL filters.
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
  */
 #define MC_CMD_WOL_FILTER_GET 0x45

@@ -2201,7 +3387,8 @@

 /***********************************/
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state.
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
  */
 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46

@@ -2241,7 +3428,8 @@

 /***********************************/
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state.
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
  */
 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47

@@ -2256,7 +3444,7 @@

 /***********************************/
 /* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset.
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
  */
 #define MC_CMD_MAC_RESET_RESTORE 0x48

@@ -2269,6 +3457,9 @@

 /***********************************/
 /* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow tests that the driver copes gracefully). Locks
+ * required: None. Returns: 0
  */
 #define MC_CMD_TESTASSERT 0x49

@@ -2281,14 +3472,23 @@

 /***********************************/
 /* MC_CMD_WORKAROUND
- * Enable/Disable a given workaround.
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
  */
 #define MC_CMD_WORKAROUND 0x4a

 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define MC_CMD_WORKAROUND_IN_LEN 8
 #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
-#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
+/* enum: Bug 17230 workaround. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 workaround (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug 35017 workaround (A64 tables must be identity map). */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
 #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4

 /* MC_CMD_WORKAROUND_OUT msgresponse */
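The request is two dwords: the workaround number at TYPE_OFST 0 and an enable flag at ENABLED_OFST 4. A packing sketch in plain C; put_le32() is a local illustrative helper, not an MCDI API:

    #include <stdint.h>
    #include <string.h>

    /* Store a 32-bit value little-endian at the given byte offset. */
    static void put_le32(uint8_t *buf, unsigned ofst, uint32_t val)
    {
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
    }

    /* Build an 8-byte MC_CMD_WORKAROUND request enabling bug 35388's
     * workaround, following MC_CMD_WORKAROUND_IN_TYPE_OFST/ENABLED_OFST.
     */
    static void build_workaround_req(uint8_t req[8])
    {
        memset(req, 0, 8);
        put_le32(req, 0, 0x2); /* MC_CMD_WORKAROUND_BUG35388 */
        put_le32(req, 4, 1);   /* enabled */
    }

Since an unknown workaround number returns EINVAL by design, callers would typically log and continue rather than treat it as fatal.
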
@@ -2297,7 +3497,12 @@

 /***********************************/
 /* MC_CMD_GET_PHY_MEDIA_INFO
- * Read media-specific data from PHY.
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
  */
 #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b

@@ -2309,6 +3514,7 @@
 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
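The response is self-describing: a DATALEN dword (in bytes) at offset 0 followed by the DATA bytes at offset 4. A bounds-checked extraction sketch; get_le32() is the read counterpart of the illustrative put_le32() above:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t get_le32(const uint8_t *buf, unsigned ofst)
    {
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
    }

    /* Locate the module data in an MC_CMD_GET_PHY_MEDIA_INFO response;
     * returns the payload length in bytes, or 0 if the response is too
     * short or inconsistent.
     */
    static unsigned get_media_info_data(const uint8_t *resp, size_t resp_len,
                                        const uint8_t **data)
    {
        uint32_t datalen;

        if (resp_len < 5) /* MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN */
            return 0;
        datalen = get_le32(resp, 0); /* DATALEN_OFST */
        if (datalen > resp_len - 4)
            return 0;
        *data = resp + 4; /* DATA_OFST */
        return datalen;
    }
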
@@ -2318,7 +3524,8 @@

 /***********************************/
 /* MC_CMD_NVRAM_TEST
- * Test a particular NVRAM partition.
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
  */
 #define MC_CMD_NVRAM_TEST 0x4c

@@ -2331,22 +3538,31 @@
 /* MC_CMD_NVRAM_TEST_OUT msgresponse */
 #define MC_CMD_NVRAM_TEST_OUT_LEN 4
 #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
-#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
-#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2


 /***********************************/
 /* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver.
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
  */
 #define MC_CMD_MRSFP_TWEAK 0x4d

 /* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
 #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
 #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
 #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
 #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
 #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12

 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
@@ -2354,16 +3570,23 @@

 /* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
 #define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
 #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
 #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
 #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1


 /***********************************/
 /* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits.
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e

@@ -2372,9 +3595,13 @@
 #define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
 /* Enum values, see field(s): */
 /* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
 #define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
 #define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
 #define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
 #define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16

 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
@@ -2396,9 +3623,3640 @@
 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12

+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
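NUM_PARTITIONS at offset 0 is followed by a packed array of 4-byte type IDs, so a response of length MC_CMD_NVRAM_PARTITIONS_OUT_LEN(n) carries n IDs. A walk over the list might look like this (reusing the illustrative get_le32() helper from an earlier sketch):

    #include <stdio.h>

    /* Enumerate the partition type IDs in an MC_CMD_NVRAM_PARTITIONS
     * response, clamped to what the response actually contains.
     */
    static void list_partitions(const uint8_t *resp, size_t resp_len)
    {
        uint32_t n = get_le32(resp, 0); /* NUM_PARTITIONS_OFST */

        for (uint32_t i = 0; i < n && 4 + 4 * (i + 1) <= resp_len; i++) {
            uint32_t type = get_le32(resp, 4 + 4 * i); /* TYPE_ID array */
            /* 'type' matches the NVRAM_PARTITION_TYPE enum further down */
            printf("partition %u: type 0x%x\n", i, type);
        }
    }
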
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
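The FLAGS dword gates which of the remaining fields are meaningful, per the _VALID_LBN bits above. A parsing sketch (illustrative struct and names; reuses the get_le32() helper):

    struct nvram_meta {
        uint32_t subtype;
        uint16_t ver[4];  /* W, X, Y, Z */
        const char *desc; /* NUL-terminated if DESCRIPTION_VALID */
    };

    static int parse_nvram_metadata(const uint8_t *resp, size_t len,
                                    struct nvram_meta *m)
    {
        uint32_t flags;

        if (len < 20) /* MC_CMD_NVRAM_METADATA_OUT_LENMIN */
            return -1;
        flags = get_le32(resp, 4); /* FLAGS_OFST */
        if (flags & (1u << 0))     /* SUBTYPE_VALID_LBN */
            m->subtype = get_le32(resp, 8);
        if (flags & (1u << 1)) {   /* VERSION_VALID_LBN */
            for (int i = 0; i < 4; i++) /* 16-bit fields at 12/14/16/18 */
                m->ver[i] = resp[12 + 2 * i] |
                            ((uint16_t)resp[13 + 2 * i] << 8);
        }
        if (flags & (1u << 2))     /* DESCRIPTION_VALID_LBN */
            m->desc = (const char *)resp + 20;
        return 0;
    }
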
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function.
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
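Given MAC_ADDR_BASE, MAC_COUNT and MAC_STRIDE, the function's n-th address is the base plus n times the stride, treating the 6-byte MAC as a 48-bit big-endian integer. A sketch (how carries across the OUI boundary are expected to behave is not specified here, so this simply does 48-bit arithmetic, truncating any overflow):

    /* Compute the n-th MAC address from the base/stride returned by
     * MC_CMD_GET_MAC_ADDRESSES. Caller should check n < MAC_COUNT.
     */
    static void nth_mac(const uint8_t base[6], uint32_t stride, uint32_t n,
                        uint8_t out[6])
    {
        uint64_t mac = 0;

        for (int i = 0; i < 6; i++)
            mac = (mac << 8) | base[i];
        mac += (uint64_t)n * stride;
        for (int i = 5; i >= 0; i--) {
            out[i] = mac & 0xff;
            mac >>= 8;
        }
    }
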
 /* MC_CMD_RESOURCE_SPECIFIER enum */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+
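Partition IDs in the PHY range encode the PHY number in their low 8 bits. A small predicate sketch using the PHY_MIN/PHY_MAX values above (illustrative helper, not a driver API):

    /* True if an NVRAM partition type ID falls in the PHY range; the low
     * 8 bits then give the PHY ID.
     */
    static int is_phy_partition(uint16_t id, uint8_t *phy_id)
    {
        if (id < 0xa00 || id > 0xaff) /* PHY_MIN .. PHY_MAX */
            return 0;
        *phy_id = id & 0xff;
        return 1;
    }
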
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count only RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count only TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
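The variable-length tail means the request is MC_CMD_INIT_EVQ_IN_LEN(num) = 36 + 8*num bytes, with one 8-byte address per 4KB of host memory backing the EVQ, and the FLAGS dword is packed by the _LBN/_WIDTH pairs. Two illustrative helpers (not driver APIs; set_field() reuses the get_le32()/put_le32() helpers from earlier sketches):

    /* Set a bitfield within a 32-bit MCDI field, following the _LBN/_WIDTH
     * convention used throughout this header.
     */
    static void set_field(uint8_t *buf, unsigned ofst, unsigned lbn,
                          unsigned width, uint32_t val)
    {
        uint32_t dword = get_le32(buf, ofst);
        uint32_t mask = (width < 32 ? (1u << width) - 1 : 0xffffffffu) << lbn;

        dword = (dword & ~mask) | ((val << lbn) & mask);
        put_le32(buf, ofst, dword);
    }

    /* Number of 8-byte DMA addresses for an EVQ backed by 'bytes' of host
     * memory: one address per 4KB page, hence LEN(num) = 36 + 8 * num.
     */
    static unsigned evq_dma_addr_count(size_t bytes)
    {
        return (bytes + 4095) / 4096;
    }

For example, set_field(req, 16, 0, 1, 1) would set FLAG_INTERRUPTING (FLAGS_OFST 16, LBN 0, WIDTH 1) in a request buffer.
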
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fiber channel over ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fiber channel over IP over ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+
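INIT_RXQ follows the same shape with a 28-byte fixed part, so MC_CMD_INIT_RXQ_IN_LEN(num) = 28 + 8*num and at most DMA_ADDR_MAXNUM (28) pages. A sizing sketch, assuming 8-byte RX descriptors as on EF10 (an assumption, not stated in this header):

    /* Compute the INIT_RXQ request length for a ring of 'entries'
     * descriptors; returns -1 if the ring needs more pages than MAXNUM.
     */
    static int rxq_request_len(unsigned entries, unsigned *len)
    {
        unsigned pages = (entries * 8 + 4095) / 4096;

        if (pages < 1 || pages > 28) /* DMA_ADDR_MINNUM .. MAXNUM */
            return -1;
        *len = 28 + 8 * pages; /* MC_CMD_INIT_RXQ_IN_LEN(pages) */
        return 0;
    }
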
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ * Set up a transmit queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the TXQ.
+ */
+#define MC_CMD_INIT_TXQ 0x82
+
+/* MC_CMD_INIT_TXQ_IN msgrequest */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
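A chunk is programmed in batches of up to ENTRY_MAXNUM (30) 8-byte addresses following a 12-byte fixed header, giving MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(n) = 12 + 8*n. A header-packing sketch (reuses the illustrative put_le32() helper):

    /* Fill the fixed part of a PROGRAM_BUFTBL_ENTRIES request for 'n'
     * entries (1 <= n <= 30) and return the total request length; the
     * 8-byte entry addresses follow at offset 12.
     */
    static unsigned buftbl_prog_header(uint8_t *req, uint32_t handle,
                                       uint32_t first_id, uint32_t n)
    {
        put_le32(req, 0, handle);   /* from ALLOC_BUFTBL_CHUNK_OUT_HANDLE */
        put_le32(req, 4, first_id); /* FIRSTID */
        put_le32(req, 8, n);        /* NUMENTRIES */
        return 12 + 8 * n;          /* _IN_LEN(n) */
    }
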
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to port 0 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to port 1 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
+ * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
+ * a valid handle.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+
+
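As a concrete example of the IN layout, a single-recipient insert matching only on destination MAC and delivering to one host RX queue could be packed as below (illustrative sketch reusing the put_le32() helper; offsets and values follow the definitions above):

    #include <string.h>

    /* Build a 108-byte MC_CMD_FILTER_OP insert request. */
    static void build_filter_insert(uint8_t req[108],
                                    const uint8_t dst_mac[6],
                                    uint32_t port_id, uint32_t rxq)
    {
        memset(req, 0, 108);          /* reserved fields must be 0 */
        put_le32(req, 0, 0x0);        /* OP = OP_INSERT */
        put_le32(req, 12, port_id);   /* PORT_ID, e.g. EVB_PORT_ID_ASSIGNED */
        put_le32(req, 16, 1u << 4);   /* MATCH_FIELDS: MATCH_DST_MAC_LBN=4 */
        put_le32(req, 20, 0x1);       /* RX_DEST = HOST */
        put_le32(req, 24, rxq);       /* RX_QUEUE */
        put_le32(req, 28, 0x0);       /* RX_MODE = SIMPLE */
        put_le32(req, 40, 0xffffffff);/* TX_DEST = DEFAULT */
        memcpy(req + 52, dst_mac, 6); /* DST_MAC, network byte order */
    }

On success, the 64-bit handle comes back at MC_CMD_FILTER_OP_OUT_HANDLE_OFST; it is opaque, and it is what a later OP_REMOVE must present.
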
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address or LUE index */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+
+
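The read-modify-write op computes new = (old & mask) ^ value, so each bit's fate is chosen by the (mask, value) bit pair: (1,0) keeps, (1,1) toggles, (0,0) clears, (0,1) sets. A sketch of building the pair to set bit 3 and clear bit 0 while keeping everything else (illustrative helper only):

    /* Compute AND_MASK/XOR_VALUE for a PARSER_DISP_RW RMW that sets bit 3
     * and clears bit 0 of the target DMEM word.
     */
    static void rmw_set3_clear0(uint32_t *mask, uint32_t *value)
    {
        *mask = ~((1u << 3) | (1u << 0)); /* overwritten bits get mask=0 */
        *value = (1u << 3);               /* value=1 only where we set */
    }
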
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated to
+ * this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
4715#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
4716#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
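
/* Illustrative sketch: the *_META fields are 8 bytes, so LBN values such as
 * WAITCOUNT at bit 32 index into a 64-bit quantity formed from the _LO and
 * _HI dwords. Assuming "meta" is LO | ((u64)HI << 32) in host order, a
 * generic extractor could read:
 */
static inline unsigned int mcdi_sketch_meta_field(unsigned long long meta,
						  unsigned int lbn,
						  unsigned int width)
{
	return (unsigned int)((meta >> lbn) & ((1ull << width) - 1));
}
/* e.g. mcdi_sketch_meta_field(meta,
 *		MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN,
 *		MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH)
 */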


/***********************************/
/* MC_CMD_ALLOC_PIOBUF
 * Allocate a push I/O buffer for later use with a tx queue.
 */
#define MC_CMD_ALLOC_PIOBUF 0x8f

/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0

/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0


/***********************************/
/* MC_CMD_FREE_PIOBUF
 * Free a push I/O buffer.
 */
#define MC_CMD_FREE_PIOBUF 0x90

/* MC_CMD_FREE_PIOBUF_IN msgrequest */
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0

/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
#define MC_CMD_FREE_PIOBUF_OUT_LEN 0


/***********************************/
/* MC_CMD_GET_VI_TLP_PROCESSING
 * Get TLP steering and ordering information for a VI.
 */
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0

/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0

/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
/* Transaction processing steering hint 2 for use with the Ev Queue. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
/* Use Relaxed ordering model for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
/* Use ID based ordering for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
/* Set no snoop bit for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
/* Enable TPH for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0


/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
 * Set TLP steering and ordering information for a VI.
 */
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1

/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
/* Transaction processing steering hint 2 for use with the Ev Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
/* Use Relaxed ordering model for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
/* Use ID based ordering for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
/* Set the no snoop bit for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
/* Enable TPH for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4

/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
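
/* Illustrative sketch: the single-bit flags above have LBN values counted
 * from the start of the request (e.g. NO_SNOOP at bit 50, i.e. bit 2 of byte
 * 6). Setting one such flag directly in a request buffer could read:
 */
static inline void mcdi_sketch_set_flag(unsigned char *req, unsigned int lbn)
{
	req[lbn / 8] |= (unsigned char)(1u << (lbn % 8));
}
/* e.g. mcdi_sketch_set_flag(req, MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN) */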


/***********************************/
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
 * Get global PCIe steering and transaction processing configuration.
 */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc

/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
/* enum: MISC. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
/* enum: IDO. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
/* enum: RO. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
/* enum: TPH Type. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3

/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23


/***********************************/
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
 * Set global PCIe steering and transaction processing configuration.
 */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd

/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22

/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0


/***********************************/
/* MC_CMD_SATELLITE_DOWNLOAD
 * Download a new set of images to the satellite CPUs from the host.
 */
#define MC_CMD_SATELLITE_DOWNLOAD 0x91

/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
 * are subtle, and so downloads must proceed in a number of phases.
 *
 * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
 *
 * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
 * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
 * be a checksum (a simple 32-bit sum) of the transferred data. An individual
 * download may be aborted using CHUNK_ID_ABORT.
 *
 * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
 * similar to PHASE_IMEMS.
 *
 * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
 *
 * After any error (a requested abort is not considered to be an error) the
 * sequence must be restarted from PHASE_RESET.
 */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
/* Download phase. (Note: the IDLE phase is used internally and is never valid
 * in a command from the host.)
 */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
/* Target for download. (These match the blob numbers defined in
 * mc_flash_layout.h.)
 */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
/* enum: Last chunk, containing checksum rather than data */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
/* enum: Abort download of this item */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
/* Length of this chunk in bytes */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
/* Data for this chunk */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59

/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
/* Extra status information */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
/* enum: Code download OK, completed. */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
/* enum: Code download aborted as requested. */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
/* enum: Code download OK so far, send next chunk. */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
/* enum: Download phases out of sequence */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
/* enum: Bad target for this phase */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
/* enum: Chunk ID out of sequence */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
/* enum: Chunk length zero or too large */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
/* enum: Checksum was incorrect */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
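
/* Illustrative sketch: the comment above describes the final chunk's payload
 * as "a simple 32-bit sum" of the transferred data. One plausible reading,
 * shown here as an assumption rather than a definitive implementation, is a
 * wrapping sum over the little-endian 32-bit words already downloaded
 * (assuming "unsigned int" is 32 bits wide).
 */
static inline unsigned int mcdi_sketch_satellite_csum(const unsigned int *words,
						      unsigned int n_words)
{
	unsigned int sum = 0, i;

	for (i = 0; i < n_words; i++)
		sum += words[i];	/* wraps modulo 2^32 */
	return sum;
}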


/***********************************/
/* MC_CMD_GET_CAPABILITIES
 * Get device capabilities.
 *
 * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
 * reference inherent device capabilities as opposed to current NVRAM config.
 */
#define MC_CMD_GET_CAPABILITIES 0xbe

/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0

/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
/* enum: RXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
/* enum: TXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
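
/* Illustrative sketch: each capability is a single-bit field within FLAGS1,
 * located by its _LBN/_WIDTH pair. Given "flags1" (the dword at FLAGS1_OFST,
 * in host order), a TX TSO support test could read:
 */
static inline int mcdi_sketch_has_tx_tso(unsigned int flags1)
{
	return (flags1 >> MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN) &
	       ((1u << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH) - 1);
}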


/***********************************/
/* MC_CMD_V2_EXTN
 * Encapsulation for a v2 extended command
 */
#define MC_CMD_V2_EXTN 0x7f

/* MC_CMD_V2_EXTN_IN msgrequest */
#define MC_CMD_V2_EXTN_IN_LEN 4
/* the extended command number */
#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
/* the actual length of the encapsulated command (which is not in the v1
 * header)
 */
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
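
/* Illustrative sketch: the V2_EXTN request is a single dword whose bitfields
 * are placed by the _LBN/_WIDTH pairs above. Packing the extended command
 * number and actual encapsulated length (host order) might look like:
 */
static inline unsigned int mcdi_sketch_v2_extn_word(unsigned int ext_cmd,
						    unsigned int actual_len)
{
	return ((ext_cmd & ((1u << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH) - 1))
		<< MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
	       ((actual_len & ((1u << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH) - 1))
		<< MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN);
}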


/***********************************/
/* MC_CMD_TCM_BUCKET_ALLOC
 * Allocate a pacer bucket (for qau rp or a snapper test)
 */
#define MC_CMD_TCM_BUCKET_ALLOC 0xb2

/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0

/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0


/***********************************/
/* MC_CMD_TCM_BUCKET_FREE
 * Free a pacer bucket
 */
#define MC_CMD_TCM_BUCKET_FREE 0xb3

/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0

/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_TCM_BUCKET_INIT
 * Initialise pacer bucket with a given rate
 */
#define MC_CMD_TCM_BUCKET_INIT 0xb4

/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4

/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0


/***********************************/
/* MC_CMD_TCM_TXQ_INIT
 * Initialise a txq in the pacer with the given options, or set those options.
 */
#define MC_CMD_TCM_TXQ_INIT 0xb5

/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
/* bitmask of the priority queues this txq is inserted into */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
/* an already reserved bucket (typically set to the bucket associated with
 * the outer vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
/* an already reserved bucket (typically set to the bucket associated with
 * the inner vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24

/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0


/***********************************/
/* MC_CMD_LINK_PIOBUF
 * Link a push I/O buffer to a TxQ
 */
#define MC_CMD_LINK_PIOBUF 0x92

/* MC_CMD_LINK_PIOBUF_IN msgrequest */
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
/* Function Local Instance (VI) number. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4

/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
#define MC_CMD_LINK_PIOBUF_OUT_LEN 0


/***********************************/
/* MC_CMD_UNLINK_PIOBUF
 * Unlink a push I/O buffer from a TxQ
 */
#define MC_CMD_UNLINK_PIOBUF 0x93

/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0

/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0


/***********************************/
/* MC_CMD_VSWITCH_ALLOC
 * allocate and initialise a v-switch.
 */
#define MC_CMD_VSWITCH_ALLOC 0x94

/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* The type of v-switch to create. */
#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
/* enum: VLAN */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
/* enum: VEPA */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to support. */
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12

/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0


/***********************************/
/* MC_CMD_VSWITCH_FREE
 * de-allocate a v-switch.
 */
#define MC_CMD_VSWITCH_FREE 0x95

/* MC_CMD_VSWITCH_FREE_IN msgrequest */
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0

/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
#define MC_CMD_VSWITCH_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_VPORT_ALLOC
 * allocate a v-port.
 */
#define MC_CMD_VPORT_ALLOC 0x96

/* MC_CMD_VPORT_ALLOC_IN msgrequest */
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* The type of the new v-port. */
#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
/* enum: VLAN (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
/* enum: VEB (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
/* enum: VEPA (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
/* enum: A normal v-port receives packets which match a specified MAC and/or
 * VLAN.
 */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
/* enum: An expansion v-port receives traffic which doesn't match any other
 * v-port.
 */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
/* enum: A test v-port receives packets which match any filters installed by
 * its downstream components.
 */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
/* Flags controlling v-port creation */
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to insert/remove. */
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16

/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
/* The handle of the new v-port */
#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
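
/* Illustrative sketch: VLAN_TAGS is one dword carrying up to two 16-bit tags
 * at the _LBN positions above. Packing both tags (host order) could read:
 */
static inline unsigned int mcdi_sketch_vport_vlan_tags(unsigned int tag0,
						       unsigned int tag1)
{
	return ((tag0 & 0xffffu) << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN) |
	       ((tag1 & 0xffffu) << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN);
}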


/***********************************/
/* MC_CMD_VPORT_FREE
 * de-allocate a v-port.
 */
#define MC_CMD_VPORT_FREE 0x97

/* MC_CMD_VPORT_FREE_IN msgrequest */
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0

/* MC_CMD_VPORT_FREE_OUT msgresponse */
#define MC_CMD_VPORT_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_VADAPTOR_ALLOC
 * allocate a v-adaptor.
 */
#define MC_CMD_VADAPTOR_ALLOC 0x98

/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Flags controlling v-adaptor creation */
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12

/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0


/***********************************/
/* MC_CMD_VADAPTOR_FREE
 * de-allocate a v-adaptor.
 */
#define MC_CMD_VADAPTOR_FREE 0x99

/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0

/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
 * assign a port to a PCI function.
 */
#define MC_CMD_EVB_PORT_ASSIGN 0x9a

/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
/* The target function to modify. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16

/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
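
/* Illustrative sketch: FUNCTION is one dword with the PF number in its low
 * 16 bits and the VF number in its high 16 bits, per the _LBN/_WIDTH pairs
 * above. Building that dword (host order) might look like:
 */
static inline unsigned int mcdi_sketch_evb_function(unsigned int pf,
						    unsigned int vf)
{
	return ((pf & 0xffffu) << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
	       ((vf & 0xffffu) << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);
}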


/***********************************/
/* MC_CMD_RDWR_A64_REGIONS
 * Assign the 64 bit region addresses.
 */
#define MC_CMD_RDWR_A64_REGIONS 0x9b

/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
/* Write enable bits 0-3, set to write, clear to read. */
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
 * regardless of the state of the write bits in the request.
 */
#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
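
/* Illustrative sketch: WRITE_MASK is the single byte at offset 16 (bit 128
 * onwards); bit n set means "write REGIONn", bit n clear means "read it
 * back". Selecting, say, regions 0 and 2 for writing could read:
 */
static inline void mcdi_sketch_a64_write_mask(unsigned char *req,
					      unsigned int mask)
{
	/* e.g. mask = (1u << 0) | (1u << 2) */
	req[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST] =
		(unsigned char)(mask & 0xfu);
}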


/***********************************/
/* MC_CMD_ONLOAD_STACK_ALLOC
 * Allocate an Onload stack ID.
 */
#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c

/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0

/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
/* The handle of the new Onload stack */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0


/***********************************/
/* MC_CMD_ONLOAD_STACK_FREE
 * Free an Onload stack ID.
 */
#define MC_CMD_ONLOAD_STACK_FREE 0x9d

/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0

/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
 * Allocate an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e

/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* The type of context to allocate */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
/* enum: Allocate a context for exclusive use. The key and indirection table
 * must be explicitly configured.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
/* enum: Allocate a context for shared use; this will spread across a range of
 * queues, but the key and indirection table are pre-configured and may not be
 * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
/* Number of queues spanned by this context, in the range 1-64; valid offsets
 * in the indirection table will be in the range 0 to NUM_QUEUES-1.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8

/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
/* The handle of the new RSS context */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_FREE
 * Free an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_FREE 0x9f

/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_KEY
 * Set the Toeplitz hash key for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0

/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40

/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_GET_KEY
 * Get the Toeplitz hash key for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1

/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40


/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_TABLE
 * Set the indirection table for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2

/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128

/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
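
/* Illustrative sketch: each of the 128 table bytes selects a queue offset in
 * the range 0..NUM_QUEUES-1. A simple round-robin spread over "num_queues"
 * queues, written straight into the request buffer, might look like:
 */
static inline void mcdi_sketch_rss_fill_table(unsigned char *req,
					      unsigned int num_queues)
{
	unsigned int i;

	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
		req[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST + i] =
			(unsigned char)(i % num_queues);
}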


/***********************************/
/* MC_CMD_RSS_CONTEXT_GET_TABLE
 * Get the indirection table for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3

/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128


/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
 * Set various control flags for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1

/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
/* Hash control flags */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1

/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_GET_FLAGS
 * Get various control flags for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2

/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
/* Hash control flags */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1


/***********************************/
/* MC_CMD_DOT1P_MAPPING_ALLOC
 * Allocate a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4

/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
 * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
 * referenced RSS contexts must span no more than this number.
 */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4

/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
/* The handle of the new .1p mapping */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0


/***********************************/
/* MC_CMD_DOT1P_MAPPING_FREE
 * Free a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_FREE 0xa5

/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0

/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_DOT1P_MAPPING_SET_TABLE
 * Set the mapping table for a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6

/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
 * handle)
 */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32

/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0


/***********************************/
/* MC_CMD_DOT1P_MAPPING_GET_TABLE
 * Get the mapping table for a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7

/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0

/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
 * handle)
 */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32


/***********************************/
/* MC_CMD_GET_VECTOR_CFG
 * Get Interrupt Vector config for this PF.
 */
#define MC_CMD_GET_VECTOR_CFG 0xbf

/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0

/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
/* Base absolute interrupt vector number. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8


/***********************************/
/* MC_CMD_SET_VECTOR_CFG
 * Set Interrupt Vector config for this PF.
 */
#define MC_CMD_SET_VECTOR_CFG 0xc0

/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
 * let the system find a suitable base.
 */
#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
/* Number of interrupt vectors to allocate to this PF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8

/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0


/***********************************/
/* MC_CMD_RMON_RX_CLASS_STATS
 * Retrieve rmon rx class statistics
 */
#define MC_CMD_RMON_RX_CLASS_STATS 0xc3

/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
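
/* Illustrative sketch: the response length is variable, so the number of
 * stats words must be recovered from the actual response size. Inverting the
 * _LEN(num) expression (0+4*(num)) gives:
 */
static inline unsigned int mcdi_sketch_rmon_num_stats(unsigned int resp_len)
{
	return resp_len / MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN;
}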
5797
5798
5799/***********************************/
5800/* MC_CMD_RMON_TX_CLASS_STATS
5801 * Retrieve rmon tx class statistics
5802 */
5803#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
5804
5805/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
5806#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
5807/* flags */
5808#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
5809#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
5810#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
5811#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
5812#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
5813
5814/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
5815#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
5816#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
5817#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
5818/* Array of stats */
5819#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
5820#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
5821#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
5822#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
5823
5824
5825/***********************************/
5826/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
5827 * Retrieve rmon rx super_class statistics
5828 */
5829#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
5830
5831/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
5832#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
5833/* flags */
5834#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
5835#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
5836#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
5837#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
5838#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
5839
5840/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
5841#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
5842#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
5843#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
5844/* Array of stats */
5845#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
5846#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
5847#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
5848#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
5849
5850
5851/***********************************/
5852/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
5853 * Retrieve rmon tx super_class statistics
5854 */
5855#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
5856
5857/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
5858#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
5859/* flags */
5860#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
5861#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
5862#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
5863#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
5864#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
5865
5866/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
5867#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
5868#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
5869#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
5870/* Array of stats */
5871#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
5872#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
5873#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
5874#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
5875
5876
5877/***********************************/
5878/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
5879 * Add qid to class for statistics collection
5880 */
5881#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
5882
5883/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
5884#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
5885/* class */
5886#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
5887/* qid */
5888#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
5889/* flags */
5890#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
5891#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
5892#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
5893#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
5894#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
5895#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
5896#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
5897
5898/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
5899#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
5900
5901
5902/***********************************/
5903/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
5904 * Add qid to class for statistics collection
5905 */
5906#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
5907
5908/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
5909#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
5910/* class */
5911#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
5912/* qid */
5913#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
5914/* flags */
5915#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
5916#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
5917#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
5918#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
5919#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
5920#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
5921#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
5922
5923/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
5924#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
5925
5926
5927/***********************************/
5928/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
5929 * Add qid to class for statistics collection
5930 */
5931#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
5932
5933/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
5934#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
5935/* class */
5936#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
5937/* qid */
5938#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
5939/* flags */
5940#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
5941#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
5942#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
5943#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
5944#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
5945#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
5946#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
5947
5948/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
5949#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
5950
5951
5952/***********************************/
5953/* MC_CMD_RMON_ALLOC_CLASS
5954 * Allocate an rmon class
5955 */
5956#define MC_CMD_RMON_ALLOC_CLASS 0xca
5957
5958/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
5959#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
5960
5961/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
5962#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
5963/* class */
5964#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
5965
5966
5967/***********************************/
5968/* MC_CMD_RMON_DEALLOC_CLASS
5969 * Deallocate an rmon class
5970 */
5971#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
5972
5973/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
5974#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
5975/* class */
5976#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
5977
5978/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
5979#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
5980
5981
5982/***********************************/
5983/* MC_CMD_RMON_ALLOC_SUPER_CLASS
5984 * Allocate an rmon super_class
5985 */
5986#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
5987
5988/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
5989#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
5990
5991/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
5992#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
5993/* super_class */
5994#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
5995
5996
5997/***********************************/
5998/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
5999 * Deallocate an rmon super_class
6000 */
6001#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
6002
6003/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
6004#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
6005/* super_class */
6006#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
6007
6008/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
6009#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
6010
6011
6012/***********************************/
6013/* MC_CMD_RMON_RX_UP_CONV_STATS
6014 * Retrieve up converter statistics
6015 */
6016#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
6017
6018/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
6019#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
6020/* flags */
6021#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
6022#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
6023#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
6024#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
6025#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
6026
6027/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
6028#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
6029#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
6030#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
6031/* Array of stats */
6032#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
6033#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
6034#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
6035#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
6036
6037
6038/***********************************/
6039/* MC_CMD_RMON_RX_IPI_STATS
6040 * Retrieve rx ipi stats
6041 */
6042#define MC_CMD_RMON_RX_IPI_STATS 0xcf
6043
6044/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
6045#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
6046/* flags */
6047#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
6048#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
6049#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
6050#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
6051#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
6052
6053/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
6054#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
6055#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
6056#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
6057/* Array of stats */
6058#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
6059#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
6060#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
6061#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
6062
6063
6064/***********************************/
6065/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
6066 * Retrieve rx ipsec cntxt_ptr indexed stats
6067 */
6068#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
6069
6070/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
6071#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
6072/* flags */
6073#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
6074#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
6075#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
6076#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
6077#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
6078
6079/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
6080#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
6081#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
6082#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
6083/* Array of stats */
6084#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
6085#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
6086#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
6087#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
6088
6089
6090/***********************************/
6091/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
6092 * Retrieve rx ipsec port indexed stats
6093 */
6094#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
6095
6096/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
6097#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
6098/* flags */
6099#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
6100#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
6101#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
6102#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
6103#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
6104
6105/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
6106#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
6107#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
6108#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
6109/* Array of stats */
6110#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
6111#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
6112#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
6113#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
6114
6115
6116/***********************************/
6117/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
6118 * Retrieve rx ipsec overflow
6119 */
6120#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
6121
6122/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
6123#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
6124/* flags */
6125#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
6126#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
6127#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
6128#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
6129#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
6130
6131/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
6132#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
6133#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
6134#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
6135/* Array of stats */
6136#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
6137#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
6138#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
6139#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
6140
6141
6142/***********************************/
6143/* MC_CMD_VPORT_ADD_MAC_ADDRESS
6144 * Add a MAC address to a v-port
6145 */
6146#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
6147
6148/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
6149#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
6150/* The handle of the v-port */
6151#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
6152/* MAC address to add */
6153#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
6154#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
6155
6156/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
6157#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
6158
6159
6160/***********************************/
6161/* MC_CMD_VPORT_DEL_MAC_ADDRESS
6162 * Delete a MAC address from a v-port
6163 */
6164#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
6165
6166/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
6167#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
6168/* The handle of the v-port */
6169#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
6170/* MAC address to delete */
6171#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
6172#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
6173
6174/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
6175#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
6176
6177
6178/***********************************/
6179/* MC_CMD_VPORT_GET_MAC_ADDRESSES
6180 * Get the MAC addresses assigned to a v-port
6181 */
6182#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
6183
6184/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
6185#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
6186/* The handle of the v-port */
6187#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
6188
6189/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
6190#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
6191#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
6192#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
6193/* The number of MAC addresses returned */
6194#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
6195/* Array of MAC addresses */
6196#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
6197#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
6198#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
6199#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
6200
6201
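/* A sketch (illustration only, not part of this patch) of parsing the
 * variable-length GET_MAC_ADDRESSES response: a count dword followed by an
 * array of 6-byte MAC addresses. The function name is hypothetical; addrs
 * must hold at least 6 * max_addrs bytes.
 */
static int efx_vport_get_mac_addresses(struct efx_nic *efx, u32 vport_id,
				       u8 *addrs, size_t max_addrs,
				       size_t *n_addrs)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen, count;
	int rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, vport_id);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	count = MCDI_DWORD(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
	count = min(count, max_addrs);
	memcpy(addrs, MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR),
	       count * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN);
	*n_addrs = count;
	return 0;
}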
6202/***********************************/
6203/* MC_CMD_DUMP_BUFTBL_ENTRIES
6204 * Dump buffer table entries, mainly for command client debug use. Dumps
6205 * absolute entries, and does not use chunk handles. All entries must be in
6206 * range, and used for q page mapping, although the latter restriction may be
6207 * lifted in future.
6208 */
6209#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
6210
6211/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
6212#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
6213/* Index of the first buffer table entry. */
6214#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
6215/* Number of buffer table entries to dump. */
6216#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
6217
6218/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
6219#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
6220#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
6221#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
6222/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
6223#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
6224#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
6225#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
6226#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
6227
6228
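/* A sketch (illustrative, not from this patch) of requesting a window of
 * raw buffer table entries. Each returned entry is 12 bytes (BUFTBL_ENTRY),
 * so at most 21 entries fit in one 252-byte response; entries must be at
 * least MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX bytes. The function name is
 * hypothetical.
 */
static int efx_dump_buftbl_entries(struct efx_nic *efx, u32 first_id,
				   u32 num, u8 *entries, size_t *outlen)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX);
	int rc;

	MCDI_SET_DWORD(inbuf, DUMP_BUFTBL_ENTRIES_IN_FIRSTID, first_id);
	MCDI_SET_DWORD(inbuf, DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES, num);
	rc = efx_mcdi_rpc(efx, MC_CMD_DUMP_BUFTBL_ENTRIES,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), outlen);
	if (rc)
		return rc;
	memcpy(entries, MCDI_PTR(outbuf, DUMP_BUFTBL_ENTRIES_OUT_ENTRY), *outlen);
	return 0;
}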
6229/***********************************/
6230/* MC_CMD_SET_RXDP_CONFIG
6231 * Set global RXDP configuration settings
6232 */
6233#define MC_CMD_SET_RXDP_CONFIG 0xc1
6234
6235/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
6236#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
6237#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
6238#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
6239#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
6240
6241/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
6242#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
6243
6244
6245/***********************************/
6246/* MC_CMD_GET_RXDP_CONFIG
6247 * Get global RXDP configuration settings
6248 */
6249#define MC_CMD_GET_RXDP_CONFIG 0xc2
6250
6251/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
6252#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
6253
6254/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
6255#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
6256#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
6257#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
6258#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
6259
6260
6261/***********************************/
6262/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
6263 * Retrieve rx class drop stats
6264 */
6265#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
6266
6267/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
6268#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
6269/* flags */
6270#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
6271#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
6272#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
6273#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
6274#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
6275
6276/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
6277#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
6278#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
6279#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
6280/* Array of stats */
6281#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
6282#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
6283#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
6284#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
6285
6286
6287/***********************************/
6288/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
6289 * Retrieve rx super class drop stats
6290 */
6291#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
6292
6293/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
6294#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
6295/* flags */
6296#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
6297#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
6298#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
6299#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
6300#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
6301
6302/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
6303#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
6304#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
6305#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
6306/* Array of stats */
6307#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
6308#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
6309#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
6310#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
6311
6312
6313/***********************************/
6314/* MC_CMD_RMON_RX_ERRORS_STATS
6315 * Retrieve rxdp errors
6316 */
6317#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
6318
6319/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
6320#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
6321/* flags */
6322#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
6323#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
6324#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
6325#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
6326#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
6327
6328/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
6329#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
6330#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
6331#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
6332/* Array of stats */
6333#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
6334#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
6335#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
6336#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
6337
6338
6339/***********************************/
6340/* MC_CMD_RMON_RX_OVERFLOW_STATS
6341 * Retrieve rxdp overflow
6342 */
6343#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
6344
6345/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
6346#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
6347/* flags */
6348#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
6349#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
6350#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
6351#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
6352#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
6353
6354/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
6355#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
6356#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
6357#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
6358/* Array of stats */
6359#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
6360#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
6361#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
6362#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
6363
6364
6365/***********************************/
6366/* MC_CMD_RMON_TX_IPI_STATS
6367 * Retrieve tx ipi stats
6368 */
6369#define MC_CMD_RMON_TX_IPI_STATS 0xd7
6370
6371/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
6372#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
6373/* flags */
6374#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
6375#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
6376#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
6377#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
6378#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
6379
6380/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
6381#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
6382#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
6383#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
6384/* Array of stats */
6385#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
6386#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
6387#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
6388#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
6389
6390
6391/***********************************/
6392/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
6393 * Retrieve tx ipsec counters by cntxt_ptr
6394 */
6395#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
6396
6397/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
6398#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
6399/* flags */
6400#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
6401#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
6402#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
6403#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
6404#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
6405
6406/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
6407#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
6408#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
6409#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
6410/* Array of stats */
6411#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
6412#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
6413#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
6414#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
6415
6416
6417/***********************************/
6418/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
6419 * Retrieve tx ipsec counters by port
6420 */
6421#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
6422
6423/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
6424#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
6425/* flags */
6426#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
6427#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
6428#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
6429#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
6430#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
6431
6432/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
6433#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
6434#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
6435#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
6436/* Array of stats */
6437#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
6438#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
6439#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
6440#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
6441
6442
6443/***********************************/
6444/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
6445 * Retrieve tx ipsec overflow
6446 */
6447#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
6448
6449/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
6450#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
6451/* flags */
6452#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
6453#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
6454#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
6455#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
6456#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
6457
6458/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
6459#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
6460#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
6461#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
6462/* Array of stats */
6463#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
6464#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
6465#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
6466#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
6467
6468
6469/***********************************/
6470/* MC_CMD_RMON_TX_NOWHERE_STATS
6471 * Retrieve tx nowhere stats
6472 */
6473#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
6474
6475/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
6476#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
6477/* flags */
6478#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
6479#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
6480#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
6481#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
6482#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
6483
6484/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
6485#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
6486#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
6487#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
6488/* Array of stats */
6489#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
6490#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
6491#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
6492#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
6493
6494
6495/***********************************/
6496/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
6497 * Retrieve tx nowhere qbb stats
6498 */
6499#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
6500
6501/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
6502#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
6503/* flags */
6504#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
6505#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
6506#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
6507#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
6508#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
6509
6510/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
6511#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
6512#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
6513#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
6514/* Array of stats */
6515#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
6516#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
6517#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
6518#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
6519
6520
6521/***********************************/
6522/* MC_CMD_RMON_TX_ERRORS_STATS
6523 * Retrieve txdp errors
6524 */
6525#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
6526
6527/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
6528#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
6529/* flags */
6530#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
6531#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
6532#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
6533#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
6534#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
6535
6536/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
6537#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
6538#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
6539#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
6540/* Array of stats */
6541#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
6542#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
6543#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
6544#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
6545
6546
6547/***********************************/
6548/* MC_CMD_RMON_TX_OVERFLOW_STATS
6549 * Retrieve txdp overflow
6550 */
6551#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
6552
6553/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
6554#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
6555/* flags */
6556#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
6557#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
6558#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
6559#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
6560#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
6561
6562/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
6563#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
6564#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
6565#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
6566/* Array of stats */
6567#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
6568#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
6569#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
6570#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
6571
6572
6573/***********************************/
6574/* MC_CMD_RMON_COLLECT_CLASS_STATS
6575 * Explicitly collect class stats at the specified evb port
6576 */
6577#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
6578
6579/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
6580#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
6581/* The port id associated with the vport/pport at which to collect class stats
6582 */
6583#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
6584
6585/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
6586#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
6587/* class */
6588#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
6589
6590
6591/***********************************/
6592/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
6593 * Explicitly collect super class stats at the specified evb port
6594 */
6595#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
6596
6597/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
6598#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
6599/* The port id associated with the vport/pport at which to collect class stats
6600 */
6601#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
6602
6603/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
6604#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
6605/* super_class */
6606#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
6607
6608
6609/***********************************/
6610/* MC_CMD_GET_CLOCK
6611 * Return the system and DPCPU clock frequencies.
6612 */
6613#define MC_CMD_GET_CLOCK 0xac
6614
6615/* MC_CMD_GET_CLOCK_IN msgrequest */
6616#define MC_CMD_GET_CLOCK_IN_LEN 0
6617
6618/* MC_CMD_GET_CLOCK_OUT msgresponse */
6619#define MC_CMD_GET_CLOCK_OUT_LEN 8
6620/* System frequency, MHz */
6621#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
6622/* DPCPU frequency, MHz */
6623#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
6624
6625
6626/***********************************/
6627/* MC_CMD_SET_CLOCK
6628 * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
6629 */
6630#define MC_CMD_SET_CLOCK 0xad
6631
6632/* MC_CMD_SET_CLOCK_IN msgrequest */
6633#define MC_CMD_SET_CLOCK_IN_LEN 12
6634/* Requested system frequency in MHz; 0 leaves unchanged. */
6635#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
6636/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
6637#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
6638/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
6639#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
6640
6641/* MC_CMD_SET_CLOCK_OUT msgresponse */
6642#define MC_CMD_SET_CLOCK_OUT_LEN 12
6643/* Resulting system frequency in MHz */
6644#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
6645/* Resulting inter-core frequency in MHz */
6646#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
6647/* Resulting DPCPU frequency in MHz */
6648#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
6649
6650
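/* A minimal sketch (not from this patch; function name hypothetical) of
 * reading the clocks. SET_CLOCK follows the same pattern, with 0 meaning
 * "leave unchanged" and the response echoing the resulting frequencies.
 */
static int efx_get_clock_freqs(struct efx_nic *efx, u32 *sys_mhz, u32 *dpcpu_mhz)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CLOCK_OUT_LEN)
		return -EIO;
	*sys_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	*dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
	return 0;
}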
6651/***********************************/
6652/* MC_CMD_DPCPU_RPC
6653 * Send an arbitrary DPCPU message.
6654 */
6655#define MC_CMD_DPCPU_RPC 0xae
6656
6657/* MC_CMD_DPCPU_RPC_IN msgrequest */
6658#define MC_CMD_DPCPU_RPC_IN_LEN 36
6659#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
6660/* enum: RxDPCPU */
6661#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
6662/* enum: TxDPCPU0 */
6663#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
6664/* enum: TxDPCPU1 */
6665#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
6666/* First 8 bits [39:32] of DATA are consumed by the MC-DPCPU protocol and must
6667 * be initialised to zero
6668 */
6669#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
6670#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
6671#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
6672#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
6673#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
6674#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
6675#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
6676#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
6677#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
6678#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
6679#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
6680#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
6681#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
6682#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
6683#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
6684#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
6685#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
6686#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
6687#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
6688#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
6689#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
6690#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
6691#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
6692#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
6693#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
6694#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
6695#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
6696#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
6697#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
6698#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
6699#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
6700#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
6701#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
6702#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
6703#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
6704#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
6705#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
6706#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
6707#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
6708#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
6709#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
6710#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
6711#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
6712/* Register data to write. Only valid in write/write-read. */
6713#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
6714/* Register address. */
6715#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
6716
6717/* MC_CMD_DPCPU_RPC_OUT msgresponse */
6718#define MC_CMD_DPCPU_RPC_OUT_LEN 36
6719#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
6720/* DATA */
6721#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
6722#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
6723#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
6724#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
6725#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
6726#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
6727#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
6728#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
6729#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
6730#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
6731#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
6732#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
6733
6734
6735/***********************************/
6736/* MC_CMD_TRIGGER_INTERRUPT
6737 * Trigger an interrupt by prodding the BIU.
6738 */
6739#define MC_CMD_TRIGGER_INTERRUPT 0xe3
6740
6741/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
6742#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
6743/* Interrupt level relative to base for function. */
6744#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
6745
6746/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
6747#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
6748
6749
6750/***********************************/
6751/* MC_CMD_DUMP_DO
6752 * Take a dump of the DUT state
6753 */
6754#define MC_CMD_DUMP_DO 0xe8
6755
6756/* MC_CMD_DUMP_DO_IN msgrequest */
6757#define MC_CMD_DUMP_DO_IN_LEN 52
6758#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
6759#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
6760#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
6761#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
6762#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
6763#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
6764#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
6765#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
6766#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
6767#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
6768#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
6769#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
6770#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
6771#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
6772#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
6773#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
6774#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
6775#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
6776#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
6777#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
6778#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
6779#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
6780#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
6781#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
6782/* Enum values, see field(s): */
6783/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
6784#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
6785#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
6786#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
6787#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
6788#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
6789#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
6790#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
6791#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
6792#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
6793
6794/* MC_CMD_DUMP_DO_OUT msgresponse */
6795#define MC_CMD_DUMP_DO_OUT_LEN 4
6796#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
6797
6798
6799/***********************************/
6800/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
6801 * Configure unsolicited dumps
6802 */
6803#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
6804
6805/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
6806#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
6807#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
6808#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
6809/* Enum values, see field(s): */
6810/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
6811#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
6812/* Enum values, see field(s): */
6813/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
6814#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
6815#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
6816#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
6817#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
6818#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
6819#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
6820#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
6821#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
6822#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
6823#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
6824/* Enum values, see field(s): */
6825/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
6826#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
6827/* Enum values, see field(s): */
6828/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
6829#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
6830#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
6831#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
6832#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
6833#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
6834#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
6835#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
6836#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
6837#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
6838
6839
6840/***********************************/
6841/* MC_CMD_SET_PSU
6842 * Adjusts power supply parameters. This is a warranty-voiding operation.
6843 * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
6844 * the parameter is out of range.
6845 */
6846#define MC_CMD_SET_PSU 0xea
6847
6848/* MC_CMD_SET_PSU_IN msgrequest */
6849#define MC_CMD_SET_PSU_IN_LEN 12
6850#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
6851#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
6852#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
6853#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
6854#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
6855/* desired value, e.g. voltage in mV */
6856#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
6857
6858/* MC_CMD_SET_PSU_OUT msgresponse */
6859#define MC_CMD_SET_PSU_OUT_LEN 0
6860
6861
6862/***********************************/
6863/* MC_CMD_GET_FUNCTION_INFO
6864 * Get function information: PF and VF number.
6865 */
6866#define MC_CMD_GET_FUNCTION_INFO 0xec
6867
6868/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
6869#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
6870
6871/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
6872#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
6873#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
6874#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
6875
6876
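/* A minimal sketch (not from this patch; function name hypothetical): a
 * zero-length request returning the calling function's PF and VF numbers.
 */
static int efx_get_function_info(struct efx_nic *efx, u32 *pf, u32 *vf)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
		return -EIO;
	*pf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	*vf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}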
6877/***********************************/
6878/* MC_CMD_ENABLE_OFFLINE_BIST
6879 * Enters offline BIST mode. All queues are torn down, the chip enters
6880 * quiescent mode, and the calling function gets exclusive MCDI ownership. The
6881 * only way out is reboot.
6882 */
6883#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
6884
6885/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
6886#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
6887
6888/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
6889#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
6890
6891
6892/***********************************/
6893/* MC_CMD_START_KR_EYE_PLOT
6894 * Start KR Serdes Eye diagram plot on a given lane. The lane must have a
6895 * valid signal.
6896 */
6897#define MC_CMD_START_KR_EYE_PLOT 0xee
6898
6899/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
6900#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
6901#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
6902
6903/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
6904#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
6905
6906
6907/***********************************/
6908/* MC_CMD_POLL_KR_EYE_PLOT
6909 * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
6910 * should call this command repeatedly after starting the eye plot, until no
6911 * more data is returned.
6912 */
6913#define MC_CMD_POLL_KR_EYE_PLOT 0xef
6914
6915/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
6916#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
6917
6918/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
6919#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
6920#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
6921#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
6922#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
6923#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
6924#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
6925#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
6926
6927
6928/***********************************/
6929/* MC_CMD_READ_FUSES
6930 * Read data programmed into the device's One-Time-Programmable (OTP) fuses
6931 */
6932#define MC_CMD_READ_FUSES 0xf0
6933
6934/* MC_CMD_READ_FUSES_IN msgrequest */
6935#define MC_CMD_READ_FUSES_IN_LEN 8
6936/* Offset in OTP to read */
6937#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
6938/* Length of data to read in bytes */
6939#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
6940
6941/* MC_CMD_READ_FUSES_OUT msgresponse */
6942#define MC_CMD_READ_FUSES_OUT_LENMIN 4
6943#define MC_CMD_READ_FUSES_OUT_LENMAX 252
6944#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
6945/* Length of returned OTP data in bytes */
6946#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
6947/* Returned data */
6948#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
6949#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
6950#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
6951#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
6952
6953
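/* A sketch (illustrative, not from this patch; function name hypothetical)
 * of reading OTP data in chunks: each response carries at most 248 bytes,
 * so longer reads loop, advancing OFFSET by the LENGTH reported in each
 * reply.
 */
static int efx_read_fuses(struct efx_nic *efx, u32 offset, u8 *data, size_t len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LENMAX);
	size_t outlen;
	int rc;

	while (len) {
		u32 chunk = min_t(size_t, len,
				  MC_CMD_READ_FUSES_OUT_DATA_MAXNUM);

		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset);
		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, chunk);
		rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		chunk = MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH);
		if (!chunk || chunk > len)
			return -EIO;
		memcpy(data, MCDI_PTR(outbuf, READ_FUSES_OUT_DATA), chunk);
		data += chunk;
		offset += chunk;
		len -= chunk;
	}
	return 0;
}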
6954/***********************************/
6955/* MC_CMD_KR_TUNE
6956 * Get or set KR Serdes RXEQ and TX Driver settings
6957 */
6958#define MC_CMD_KR_TUNE 0xf1
6959
6960/* MC_CMD_KR_TUNE_IN msgrequest */
6961#define MC_CMD_KR_TUNE_IN_LENMIN 4
6962#define MC_CMD_KR_TUNE_IN_LENMAX 252
6963#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
6964/* Requested operation */
6965#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
6966#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
6967/* enum: Get current RXEQ settings */
6968#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
6969/* enum: Override RXEQ settings */
6970#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
6971/* enum: Get current TX Driver settings */
6972#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
6973/* enum: Override TX Driver settings */
6974#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
6975/* enum: Force KR Serdes reset / recalibration */
6976#define MC_CMD_KR_TUNE_IN_RECAL 0x4
6977/* Align the arguments to 32 bits */
6978#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
6979#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
6980/* Arguments specific to the operation */
6981#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
6982#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
6983#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
6984#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
6985
6986/* MC_CMD_KR_TUNE_OUT msgresponse */
6987#define MC_CMD_KR_TUNE_OUT_LEN 0
6988
6989/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
6990#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
6991/* Requested operation */
6992#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
6993#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
6994/* Align the arguments to 32 bits */
6995#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
6996#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
6997
6998/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
6999#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
7000#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
7001#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
7002/* RXEQ Parameter */
7003#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
7004#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
7005#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
7006#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
7007#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
7008#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
7009/* enum: Attenuation (0-15) */
7010#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
7011/* enum: CTLE Boost (0-15) */
7012#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
7013/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
7014#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
7015/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
7016#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
7017/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
7018#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
7019/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
7020#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
7021/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
7022#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
7023#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
7024#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
7025#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
7026#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
7027#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
7028#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
7029#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
7030#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
7031#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
7032#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
7033#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
7034#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
7035#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
7036#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
7037#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
7038
7039/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
7040#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
7041#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
7042#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
7043/* Requested operation */
7044#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
7045#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
7046/* Align the arguments to 32 bits */
7047#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
7048#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
7049/* RXEQ Parameter */
7050#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
7051#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
7052#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
7053#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
7054#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
7055#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
7056/* Enum values, see field(s): */
7057/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
7058#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
7059#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
7060/* Enum values, see field(s): */
7061/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
7062#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
7063#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
7064#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
7065#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
7066#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
7067#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
7068#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
7069#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
7070
7071/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
7072#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
7073
7074/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
7075#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
7076/* Requested operation */
7077#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
7078#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
7079/* Align the arguments to 32 bits */
7080#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
7081#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
7082
7083/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
7084#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
7085
7086
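/* A sketch (illustrative, not from this patch; function name hypothetical)
 * of issuing RXEQ_GET through the generic KR_TUNE command and decoding the
 * packed PARAM dwords by hand: ID in bits [7:0], LANE in [10:8], INITIAL
 * in [23:16] and CURRENT in [31:24]. Writing the whole OP dword also
 * zeroes the three RSVD padding bytes.
 */
static int efx_kr_tune_rxeq_dump(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX);
	const __le32 *params;
	size_t outlen, i;
	int rc;

	MCDI_SET_DWORD(inbuf, KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP,
		       MC_CMD_KR_TUNE_IN_RXEQ_GET);
	rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	params = (const __le32 *)MCDI_PTR(outbuf, KR_TUNE_RXEQ_GET_OUT_PARAM);
	for (i = 0; i < outlen / 4; i++) {
		u32 p = le32_to_cpu(params[i]);

		netif_dbg(efx, hw, efx->net_dev,
			  "KR RXEQ param id %u lane %u current %u\n",
			  p & 0xff, (p >> 8) & 0x7, (p >> 24) & 0xff);
	}
	return 0;
}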
7087/***********************************/
7088/* MC_CMD_PCIE_TUNE
7089 * Get or set PCIE Serdes RXEQ and TX Driver settings
7090 */
7091#define MC_CMD_PCIE_TUNE 0xf2
7092
7093/* MC_CMD_PCIE_TUNE_IN msgrequest */
7094#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
7095#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
7096#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
7097/* Requested operation */
7098#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
7099#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
7100/* enum: Get current RXEQ settings */
7101#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
7102/* enum: Override RXEQ settings */
7103#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
7104/* enum: Get current TX Driver settings */
7105#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
7106/* enum: Override TX Driver settings */
7107#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
7108/* Align the arguments to 32 bits */
7109#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
7110#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
7111/* Arguments specific to the operation */
7112#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
7113#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
7114#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
7115#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
7116
7117/* MC_CMD_PCIE_TUNE_OUT msgresponse */
7118#define MC_CMD_PCIE_TUNE_OUT_LEN 0
7119
7120/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
7121#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
7122/* Requested operation */
7123#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
7124#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
7125/* Align the arguments to 32 bits */
7126#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
7127#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
7128
7129/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
7130#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
7131#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
7132#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
7133/* RXEQ Parameter */
7134#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
7135#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
7136#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
7137#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
7138#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
7139#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
7140/* enum: Attenuation (0-15) */
7141#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
7142/* enum: CTLE Boost (0-15) */
7143#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
7144/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
7145#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
7146/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
7147#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
7148/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
7149#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
7150/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
7151#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
7152/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
7153#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
7154#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
7155#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
7156#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
7157#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
7158#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
7159#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
7160#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
7161#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
7162#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
7163#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
7164#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
7165#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
7166#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
7167#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
7168#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
7169
7170/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
7171#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
7172/* Requested operation */
7173#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
7174#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
7175/* Align the arguments to 32 bits */
7176#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
7177#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
7178
7179/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
7180#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
7181#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
7182#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
7183/* TXEQ Parameter */
7184#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
7185#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
7186#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
7187#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
7188#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
7189#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
7190/* enum: TxMargin (PIPE) */
7191#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
7192/* enum: TxSwing (PIPE) */
7193#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
7194/* enum: De-emphasis coefficient C(-1) (PIPE) */
7195#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
7196/* enum: De-emphasis coefficient C(0) (PIPE) */
7197#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
7198/* enum: De-emphasis coefficient C(+1) (PIPE) */
7199#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
7200#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
7201#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
7202/* Enum values, see field(s): */
7203/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
7204#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
7205#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
7206#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
7207#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
7208
7209
7210/***********************************/
7211/* MC_CMD_LICENSING
7212 * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
7213 */
7214#define MC_CMD_LICENSING 0xf3
7215
7216/* MC_CMD_LICENSING_IN msgrequest */
7217#define MC_CMD_LICENSING_IN_LEN 4
7218/* identifies the type of operation requested */
7219#define MC_CMD_LICENSING_IN_OP_OFST 0
7220/* enum: re-read and apply licenses after a license key partition update; note
7221 * that this operation returns a zero-length response
7222 */
7223#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
7224/* enum: report counts of installed licenses */
7225#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
7226
7227/* MC_CMD_LICENSING_OUT msgresponse */
7228#define MC_CMD_LICENSING_OUT_LEN 28
7229/* count of application keys which are valid */
7230#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
7231/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
7232 * MC_CMD_FC_OP_LICENSE)
7233 */
7234#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
7235/* count of application keys which are invalid due to being blacklisted */
7236#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
7237/* count of application keys which are invalid due to being unverifiable */
7238#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
7239/* count of application keys which are invalid due to being for the wrong node
7240 */
7241#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
7242/* licensing state (for diagnostics; the exact meaning of the bits in this
7243 * field is private to the firmware)
7244 */
7245#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
7246/* licensing subsystem self-test report (for manftest) */
7247#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
7248/* enum: licensing subsystem self-test failed */
7249#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
7250/* enum: licensing subsystem self-test passed */
7251#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
7252
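
For orientation, the hedged sketch below (not part of this patch) shows how a driver function might issue the GET_KEY_STATS operation using the definitions above, assuming the standard MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD and efx_mcdi_rpc) from mcdi.h; the function name is illustrative.

/* Hedged, illustrative sketch (not from this patch): query license key
 * counts via MC_CMD_LICENSING using the definitions above and the
 * standard MCDI helpers declared in mcdi.h.
 */
static int efx_mcdi_get_license_stats(struct efx_nic *efx, u32 *valid_keys)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,
		       MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSING_OUT_LEN)
		return -EIO;

	*valid_keys = MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS);
	return 0;
}
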
7253
7254/***********************************/
7255/* MC_CMD_MC2MC_PROXY
7256 * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
7257 * This will fail on a single-core system.
7258 */
7259#define MC_CMD_MC2MC_PROXY 0xf4
2402 7260
2403 7261
2404#endif /* MCDI_PCOL_H */ 7262#endif /* MCDI_PCOL_H */
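
Throughout this header, the _LBN/_WIDTH pairs give the lowest bit number and bit width of a subfield within a dword rather than byte offsets. As a hedged illustration (generic C, not from this patch), the PARAM_LANE subfield of a TXEQ parameter word could be extracted as below; real driver code would use the MCDI field accessors instead of open-coding the shift and mask.

/* Illustrative only: decode the LANE subfield (bits 8..11) of a raw
 * 32-bit TXEQ parameter word using the LBN/WIDTH macros above.
 */
static inline unsigned int txeq_param_lane(u32 param)
{
	return (param >> MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN) &
	       ((1u << MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH) - 1);
}
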
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 13cb40fe90c1..8d33da6697fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2009-2010 Solarflare Communications Inc. 3 * Copyright 2009-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -36,7 +36,7 @@ struct efx_mcdi_phy_data {
36static int 36static int
37efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) 37efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
38{ 38{
39 u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; 39 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
40 size_t outlen; 40 size_t outlen;
41 int rc; 41 int rc;
42 42
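
The recurring conversion in this file replaces raw u8 arrays with MCDI_DECLARE_BUF(), which declares MCDI buffers with dword granularity and natural alignment, as the MCDI field accessors assume. The macro's approximate shape (paraphrased from mcdi.h, shown here only for orientation) is:

/* Paraphrased for orientation (see mcdi.h for the real definition):
 * an MCDI buffer is an array of efx_dword_t, naturally 4-byte aligned
 * and rounded up to whole dwords, which is what MCDI_SET_DWORD() and
 * MCDI_DWORD() expect.
 */
#define MCDI_DECLARE_BUF(_name, _len)					\
	efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
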
@@ -78,7 +78,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
78 u32 flags, u32 loopback_mode, 78 u32 flags, u32 loopback_mode,
79 u32 loopback_speed) 79 u32 loopback_speed)
80{ 80{
81 u8 inbuf[MC_CMD_SET_LINK_IN_LEN]; 81 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
82 int rc; 82 int rc;
83 83
84 BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); 84 BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -102,7 +102,7 @@ fail:
102 102
103static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) 103static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
104{ 104{
105 u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN]; 105 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
106 size_t outlen; 106 size_t outlen;
107 int rc; 107 int rc;
108 108
@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
111 if (rc) 111 if (rc)
112 goto fail; 112 goto fail;
113 113
114 if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { 114 if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
115 MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
115 rc = -EIO; 116 rc = -EIO;
116 goto fail; 117 goto fail;
117 } 118 }
@@ -125,16 +126,16 @@ fail:
125 return rc; 126 return rc;
126} 127}
127 128
128int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, 129static int efx_mcdi_mdio_read(struct net_device *net_dev,
129 unsigned int prtad, unsigned int devad, u16 addr, 130 int prtad, int devad, u16 addr)
130 u16 *value_out, u32 *status_out)
131{ 131{
132 u8 inbuf[MC_CMD_MDIO_READ_IN_LEN]; 132 struct efx_nic *efx = netdev_priv(net_dev);
133 u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN]; 133 MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
134 MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
134 size_t outlen; 135 size_t outlen;
135 int rc; 136 int rc;
136 137
137 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus); 138 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
138 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); 139 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
139 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); 140 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
140 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); 141 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
@@ -144,25 +145,27 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
144 if (rc) 145 if (rc)
145 goto fail; 146 goto fail;
146 147
147 *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); 148 if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
148 *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS); 149 MC_CMD_MDIO_STATUS_GOOD)
149 return 0; 150 return -EIO;
151
152 return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
150 153
151fail: 154fail:
152 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 155 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
153 return rc; 156 return rc;
154} 157}
155 158
156int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, 159static int efx_mcdi_mdio_write(struct net_device *net_dev,
157 unsigned int prtad, unsigned int devad, u16 addr, 160 int prtad, int devad, u16 addr, u16 value)
158 u16 value, u32 *status_out)
159{ 161{
160 u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN]; 162 struct efx_nic *efx = netdev_priv(net_dev);
161 u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN]; 163 MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
164 MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
162 size_t outlen; 165 size_t outlen;
163 int rc; 166 int rc;
164 167
165 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus); 168 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
166 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); 169 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
167 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); 170 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
168 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); 171 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
@@ -173,7 +176,10 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
173 if (rc) 176 if (rc)
174 goto fail; 177 goto fail;
175 178
176 *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS); 179 if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
180 MC_CMD_MDIO_STATUS_GOOD)
181 return -EIO;
182
177 return 0; 183 return 0;
178 184
179fail: 185fail:
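
With these changes the accessors match the struct mdio_if_info callback signatures and fold the MCDI status word into the return value, so a caller receives either the register value or a negative errno. A hedged usage sketch (caller-side names are illustrative, not from this patch):

/* Hedged, illustrative caller: the accessors now return the register
 * value (>= 0) or a negative errno.
 */
int val = efx_mcdi_mdio_read(net_dev, prtad, devad, addr);
if (val < 0)
	return val;	/* RPC failure, or status != MC_CMD_MDIO_STATUS_GOOD */
regval = (u16)val;	/* 'regval' is a hypothetical u16 local */
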
@@ -304,10 +310,37 @@ static u32 mcdi_to_ethtool_media(u32 media)
304 } 310 }
305} 311}
306 312
313static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
314 struct efx_link_state *link_state,
315 u32 speed, u32 flags, u32 fcntl)
316{
317 switch (fcntl) {
318 case MC_CMD_FCNTL_AUTO:
319 WARN_ON(1); /* This is not a link mode */
320 link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
321 break;
322 case MC_CMD_FCNTL_BIDIR:
323 link_state->fc = EFX_FC_TX | EFX_FC_RX;
324 break;
325 case MC_CMD_FCNTL_RESPOND:
326 link_state->fc = EFX_FC_RX;
327 break;
328 default:
329 WARN_ON(1);
330 case MC_CMD_FCNTL_OFF:
331 link_state->fc = 0;
332 break;
333 }
334
335 link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
336 link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
337 link_state->speed = speed;
338}
339
307static int efx_mcdi_phy_probe(struct efx_nic *efx) 340static int efx_mcdi_phy_probe(struct efx_nic *efx)
308{ 341{
309 struct efx_mcdi_phy_data *phy_data; 342 struct efx_mcdi_phy_data *phy_data;
310 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 343 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
311 u32 caps; 344 u32 caps;
312 int rc; 345 int rc;
313 346
@@ -403,7 +436,7 @@ fail:
403 return rc; 436 return rc;
404} 437}
405 438
406int efx_mcdi_phy_reconfigure(struct efx_nic *efx) 439int efx_mcdi_port_reconfigure(struct efx_nic *efx)
407{ 440{
408 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 441 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
409 u32 caps = (efx->link_advertising ? 442 u32 caps = (efx->link_advertising ?
@@ -414,37 +447,10 @@ int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
414 efx->loopback_mode, 0); 447 efx->loopback_mode, 0);
415} 448}
416 449
417void efx_mcdi_phy_decode_link(struct efx_nic *efx,
418 struct efx_link_state *link_state,
419 u32 speed, u32 flags, u32 fcntl)
420{
421 switch (fcntl) {
422 case MC_CMD_FCNTL_AUTO:
423 WARN_ON(1); /* This is not a link mode */
424 link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
425 break;
426 case MC_CMD_FCNTL_BIDIR:
427 link_state->fc = EFX_FC_TX | EFX_FC_RX;
428 break;
429 case MC_CMD_FCNTL_RESPOND:
430 link_state->fc = EFX_FC_RX;
431 break;
432 default:
433 WARN_ON(1);
434 case MC_CMD_FCNTL_OFF:
435 link_state->fc = 0;
436 break;
437 }
438
439 link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
440 link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
441 link_state->speed = speed;
442}
443
444/* Verify that the forced flow control settings (!EFX_FC_AUTO) are 450/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
445 * supported by the link partner. Warn the user if this isn't the case 451 * supported by the link partner. Warn the user if this isn't the case
446 */ 452 */
447void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) 453static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
448{ 454{
449 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 455 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
450 u32 rmtadv; 456 u32 rmtadv;
@@ -472,7 +478,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
472static bool efx_mcdi_phy_poll(struct efx_nic *efx) 478static bool efx_mcdi_phy_poll(struct efx_nic *efx)
473{ 479{
474 struct efx_link_state old_state = efx->link_state; 480 struct efx_link_state old_state = efx->link_state;
475 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 481 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
476 int rc; 482 int rc;
477 483
478 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 484 WARN_ON(!mutex_is_locked(&efx->mac_lock));
@@ -507,7 +513,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
507static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 513static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
508{ 514{
509 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 515 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
510 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 516 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
511 int rc; 517 int rc;
512 518
513 ecmd->supported = 519 ecmd->supported =
@@ -579,7 +585,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
579 585
580static int efx_mcdi_phy_test_alive(struct efx_nic *efx) 586static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
581{ 587{
582 u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN]; 588 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
583 size_t outlen; 589 size_t outlen;
584 int rc; 590 int rc;
585 591
@@ -615,17 +621,15 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
615 unsigned int retry, i, count = 0; 621 unsigned int retry, i, count = 0;
616 size_t outlen; 622 size_t outlen;
617 u32 status; 623 u32 status;
618 u8 *buf, *ptr; 624 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
625 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
626 u8 *ptr;
619 int rc; 627 int rc;
620 628
621 buf = kzalloc(0x100, GFP_KERNEL);
622 if (buf == NULL)
623 return -ENOMEM;
624
625 BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); 629 BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
626 MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode); 630 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
627 rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN, 631 rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
628 NULL, 0, NULL); 632 inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
629 if (rc) 633 if (rc)
630 goto out; 634 goto out;
631 635
@@ -633,11 +637,11 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
633 for (retry = 0; retry < 100; ++retry) { 637 for (retry = 0; retry < 100; ++retry) {
634 BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); 638 BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
635 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, 639 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
636 buf, 0x100, &outlen); 640 outbuf, sizeof(outbuf), &outlen);
637 if (rc) 641 if (rc)
638 goto out; 642 goto out;
639 643
640 status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT); 644 status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
641 if (status != MC_CMD_POLL_BIST_RUNNING) 645 if (status != MC_CMD_POLL_BIST_RUNNING)
642 goto finished; 646 goto finished;
643 647
@@ -654,7 +658,7 @@ finished:
654 if (efx->phy_type == PHY_TYPE_SFT9001B && 658 if (efx->phy_type == PHY_TYPE_SFT9001B &&
655 (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || 659 (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
656 bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { 660 bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
657 ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); 661 ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
658 if (status == MC_CMD_POLL_BIST_PASSED && 662 if (status == MC_CMD_POLL_BIST_PASSED &&
659 outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { 663 outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
660 for (i = 0; i < 8; i++) { 664 for (i = 0; i < 8; i++) {
@@ -668,8 +672,6 @@ finished:
668 rc = count; 672 rc = count;
669 673
670out: 674out:
671 kfree(buf);
672
673 return rc; 675 return rc;
674} 676}
675 677
@@ -744,8 +746,8 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
744static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, 746static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
745 struct ethtool_eeprom *ee, u8 *data) 747 struct ethtool_eeprom *ee, u8 *data)
746{ 748{
747 u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX]; 749 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
748 u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN]; 750 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
749 size_t outlen; 751 size_t outlen;
750 int rc; 752 int rc;
751 unsigned int payload_len; 753 unsigned int payload_len;
@@ -785,8 +787,7 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
785 space_remaining : payload_len; 787 space_remaining : payload_len;
786 788
787 memcpy(user_data, 789 memcpy(user_data,
788 outbuf + page_off + 790 MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
789 MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
790 to_copy); 791 to_copy);
791 792
792 space_remaining -= to_copy; 793 space_remaining -= to_copy;
@@ -813,10 +814,10 @@ static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
813 } 814 }
814} 815}
815 816
816const struct efx_phy_operations efx_mcdi_phy_ops = { 817static const struct efx_phy_operations efx_mcdi_phy_ops = {
817 .probe = efx_mcdi_phy_probe, 818 .probe = efx_mcdi_phy_probe,
818 .init = efx_port_dummy_op_int, 819 .init = efx_port_dummy_op_int,
819 .reconfigure = efx_mcdi_phy_reconfigure, 820 .reconfigure = efx_mcdi_port_reconfigure,
820 .poll = efx_mcdi_phy_poll, 821 .poll = efx_mcdi_phy_poll,
821 .fini = efx_port_dummy_op_void, 822 .fini = efx_port_dummy_op_void,
822 .remove = efx_mcdi_phy_remove, 823 .remove = efx_mcdi_phy_remove,
@@ -828,3 +829,199 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
828 .get_module_eeprom = efx_mcdi_phy_get_module_eeprom, 829 .get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
829 .get_module_info = efx_mcdi_phy_get_module_info, 830 .get_module_info = efx_mcdi_phy_get_module_info,
830}; 831};
832
833u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
834{
835 struct efx_mcdi_phy_data *phy_data = efx->phy_data;
836
837 return phy_data->supported_cap;
838}
839
840static unsigned int efx_mcdi_event_link_speed[] = {
841 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
842 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
843 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
844};
845
846void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
847{
848 u32 flags, fcntl, speed, lpa;
849
850 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
851 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
852 speed = efx_mcdi_event_link_speed[speed];
853
854 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
855 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
856 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
857
858 /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
859 * which is only run after flushing the event queues. Therefore, it
860 * is safe to modify the link state outside of the mac_lock here.
861 */
862 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
863
864 efx_mcdi_phy_check_fcntl(efx, lpa);
865
866 efx_link_status_changed(efx);
867}
868
869int efx_mcdi_set_mac(struct efx_nic *efx)
870{
871 u32 fcntl;
872 MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
873
874 BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
875
876 memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
877 efx->net_dev->dev_addr, ETH_ALEN);
878
879 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
880 EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
881 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
882
883 /* Set simple MAC filter for Siena */
884 MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
885 SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
886
887 switch (efx->wanted_fc) {
888 case EFX_FC_RX | EFX_FC_TX:
889 fcntl = MC_CMD_FCNTL_BIDIR;
890 break;
891 case EFX_FC_RX:
892 fcntl = MC_CMD_FCNTL_RESPOND;
893 break;
894 default:
895 fcntl = MC_CMD_FCNTL_OFF;
896 break;
897 }
898 if (efx->wanted_fc & EFX_FC_AUTO)
899 fcntl = MC_CMD_FCNTL_AUTO;
900 if (efx->fc_disable)
901 fcntl = MC_CMD_FCNTL_OFF;
902
903 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
904
905 return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
906 NULL, 0, NULL);
907}
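
MCDI_POPULATE_DWORD_n packs n named subfields into a single dword using their _LBN/_WIDTH definitions. As a hedged illustration (not from this patch, and assuming the usual MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN field macro), the single-field populate call above is roughly equivalent to:

/* Hedged equivalence sketch: open-coded form of the one-field
 * MCDI_POPULATE_DWORD_1 call for the REJECT field.
 */
u32 reject = (efx->unicast_filter ? 1u : 0u)
	     << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN;
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
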
908
909bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
910{
911 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
912 size_t outlength;
913 int rc;
914
915 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
916
917 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
918 outbuf, sizeof(outbuf), &outlength);
919 if (rc) {
920 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
921 __func__, rc);
922 return true;
923 }
924
925 return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
926}
927
928static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
929 u32 dma_len, int enable, int clear)
930{
931 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
932 int rc;
933 int period = enable ? 1000 : 0;
934
935 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
936
937 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
938 MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
939 MAC_STATS_IN_DMA, !!enable,
940 MAC_STATS_IN_CLEAR, clear,
941 MAC_STATS_IN_PERIODIC_CHANGE, 1,
942 MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
943 MAC_STATS_IN_PERIODIC_CLEAR, 0,
944 MAC_STATS_IN_PERIODIC_NOEVENT, 1,
945 MAC_STATS_IN_PERIOD_MS, period);
946 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
947
948 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
949 NULL, 0, NULL);
950 if (rc)
951 goto fail;
952
953 return 0;
954
955fail:
956 netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
957 __func__, enable ? "enable" : "disable", rc);
958 return rc;
959}
960
961void efx_mcdi_mac_start_stats(struct efx_nic *efx)
962{
963 __le64 *dma_stats = efx->stats_buffer.addr;
964
965 dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
966
967 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
968 MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
969}
970
971void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
972{
973 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
974}
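
efx_mcdi_mac_start_stats() invalidates the generation word before enabling periodic DMA, which lets readers of the shared stats buffer detect torn updates. A hedged consumer-side sketch of that generation-check pattern follows (illustrative function, assuming the MC_CMD_MAC_GENERATION_START/END indices and the EFX_MC_STATS_GENERATION_INVALID marker used above):

/* Hedged, illustrative consumer (not from this patch): snapshot the
 * DMA'd statistics only when the firmware's generation markers show a
 * complete, stable dump; EFX_MC_STATS_GENERATION_INVALID means no dump
 * has completed since efx_mcdi_mac_start_stats() reset the marker.
 */
static bool efx_stats_snapshot(struct efx_nic *efx, u64 *dest)
{
	__le64 *dma_stats = efx->stats_buffer.addr;
	__le64 gen_start, gen_end;
	int i;

	gen_end = ACCESS_ONCE(dma_stats[MC_CMD_MAC_GENERATION_END]);
	if (gen_end == EFX_MC_STATS_GENERATION_INVALID)
		return false;		/* no complete dump yet */
	rmb();				/* read marker before the stats */
	for (i = 0; i < MC_CMD_MAC_NSTATS; i++)
		dest[i] = le64_to_cpu(dma_stats[i]);
	rmb();				/* read stats before re-checking */
	gen_start = ACCESS_ONCE(dma_stats[MC_CMD_MAC_GENERATION_START]);
	return gen_start == gen_end;	/* false => torn update, retry */
}
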
975
976int efx_mcdi_port_probe(struct efx_nic *efx)
977{
978 int rc;
979
980 /* Hook in PHY operations table */
981 efx->phy_op = &efx_mcdi_phy_ops;
982
983 /* Set up MDIO structure for PHY */
984 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
985 efx->mdio.mdio_read = efx_mcdi_mdio_read;
986 efx->mdio.mdio_write = efx_mcdi_mdio_write;
987
988 /* Fill out MDIO structure, loopback modes, and initial link state */
989 rc = efx->phy_op->probe(efx);
990 if (rc != 0)
991 return rc;
992
993 /* Allocate buffer for stats */
994 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
995 MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
996 if (rc)
997 return rc;
998 netif_dbg(efx, probe, efx->net_dev,
999 "stats buffer at %llx (virt %p phys %llx)\n",
1000 (u64)efx->stats_buffer.dma_addr,
1001 efx->stats_buffer.addr,
1002 (u64)virt_to_phys(efx->stats_buffer.addr));
1003
1004 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
1005
1006 return 0;
1007}
1008
1009void efx_mcdi_port_remove(struct efx_nic *efx)
1010{
1011 efx->phy_op->remove(efx);
1012 efx_nic_free_buffer(efx, &efx->stats_buffer);
1013}
1014
 1015/* Get physical port number (EF10 only; on Siena it is the same as the PF number) */
1016int efx_mcdi_port_get_number(struct efx_nic *efx)
1017{
1018 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
1019 int rc;
1020
1021 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
1022 outbuf, sizeof(outbuf), NULL);
1023 if (rc)
1024 return rc;
1025
1026 return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
1027}
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 9acfd6696ffb..8ff954c59efa 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de99..16824fecc5ee 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 08f825b71ac8..a77a8bd2dd70 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -1,194 +1,32 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference. 8 * by the Free Software Foundation, incorporated herein by reference.
9 */ 9 */
10 10
11#include <linux/bitops.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
14#include <linux/delay.h>
15#include <linux/slab.h> 13#include <linux/slab.h>
16#include <linux/rtnetlink.h> 14#include <linux/rtnetlink.h>
17 15
18#include "net_driver.h" 16#include "net_driver.h"
19#include "spi.h"
20#include "efx.h" 17#include "efx.h"
21#include "nic.h"
22#include "mcdi.h"
23#include "mcdi_pcol.h"
24
25#define EFX_SPI_VERIFY_BUF_LEN 16
26
27struct efx_mtd_partition {
28 struct mtd_info mtd;
29 union {
30 struct {
31 bool updating;
32 u8 nvram_type;
33 u16 fw_subtype;
34 } mcdi;
35 size_t offset;
36 };
37 const char *type_name;
38 char name[IFNAMSIZ + 20];
39};
40
41struct efx_mtd_ops {
42 int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
43 size_t *retlen, u8 *buffer);
44 int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
45 int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
46 size_t *retlen, const u8 *buffer);
47 int (*sync)(struct mtd_info *mtd);
48};
49
50struct efx_mtd {
51 struct list_head node;
52 struct efx_nic *efx;
53 const struct efx_spi_device *spi;
54 const char *name;
55 const struct efx_mtd_ops *ops;
56 size_t n_parts;
57 struct efx_mtd_partition part[0];
58};
59
60#define efx_for_each_partition(part, efx_mtd) \
61 for ((part) = &(efx_mtd)->part[0]; \
62 (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
63 (part)++)
64 18
65#define to_efx_mtd_partition(mtd) \ 19#define to_efx_mtd_partition(mtd) \
66 container_of(mtd, struct efx_mtd_partition, mtd) 20 container_of(mtd, struct efx_mtd_partition, mtd)
67 21
68static int falcon_mtd_probe(struct efx_nic *efx);
69static int siena_mtd_probe(struct efx_nic *efx);
70
71/* SPI utilities */
72
73static int
74efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
75{
76 struct efx_mtd *efx_mtd = part->mtd.priv;
77 const struct efx_spi_device *spi = efx_mtd->spi;
78 struct efx_nic *efx = efx_mtd->efx;
79 u8 status;
80 int rc, i;
81
82 /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
83 for (i = 0; i < 40; i++) {
84 __set_current_state(uninterruptible ?
85 TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
86 schedule_timeout(HZ / 10);
87 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
88 &status, sizeof(status));
89 if (rc)
90 return rc;
91 if (!(status & SPI_STATUS_NRDY))
92 return 0;
93 if (signal_pending(current))
94 return -EINTR;
95 }
96 pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
97 return -ETIMEDOUT;
98}
99
100static int
101efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
102{
103 const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
104 SPI_STATUS_BP0);
105 u8 status;
106 int rc;
107
108 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
109 &status, sizeof(status));
110 if (rc)
111 return rc;
112
113 if (!(status & unlock_mask))
114 return 0; /* already unlocked */
115
116 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
117 if (rc)
118 return rc;
119 rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
120 if (rc)
121 return rc;
122
123 status &= ~unlock_mask;
124 rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
125 NULL, sizeof(status));
126 if (rc)
127 return rc;
128 rc = falcon_spi_wait_write(efx, spi);
129 if (rc)
130 return rc;
131
132 return 0;
133}
134
135static int
136efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
137{
138 struct efx_mtd *efx_mtd = part->mtd.priv;
139 const struct efx_spi_device *spi = efx_mtd->spi;
140 struct efx_nic *efx = efx_mtd->efx;
141 unsigned pos, block_len;
142 u8 empty[EFX_SPI_VERIFY_BUF_LEN];
143 u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
144 int rc;
145
146 if (len != spi->erase_size)
147 return -EINVAL;
148
149 if (spi->erase_command == 0)
150 return -EOPNOTSUPP;
151
152 rc = efx_spi_unlock(efx, spi);
153 if (rc)
154 return rc;
155 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
156 if (rc)
157 return rc;
158 rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
159 NULL, 0);
160 if (rc)
161 return rc;
162 rc = efx_spi_slow_wait(part, false);
163
164 /* Verify the entire region has been wiped */
165 memset(empty, 0xff, sizeof(empty));
166 for (pos = 0; pos < len; pos += block_len) {
167 block_len = min(len - pos, sizeof(buffer));
168 rc = falcon_spi_read(efx, spi, start + pos, block_len,
169 NULL, buffer);
170 if (rc)
171 return rc;
172 if (memcmp(empty, buffer, block_len))
173 return -EIO;
174
175 /* Avoid locking up the system */
176 cond_resched();
177 if (signal_pending(current))
178 return -EINTR;
179 }
180
181 return rc;
182}
183
184/* MTD interface */ 22/* MTD interface */
185 23
186static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) 24static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
187{ 25{
188 struct efx_mtd *efx_mtd = mtd->priv; 26 struct efx_nic *efx = mtd->priv;
189 int rc; 27 int rc;
190 28
191 rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len); 29 rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
192 if (rc == 0) { 30 if (rc == 0) {
193 erase->state = MTD_ERASE_DONE; 31 erase->state = MTD_ERASE_DONE;
194 } else { 32 } else {
@@ -202,13 +40,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
202static void efx_mtd_sync(struct mtd_info *mtd) 40static void efx_mtd_sync(struct mtd_info *mtd)
203{ 41{
204 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 42 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
205 struct efx_mtd *efx_mtd = mtd->priv; 43 struct efx_nic *efx = mtd->priv;
206 int rc; 44 int rc;
207 45
208 rc = efx_mtd->ops->sync(mtd); 46 rc = efx->type->mtd_sync(mtd);
209 if (rc) 47 if (rc)
210 pr_err("%s: %s sync failed (%d)\n", 48 pr_err("%s: %s sync failed (%d)\n",
211 part->name, efx_mtd->name, rc); 49 part->name, part->dev_type_name, rc);
212} 50}
213 51
214static void efx_mtd_remove_partition(struct efx_mtd_partition *part) 52static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -222,62 +60,44 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
222 ssleep(1); 60 ssleep(1);
223 } 61 }
224 WARN_ON(rc); 62 WARN_ON(rc);
63 list_del(&part->node);
225} 64}
226 65
227static void efx_mtd_remove_device(struct efx_mtd *efx_mtd) 66int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
228{ 67 size_t n_parts, size_t sizeof_part)
229 struct efx_mtd_partition *part;
230
231 efx_for_each_partition(part, efx_mtd)
232 efx_mtd_remove_partition(part);
233 list_del(&efx_mtd->node);
234 kfree(efx_mtd);
235}
236
237static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
238{
239 struct efx_mtd_partition *part;
240
241 efx_for_each_partition(part, efx_mtd)
242 if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
243 snprintf(part->name, sizeof(part->name),
244 "%s %s:%02x", efx_mtd->efx->name,
245 part->type_name, part->mcdi.fw_subtype);
246 else
247 snprintf(part->name, sizeof(part->name),
248 "%s %s", efx_mtd->efx->name,
249 part->type_name);
250}
251
252static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
253{ 68{
254 struct efx_mtd_partition *part; 69 struct efx_mtd_partition *part;
70 size_t i;
255 71
256 efx_mtd->efx = efx; 72 for (i = 0; i < n_parts; i++) {
73 part = (struct efx_mtd_partition *)((char *)parts +
74 i * sizeof_part);
257 75
258 efx_mtd_rename_device(efx_mtd);
259
260 efx_for_each_partition(part, efx_mtd) {
261 part->mtd.writesize = 1; 76 part->mtd.writesize = 1;
262 77
263 part->mtd.owner = THIS_MODULE; 78 part->mtd.owner = THIS_MODULE;
264 part->mtd.priv = efx_mtd; 79 part->mtd.priv = efx;
265 part->mtd.name = part->name; 80 part->mtd.name = part->name;
266 part->mtd._erase = efx_mtd_erase; 81 part->mtd._erase = efx_mtd_erase;
267 part->mtd._read = efx_mtd->ops->read; 82 part->mtd._read = efx->type->mtd_read;
268 part->mtd._write = efx_mtd->ops->write; 83 part->mtd._write = efx->type->mtd_write;
269 part->mtd._sync = efx_mtd_sync; 84 part->mtd._sync = efx_mtd_sync;
270 85
86 efx->type->mtd_rename(part);
87
271 if (mtd_device_register(&part->mtd, NULL, 0)) 88 if (mtd_device_register(&part->mtd, NULL, 0))
272 goto fail; 89 goto fail;
90
91 /* Add to list in order - efx_mtd_remove() depends on this */
92 list_add_tail(&part->node, &efx->mtd_list);
273 } 93 }
274 94
275 list_add(&efx_mtd->node, &efx->mtd_list);
276 return 0; 95 return 0;
277 96
278fail: 97fail:
279 while (part != &efx_mtd->part[0]) { 98 while (i--) {
280 --part; 99 part = (struct efx_mtd_partition *)((char *)parts +
100 i * sizeof_part);
281 efx_mtd_remove_partition(part); 101 efx_mtd_remove_partition(part);
282 } 102 }
283 /* Failure is unlikely here, but probably means we're out of memory */ 103 /* Failure is unlikely here, but probably means we're out of memory */
@@ -286,410 +106,28 @@ fail:
286 106
287void efx_mtd_remove(struct efx_nic *efx) 107void efx_mtd_remove(struct efx_nic *efx)
288{ 108{
289 struct efx_mtd *efx_mtd, *next; 109 struct efx_mtd_partition *parts, *part, *next;
290 110
291 WARN_ON(efx_dev_registered(efx)); 111 WARN_ON(efx_dev_registered(efx));
292 112
293 list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node) 113 if (list_empty(&efx->mtd_list))
294 efx_mtd_remove_device(efx_mtd); 114 return;
295}
296
297void efx_mtd_rename(struct efx_nic *efx)
298{
299 struct efx_mtd *efx_mtd;
300
301 ASSERT_RTNL();
302
303 list_for_each_entry(efx_mtd, &efx->mtd_list, node)
304 efx_mtd_rename_device(efx_mtd);
305}
306
307int efx_mtd_probe(struct efx_nic *efx)
308{
309 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
310 return siena_mtd_probe(efx);
311 else
312 return falcon_mtd_probe(efx);
313}
314
315/* Implementation of MTD operations for Falcon */
316
317static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
318 size_t len, size_t *retlen, u8 *buffer)
319{
320 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
321 struct efx_mtd *efx_mtd = mtd->priv;
322 const struct efx_spi_device *spi = efx_mtd->spi;
323 struct efx_nic *efx = efx_mtd->efx;
324 struct falcon_nic_data *nic_data = efx->nic_data;
325 int rc;
326
327 rc = mutex_lock_interruptible(&nic_data->spi_lock);
328 if (rc)
329 return rc;
330 rc = falcon_spi_read(efx, spi, part->offset + start, len,
331 retlen, buffer);
332 mutex_unlock(&nic_data->spi_lock);
333 return rc;
334}
335
336static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
337{
338 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
339 struct efx_mtd *efx_mtd = mtd->priv;
340 struct efx_nic *efx = efx_mtd->efx;
341 struct falcon_nic_data *nic_data = efx->nic_data;
342 int rc;
343
344 rc = mutex_lock_interruptible(&nic_data->spi_lock);
345 if (rc)
346 return rc;
347 rc = efx_spi_erase(part, part->offset + start, len);
348 mutex_unlock(&nic_data->spi_lock);
349 return rc;
350}
351
352static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
353 size_t len, size_t *retlen, const u8 *buffer)
354{
355 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
356 struct efx_mtd *efx_mtd = mtd->priv;
357 const struct efx_spi_device *spi = efx_mtd->spi;
358 struct efx_nic *efx = efx_mtd->efx;
359 struct falcon_nic_data *nic_data = efx->nic_data;
360 int rc;
361
362 rc = mutex_lock_interruptible(&nic_data->spi_lock);
363 if (rc)
364 return rc;
365 rc = falcon_spi_write(efx, spi, part->offset + start, len,
366 retlen, buffer);
367 mutex_unlock(&nic_data->spi_lock);
368 return rc;
369}
370
371static int falcon_mtd_sync(struct mtd_info *mtd)
372{
373 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
374 struct efx_mtd *efx_mtd = mtd->priv;
375 struct efx_nic *efx = efx_mtd->efx;
376 struct falcon_nic_data *nic_data = efx->nic_data;
377 int rc;
378
379 mutex_lock(&nic_data->spi_lock);
380 rc = efx_spi_slow_wait(part, true);
381 mutex_unlock(&nic_data->spi_lock);
382 return rc;
383}
384
385static const struct efx_mtd_ops falcon_mtd_ops = {
386 .read = falcon_mtd_read,
387 .erase = falcon_mtd_erase,
388 .write = falcon_mtd_write,
389 .sync = falcon_mtd_sync,
390};
391
392static int falcon_mtd_probe(struct efx_nic *efx)
393{
394 struct falcon_nic_data *nic_data = efx->nic_data;
395 struct efx_spi_device *spi;
396 struct efx_mtd *efx_mtd;
397 int rc = -ENODEV;
398
399 ASSERT_RTNL();
400
401 spi = &nic_data->spi_flash;
402 if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
403 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
404 GFP_KERNEL);
405 if (!efx_mtd)
406 return -ENOMEM;
407
408 efx_mtd->spi = spi;
409 efx_mtd->name = "flash";
410 efx_mtd->ops = &falcon_mtd_ops;
411
412 efx_mtd->n_parts = 1;
413 efx_mtd->part[0].mtd.type = MTD_NORFLASH;
414 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
415 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
416 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
417 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
418 efx_mtd->part[0].type_name = "sfc_flash_bootrom";
419
420 rc = efx_mtd_probe_device(efx, efx_mtd);
421 if (rc) {
422 kfree(efx_mtd);
423 return rc;
424 }
425 }
426
427 spi = &nic_data->spi_eeprom;
428 if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
429 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
430 GFP_KERNEL);
431 if (!efx_mtd)
432 return -ENOMEM;
433
434 efx_mtd->spi = spi;
435 efx_mtd->name = "EEPROM";
436 efx_mtd->ops = &falcon_mtd_ops;
437
438 efx_mtd->n_parts = 1;
439 efx_mtd->part[0].mtd.type = MTD_RAM;
440 efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
441 efx_mtd->part[0].mtd.size =
442 min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
443 EFX_EEPROM_BOOTCONFIG_START;
444 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
445 efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
446 efx_mtd->part[0].type_name = "sfc_bootconfig";
447
448 rc = efx_mtd_probe_device(efx, efx_mtd);
449 if (rc) {
450 kfree(efx_mtd);
451 return rc;
452 }
453 }
454
455 return rc;
456}
457
458/* Implementation of MTD operations for Siena */
459
460static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
461 size_t len, size_t *retlen, u8 *buffer)
462{
463 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
464 struct efx_mtd *efx_mtd = mtd->priv;
465 struct efx_nic *efx = efx_mtd->efx;
466 loff_t offset = start;
467 loff_t end = min_t(loff_t, start + len, mtd->size);
468 size_t chunk;
469 int rc = 0;
470
471 while (offset < end) {
472 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
473 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
474 buffer, chunk);
475 if (rc)
476 goto out;
477 offset += chunk;
478 buffer += chunk;
479 }
480out:
481 *retlen = offset - start;
482 return rc;
483}
484
485static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
486{
487 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
488 struct efx_mtd *efx_mtd = mtd->priv;
489 struct efx_nic *efx = efx_mtd->efx;
490 loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
491 loff_t end = min_t(loff_t, start + len, mtd->size);
492 size_t chunk = part->mtd.erasesize;
493 int rc = 0;
494
495 if (!part->mcdi.updating) {
496 rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
497 if (rc)
498 goto out;
499 part->mcdi.updating = true;
500 }
501
502 /* The MCDI interface can in fact do multiple erase blocks at once;
503 * but erasing may be slow, so we make multiple calls here to avoid
504 * tripping the MCDI RPC timeout. */
505 while (offset < end) {
506 rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
507 chunk);
508 if (rc)
509 goto out;
510 offset += chunk;
511 }
512out:
513 return rc;
514}
515
516static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
517 size_t len, size_t *retlen, const u8 *buffer)
518{
519 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
520 struct efx_mtd *efx_mtd = mtd->priv;
521 struct efx_nic *efx = efx_mtd->efx;
522 loff_t offset = start;
523 loff_t end = min_t(loff_t, start + len, mtd->size);
524 size_t chunk;
525 int rc = 0;
526
527 if (!part->mcdi.updating) {
528 rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
529 if (rc)
530 goto out;
531 part->mcdi.updating = true;
532 }
533
534 while (offset < end) {
535 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
536 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
537 buffer, chunk);
538 if (rc)
539 goto out;
540 offset += chunk;
541 buffer += chunk;
542 }
543out:
544 *retlen = offset - start;
545 return rc;
546}
547
548static int siena_mtd_sync(struct mtd_info *mtd)
549{
550 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
551 struct efx_mtd *efx_mtd = mtd->priv;
552 struct efx_nic *efx = efx_mtd->efx;
553 int rc = 0;
554
555 if (part->mcdi.updating) {
556 part->mcdi.updating = false;
557 rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
558 }
559
560 return rc;
561}
562
563static const struct efx_mtd_ops siena_mtd_ops = {
564 .read = siena_mtd_read,
565 .erase = siena_mtd_erase,
566 .write = siena_mtd_write,
567 .sync = siena_mtd_sync,
568};
569
570struct siena_nvram_type_info {
571 int port;
572 const char *name;
573};
574 115
575static const struct siena_nvram_type_info siena_nvram_types[] = { 116 parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
576 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, 117 node);
577 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
578 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
579 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
580 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
581 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
582 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
583 [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
584 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
585 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
586 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
587 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
588 [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
589};
590 118
591static int siena_mtd_probe_partition(struct efx_nic *efx, 119 list_for_each_entry_safe(part, next, &efx->mtd_list, node)
592 struct efx_mtd *efx_mtd, 120 efx_mtd_remove_partition(part);
593 unsigned int part_id,
594 unsigned int type)
595{
596 struct efx_mtd_partition *part = &efx_mtd->part[part_id];
597 const struct siena_nvram_type_info *info;
598 size_t size, erase_size;
599 bool protected;
600 int rc;
601
602 if (type >= ARRAY_SIZE(siena_nvram_types) ||
603 siena_nvram_types[type].name == NULL)
604 return -ENODEV;
605
606 info = &siena_nvram_types[type];
607
608 if (info->port != efx_port_num(efx))
609 return -ENODEV;
610
611 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
612 if (rc)
613 return rc;
614 if (protected)
615 return -ENODEV; /* hide it */
616
617 part->mcdi.nvram_type = type;
618 part->type_name = info->name;
619
620 part->mtd.type = MTD_NORFLASH;
621 part->mtd.flags = MTD_CAP_NORFLASH;
622 part->mtd.size = size;
623 part->mtd.erasesize = erase_size;
624 121
625 return 0; 122 kfree(parts);
626} 123}
627 124
628static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, 125void efx_mtd_rename(struct efx_nic *efx)
629 struct efx_mtd *efx_mtd)
630{ 126{
631 struct efx_mtd_partition *part; 127 struct efx_mtd_partition *part;
632 uint16_t fw_subtype_list[
633 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
634 int rc;
635
636 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
637 if (rc)
638 return rc;
639
640 efx_for_each_partition(part, efx_mtd)
641 part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
642
643 return 0;
644}
645
646static int siena_mtd_probe(struct efx_nic *efx)
647{
648 struct efx_mtd *efx_mtd;
649 int rc = -ENODEV;
650 u32 nvram_types;
651 unsigned int type;
652 128
653 ASSERT_RTNL(); 129 ASSERT_RTNL();
654 130
655 rc = efx_mcdi_nvram_types(efx, &nvram_types); 131 list_for_each_entry(part, &efx->mtd_list, node)
656 if (rc) 132 efx->type->mtd_rename(part);
657 return rc;
658
659 efx_mtd = kzalloc(sizeof(*efx_mtd) +
660 hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
661 GFP_KERNEL);
662 if (!efx_mtd)
663 return -ENOMEM;
664
665 efx_mtd->name = "Siena NVRAM manager";
666
667 efx_mtd->ops = &siena_mtd_ops;
668
669 type = 0;
670 efx_mtd->n_parts = 0;
671
672 while (nvram_types != 0) {
673 if (nvram_types & 1) {
674 rc = siena_mtd_probe_partition(efx, efx_mtd,
675 efx_mtd->n_parts, type);
676 if (rc == 0)
677 efx_mtd->n_parts++;
678 else if (rc != -ENODEV)
679 goto fail;
680 }
681 type++;
682 nvram_types >>= 1;
683 }
684
685 rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
686 if (rc)
687 goto fail;
688
689 rc = efx_mtd_probe_device(efx, efx_mtd);
690fail:
691 if (rc)
692 kfree(efx_mtd);
693 return rc;
694} 133}
695
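
A note on the new ownership model: efx_mtd_add() now takes all partitions in one contiguous allocation with element stride sizeof_part, which is why efx_mtd_remove() can release everything with a single kfree() of the first list entry. A hedged caller sketch (the wrapper type and function names are hypothetical, not from this patch):

/* Hedged, illustrative caller: a NIC type embeds struct
 * efx_mtd_partition at the start of its own partition type, allocates
 * all partitions contiguously, and passes the element stride as
 * sizeof_part. On success the allocation is owned by the MTD layer and
 * released by the single kfree() in efx_mtd_remove().
 */
struct example_mtd_partition {
	struct efx_mtd_partition common;
	unsigned int nvram_type;
};

static int example_mtd_probe(struct efx_nic *efx, size_t n_parts)
{
	struct example_mtd_partition *parts;
	int rc;

	parts = kcalloc(n_parts, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	/* ... fill in parts[i].common.mtd.size, erasesize, type_name ... */

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);	/* not yet owned by the MTD layer */
	return rc;
}
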
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f4c7e6b67743..b172ed133055 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -27,9 +27,11 @@
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/mtd/mtd.h>
30 31
31#include "enum.h" 32#include "enum.h"
32#include "bitfield.h" 33#include "bitfield.h"
34#include "filter.h"
33 35
34/************************************************************************** 36/**************************************************************************
35 * 37 *
@@ -37,7 +39,7 @@
37 * 39 *
38 **************************************************************************/ 40 **************************************************************************/
39 41
40#define EFX_DRIVER_VERSION "3.2" 42#define EFX_DRIVER_VERSION "4.0"
41 43
42#ifdef DEBUG 44#ifdef DEBUG
43#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 45#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -93,21 +95,36 @@ struct efx_ptp_data;
93struct efx_self_tests; 95struct efx_self_tests;
94 96
95/** 97/**
96 * struct efx_special_buffer - An Efx special buffer 98 * struct efx_buffer - A general-purpose DMA buffer
97 * @addr: CPU base address of the buffer 99 * @addr: host base address of the buffer
98 * @dma_addr: DMA base address of the buffer 100 * @dma_addr: DMA base address of the buffer
99 * @len: Buffer length, in bytes 101 * @len: Buffer length, in bytes
 100 * @index: Buffer index within controller's buffer table
101 * @entries: Number of buffer table entries
102 * 102 *
103 * Special buffers are used for the event queues and the TX and RX 103 * The NIC uses these buffers for its interrupt status registers and
104 * descriptor queues for each channel. They are *not* used for the 104 * MAC stats dumps.
105 * actual transmit and receive buffers.
106 */ 105 */
107struct efx_special_buffer { 106struct efx_buffer {
108 void *addr; 107 void *addr;
109 dma_addr_t dma_addr; 108 dma_addr_t dma_addr;
110 unsigned int len; 109 unsigned int len;
110};
111
112/**
113 * struct efx_special_buffer - DMA buffer entered into buffer table
114 * @buf: Standard &struct efx_buffer
 115 * @index: Buffer index within controller's buffer table
116 * @entries: Number of buffer table entries
117 *
118 * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
119 * Event and descriptor rings are addressed via one or more buffer
120 * table entries (and so can be physically non-contiguous, although we
121 * currently do not take advantage of that). On Falcon and Siena we
122 * have to take care of allocating and initialising the entries
123 * ourselves. On later hardware this is managed by the firmware and
124 * @index and @entries are left as 0.
125 */
126struct efx_special_buffer {
127 struct efx_buffer buf;
111 unsigned int index; 128 unsigned int index;
112 unsigned int entries; 129 unsigned int entries;
113}; 130};
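
Since the DMA fields now live in the embedded struct efx_buffer, call sites change from direct member access to the nested form; a hedged illustration (not from this hunk):

/* Hedged illustration: accessing the fields after the layout change. */
void *ptr = channel->eventq.buf.addr;			/* was ...eventq.addr */
dma_addr_t dma_addr = channel->eventq.buf.dma_addr;	/* was ...eventq.dma_addr */
unsigned int entries = channel->eventq.entries;		/* unchanged */
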
@@ -118,6 +135,7 @@ struct efx_special_buffer {
118 * freed when descriptor completes 135 * freed when descriptor completes
119 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be 136 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
120 * freed when descriptor completes. 137 * freed when descriptor completes.
138 * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
121 * @dma_addr: DMA address of the fragment. 139 * @dma_addr: DMA address of the fragment.
122 * @flags: Flags for allocation and DMA mapping type 140 * @flags: Flags for allocation and DMA mapping type
123 * @len: Length of this fragment. 141 * @len: Length of this fragment.
@@ -129,7 +147,10 @@ struct efx_tx_buffer {
129 const struct sk_buff *skb; 147 const struct sk_buff *skb;
130 void *heap_buf; 148 void *heap_buf;
131 }; 149 };
132 dma_addr_t dma_addr; 150 union {
151 efx_qword_t option;
152 dma_addr_t dma_addr;
153 };
133 unsigned short flags; 154 unsigned short flags;
134 unsigned short len; 155 unsigned short len;
135 unsigned short unmap_len; 156 unsigned short unmap_len;
@@ -138,6 +159,7 @@ struct efx_tx_buffer {
138#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ 159#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
139#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */ 160#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
140#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ 161#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
162#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
141 163
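
The option/dma_addr union pairs with the new EFX_TX_BUF_OPTION flag: an option descriptor slot carries a NIC-specific command qword rather than a mapped fragment, so nothing is unmapped on completion. A hedged sketch of populating such a slot (tx_queue, index and the command value are illustrative, not from this patch):

/* Hedged, illustrative sketch: mark a TX slot as an option descriptor
 * and fill in its command qword; len/unmap_len stay zero since there
 * is no DMA mapping to release on completion.
 */
struct efx_tx_buffer *buffer = &tx_queue->buffer[index];

buffer->flags = EFX_TX_BUF_OPTION;
buffer->len = 0;
buffer->unmap_len = 0;
buffer->option.u64[0] = cpu_to_le64(option_cmd);	/* hypothetical command word */
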
142/** 164/**
143 * struct efx_tx_queue - An Efx TX queue 165 * struct efx_tx_queue - An Efx TX queue
@@ -169,6 +191,7 @@ struct efx_tx_buffer {
169 * variable indicates that the queue is empty. This is to 191 * variable indicates that the queue is empty. This is to
170 * avoid cache-line ping-pong between the xmit path and the 192 * avoid cache-line ping-pong between the xmit path and the
171 * completion path. 193 * completion path.
194 * @merge_events: Number of TX merged completion events
172 * @insert_count: Current insert pointer 195 * @insert_count: Current insert pointer
173 * This is the number of buffers that have been added to the 196 * This is the number of buffers that have been added to the
174 * software ring. 197 * software ring.
@@ -205,6 +228,7 @@ struct efx_tx_queue {
205 /* Members used mainly on the completion path */ 228 /* Members used mainly on the completion path */
206 unsigned int read_count ____cacheline_aligned_in_smp; 229 unsigned int read_count ____cacheline_aligned_in_smp;
207 unsigned int old_write_count; 230 unsigned int old_write_count;
231 unsigned int merge_events;
208 232
209 /* Members used only on the xmit path */ 233 /* Members used only on the xmit path */
210 unsigned int insert_count ____cacheline_aligned_in_smp; 234 unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -244,6 +268,7 @@ struct efx_rx_buffer {
244#define EFX_RX_PKT_CSUMMED 0x0002 268#define EFX_RX_PKT_CSUMMED 0x0002
245#define EFX_RX_PKT_DISCARD 0x0004 269#define EFX_RX_PKT_DISCARD 0x0004
246#define EFX_RX_PKT_TCP 0x0040 270#define EFX_RX_PKT_TCP 0x0040
271#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
247 272
248/** 273/**
249 * struct efx_rx_page_state - Page-based rx buffer state 274 * struct efx_rx_page_state - Page-based rx buffer state
@@ -271,13 +296,14 @@ struct efx_rx_page_state {
271 * @buffer: The software buffer ring 296 * @buffer: The software buffer ring
272 * @rxd: The hardware descriptor ring 297 * @rxd: The hardware descriptor ring
273 * @ptr_mask: The size of the ring minus 1. 298 * @ptr_mask: The size of the ring minus 1.
274 * @enabled: Receive queue enabled indicator. 299 * @refill_enabled: Enable refill whenever fill level is low
275 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as 300 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
276 * @rxq_flush_pending. 301 * @rxq_flush_pending.
277 * @added_count: Number of buffers added to the receive queue. 302 * @added_count: Number of buffers added to the receive queue.
278 * @notified_count: Number of buffers given to NIC (<= @added_count). 303 * @notified_count: Number of buffers given to NIC (<= @added_count).
279 * @removed_count: Number of buffers removed from the receive queue. 304 * @removed_count: Number of buffers removed from the receive queue.
280 * @scatter_n: Number of buffers used by current packet 305 * @scatter_n: Used by NIC specific receive code.
306 * @scatter_len: Used by NIC specific receive code.
281 * @page_ring: The ring to store DMA mapped pages for reuse. 307 * @page_ring: The ring to store DMA mapped pages for reuse.
282 * @page_add: Counter to calculate the write pointer for the recycle ring. 308 * @page_add: Counter to calculate the write pointer for the recycle ring.
283 * @page_remove: Counter to calculate the read pointer for the recycle ring. 309 * @page_remove: Counter to calculate the read pointer for the recycle ring.
@@ -302,13 +328,14 @@ struct efx_rx_queue {
302 struct efx_rx_buffer *buffer; 328 struct efx_rx_buffer *buffer;
303 struct efx_special_buffer rxd; 329 struct efx_special_buffer rxd;
304 unsigned int ptr_mask; 330 unsigned int ptr_mask;
305 bool enabled; 331 bool refill_enabled;
306 bool flush_pending; 332 bool flush_pending;
307 333
308 unsigned int added_count; 334 unsigned int added_count;
309 unsigned int notified_count; 335 unsigned int notified_count;
310 unsigned int removed_count; 336 unsigned int removed_count;
311 unsigned int scatter_n; 337 unsigned int scatter_n;
338 unsigned int scatter_len;
312 struct page **page_ring; 339 struct page **page_ring;
313 unsigned int page_add; 340 unsigned int page_add;
314 unsigned int page_remove; 341 unsigned int page_remove;
@@ -325,22 +352,6 @@ struct efx_rx_queue {
325 unsigned int slow_fill_count; 352 unsigned int slow_fill_count;
326}; 353};
327 354
328/**
329 * struct efx_buffer - An Efx general-purpose buffer
330 * @addr: host base address of the buffer
331 * @dma_addr: DMA base address of the buffer
332 * @len: Buffer length, in bytes
333 *
334 * The NIC uses these buffers for its interrupt status registers and
335 * MAC stats dumps.
336 */
337struct efx_buffer {
338 void *addr;
339 dma_addr_t dma_addr;
340 unsigned int len;
341};
342
343
344enum efx_rx_alloc_method { 355enum efx_rx_alloc_method {
345 RX_ALLOC_METHOD_AUTO = 0, 356 RX_ALLOC_METHOD_AUTO = 0,
346 RX_ALLOC_METHOD_SKB = 1, 357 RX_ALLOC_METHOD_SKB = 1,
@@ -357,12 +368,12 @@ enum efx_rx_alloc_method {
357 * @efx: Associated Efx NIC 368 * @efx: Associated Efx NIC
358 * @channel: Channel instance number 369 * @channel: Channel instance number
359 * @type: Channel type definition 370 * @type: Channel type definition
371 * @eventq_init: Event queue initialised flag
360 * @enabled: Channel enabled indicator 372 * @enabled: Channel enabled indicator
361 * @irq: IRQ number (MSI and MSI-X only) 373 * @irq: IRQ number (MSI and MSI-X only)
362 * @irq_moderation: IRQ moderation value (in hardware ticks) 374 * @irq_moderation: IRQ moderation value (in hardware ticks)
363 * @napi_dev: Net device used with NAPI 375 * @napi_dev: Net device used with NAPI
364 * @napi_str: NAPI control structure 376 * @napi_str: NAPI control structure
365 * @work_pending: Is work pending via NAPI?
366 * @eventq: Event queue buffer 377 * @eventq: Event queue buffer
367 * @eventq_mask: Event queue pointer mask 378 * @eventq_mask: Event queue pointer mask
368 * @eventq_read_ptr: Event queue read pointer 379 * @eventq_read_ptr: Event queue read pointer
@@ -378,6 +389,8 @@ enum efx_rx_alloc_method {
378 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 389 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
379 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to 390 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
380 * lack of descriptors 391 * lack of descriptors
392 * @n_rx_merge_events: Number of RX merged completion events
393 * @n_rx_merge_packets: Number of RX packets completed by merged events
381 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by 394 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
382 * __efx_rx_packet(), or zero if there is none 395 * __efx_rx_packet(), or zero if there is none
383 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered 396 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -389,12 +402,12 @@ struct efx_channel {
389 struct efx_nic *efx; 402 struct efx_nic *efx;
390 int channel; 403 int channel;
391 const struct efx_channel_type *type; 404 const struct efx_channel_type *type;
405 bool eventq_init;
392 bool enabled; 406 bool enabled;
393 int irq; 407 int irq;
394 unsigned int irq_moderation; 408 unsigned int irq_moderation;
395 struct net_device *napi_dev; 409 struct net_device *napi_dev;
396 struct napi_struct napi_str; 410 struct napi_struct napi_str;
397 bool work_pending;
398 struct efx_special_buffer eventq; 411 struct efx_special_buffer eventq;
399 unsigned int eventq_mask; 412 unsigned int eventq_mask;
400 unsigned int eventq_read_ptr; 413 unsigned int eventq_read_ptr;
@@ -414,6 +427,8 @@ struct efx_channel {
414 unsigned n_rx_overlength; 427 unsigned n_rx_overlength;
415 unsigned n_skbuff_leaks; 428 unsigned n_skbuff_leaks;
416 unsigned int n_rx_nodesc_trunc; 429 unsigned int n_rx_nodesc_trunc;
430 unsigned int n_rx_merge_events;
431 unsigned int n_rx_merge_packets;
417 432
418 unsigned int rx_pkt_n_frags; 433 unsigned int rx_pkt_n_frags;
419 unsigned int rx_pkt_index; 434 unsigned int rx_pkt_index;
@@ -423,6 +438,21 @@ struct efx_channel {
423}; 438};
424 439
425/** 440/**
441 * struct efx_msi_context - Context for each MSI
442 * @efx: The associated NIC
443 * @index: Index of the channel/IRQ
444 * @name: Name of the channel/IRQ
445 *
446 * Unlike &struct efx_channel, this is never reallocated and is always
447 * safe for the IRQ handler to access.
448 */
449struct efx_msi_context {
450 struct efx_nic *efx;
451 unsigned int index;
452 char name[IFNAMSIZ + 6];
453};
454
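For illustration, a minimal sketch of an MSI handler built on this context (the handler name and body are assumptions for exposition, not driver code; the real handlers are supplied through the efx_nic_type hooks documented below, and the real driver defers to its own channel-scheduling helper rather than calling napi_schedule() directly):

static irqreturn_t example_msi_handler(int irq, void *dev_id)
{
        struct efx_msi_context *context = dev_id;
        struct efx_nic *efx = context->efx;

        /* The context is never reallocated, so these reads are safe
         * even while the channels themselves are being reallocated.
         */
        if (likely(ACCESS_ONCE(efx->irq_soft_enabled)))
                napi_schedule(&efx->channel[context->index]->napi_str);
        return IRQ_HANDLED;
}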
455/**
426 * struct efx_channel_type - distinguishes traffic and extra channels 456 * struct efx_channel_type - distinguishes traffic and extra channels
427 * @handle_no_channel: Handle failure to allocate an extra channel 457 * @handle_no_channel: Handle failure to allocate an extra channel
428 * @pre_probe: Set up extra state prior to initialisation 458 * @pre_probe: Set up extra state prior to initialisation
@@ -579,75 +609,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
579 return !!(mode & ~PHY_MODE_TX_DISABLED); 609 return !!(mode & ~PHY_MODE_TX_DISABLED);
580} 610}
581 611
582/* 612/**
583 * Efx extended statistics 613 * struct efx_hw_stat_desc - Description of a hardware statistic
584 * 614 * @name: Name of the statistic as visible through ethtool, or %NULL if
585 * Not all statistics are provided by all supported MACs. The purpose 615 * it should not be exposed
586 * of this structure is to contain the raw statistics provided by each 616 * @dma_width: Width in bits (0 for non-DMA statistics)
587 * MAC. 617 * @offset: Offset within stats (ignored for non-DMA statistics)
588 */ 618 */
589struct efx_mac_stats { 619struct efx_hw_stat_desc {
590 u64 tx_bytes; 620 const char *name;
591 u64 tx_good_bytes; 621 u16 dma_width;
592 u64 tx_bad_bytes; 622 u16 offset;
593 u64 tx_packets;
594 u64 tx_bad;
595 u64 tx_pause;
596 u64 tx_control;
597 u64 tx_unicast;
598 u64 tx_multicast;
599 u64 tx_broadcast;
600 u64 tx_lt64;
601 u64 tx_64;
602 u64 tx_65_to_127;
603 u64 tx_128_to_255;
604 u64 tx_256_to_511;
605 u64 tx_512_to_1023;
606 u64 tx_1024_to_15xx;
607 u64 tx_15xx_to_jumbo;
608 u64 tx_gtjumbo;
609 u64 tx_collision;
610 u64 tx_single_collision;
611 u64 tx_multiple_collision;
612 u64 tx_excessive_collision;
613 u64 tx_deferred;
614 u64 tx_late_collision;
615 u64 tx_excessive_deferred;
616 u64 tx_non_tcpudp;
617 u64 tx_mac_src_error;
618 u64 tx_ip_src_error;
619 u64 rx_bytes;
620 u64 rx_good_bytes;
621 u64 rx_bad_bytes;
622 u64 rx_packets;
623 u64 rx_good;
624 u64 rx_bad;
625 u64 rx_pause;
626 u64 rx_control;
627 u64 rx_unicast;
628 u64 rx_multicast;
629 u64 rx_broadcast;
630 u64 rx_lt64;
631 u64 rx_64;
632 u64 rx_65_to_127;
633 u64 rx_128_to_255;
634 u64 rx_256_to_511;
635 u64 rx_512_to_1023;
636 u64 rx_1024_to_15xx;
637 u64 rx_15xx_to_jumbo;
638 u64 rx_gtjumbo;
639 u64 rx_bad_lt64;
640 u64 rx_bad_64_to_15xx;
641 u64 rx_bad_15xx_to_jumbo;
642 u64 rx_bad_gtjumbo;
643 u64 rx_overflow;
644 u64 rx_missed;
645 u64 rx_false_carrier;
646 u64 rx_symbol_error;
647 u64 rx_align_error;
648 u64 rx_length_error;
649 u64 rx_internal_error;
650 u64 rx_good_lt64;
651}; 623};
652 624
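As an illustration of the new descriptor (the names, widths and offsets below are invented for the example), a NIC-type implementation might describe its statistics with an array like:

static const struct efx_hw_stat_desc example_hw_stats[] = {
        { "tx_bytes", 64, 0x00 },   /* 64-bit DMA statistic */
        { "rx_bytes", 64, 0x08 },
        { NULL,       64, 0x10 },   /* in the DMA buffer, hidden from ethtool */
        { "link_up",   0, 0    },   /* non-DMA: width 0, offset ignored */
};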
653/* Number of bits used in a multicast filter hash address */ 625/* Number of bits used in a multicast filter hash address */
@@ -662,7 +634,6 @@ union efx_multicast_hash {
662 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; 634 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
663}; 635};
664 636
665struct efx_filter_state;
666struct efx_vf; 637struct efx_vf;
667struct vfdi_status; 638struct vfdi_status;
668 639
@@ -672,7 +643,6 @@ struct vfdi_status;
672 * @pci_dev: The PCI device 643 * @pci_dev: The PCI device
673 * @type: Controller type attributes 644 * @type: Controller type attributes
674 * @legacy_irq: IRQ number 645 * @legacy_irq: IRQ number
675 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
676 * @workqueue: Workqueue for port reconfigures and the HW monitor. 646 * @workqueue: Workqueue for port reconfigures and the HW monitor.
677 * Work items do not hold and must not acquire RTNL. 647 * Work items do not hold and must not acquire RTNL.
678 * @workqueue_name: Name of workqueue 648 * @workqueue_name: Name of workqueue
@@ -689,7 +659,7 @@ struct vfdi_status;
689 * @tx_queue: TX DMA queues 659 * @tx_queue: TX DMA queues
690 * @rx_queue: RX DMA queues 660 * @rx_queue: RX DMA queues
691 * @channel: Channels 661 * @channel: Channels
692 * @channel_name: Names for channels and their IRQs 662 * @msi_context: Context for each MSI
693 * @extra_channel_types: Types of extra (non-traffic) channels that 663 * @extra_channel_types: Types of extra (non-traffic) channels that
694 * should be allocated for this NIC 664 * should be allocated for this NIC
695 * @rxq_entries: Size of receive queues requested by user. 665 * @rxq_entries: Size of receive queues requested by user.
@@ -707,17 +677,25 @@ struct vfdi_status;
707 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 677 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
708 * @rx_buffer_truesize: Amortised allocation size of an RX buffer, 678 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
709 * for use in sk_buff::truesize 679 * for use in sk_buff::truesize
680 * @rx_prefix_size: Size of RX prefix before packet data
681 * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
682 * (valid only if @rx_prefix_size != 0; always negative)
683 * @rx_packet_len_offset: Offset of RX packet length from start of packet data
684 * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
710 * @rx_hash_key: Toeplitz hash key for RSS 685 * @rx_hash_key: Toeplitz hash key for RSS
711 * @rx_indir_table: Indirection table for RSS 686 * @rx_indir_table: Indirection table for RSS
712 * @rx_scatter: Scatter mode enabled for receives 687 * @rx_scatter: Scatter mode enabled for receives
713 * @int_error_count: Number of internal errors seen recently 688 * @int_error_count: Number of internal errors seen recently
714 * @int_error_expire: Time at which error count will be expired 689 * @int_error_expire: Time at which error count will be expired
690 * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
691 * acknowledge but do nothing else.
715 * @irq_status: Interrupt status buffer 692 * @irq_status: Interrupt status buffer
716 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 693 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
717 * @irq_level: IRQ level/index for IRQs not triggered by an event queue 694 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
718 * @selftest_work: Work item for asynchronous self-test 695 * @selftest_work: Work item for asynchronous self-test
719 * @mtd_list: List of MTDs attached to the NIC 696 * @mtd_list: List of MTDs attached to the NIC
720 * @nic_data: Hardware dependent state 697 * @nic_data: Hardware dependent state
698 * @mcdi: Management-Controller-to-Driver Interface state
721 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 699 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
722 * efx_monitor() and efx_reconfigure_port() 700 * efx_monitor() and efx_reconfigure_port()
723 * @port_enabled: Port enabled indicator. 701 * @port_enabled: Port enabled indicator.
@@ -737,8 +715,10 @@ struct vfdi_status;
737 * @link_advertising: Autonegotiation advertising flags 715 * @link_advertising: Autonegotiation advertising flags
738 * @link_state: Current state of the link 716 * @link_state: Current state of the link
739 * @n_link_state_changes: Number of times the link has changed state 717 * @n_link_state_changes: Number of times the link has changed state
740 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 718 * @unicast_filter: Flag for Falcon-arch simple unicast filter.
741 * @multicast_hash: Multicast hash table 719 * Protected by @mac_lock.
720 * @multicast_hash: Multicast hash table for Falcon-arch.
721 * Protected by @mac_lock.
742 * @wanted_fc: Wanted flow control flags 722 * @wanted_fc: Wanted flow control flags
743 * @fc_disable: When non-zero flow control is disabled. Typically used to 723 * @fc_disable: When non-zero flow control is disabled. Typically used to
744 * ensure that network back pressure doesn't delay dma queue flushes. 724 * ensure that network back pressure doesn't delay dma queue flushes.
@@ -747,7 +727,12 @@ struct vfdi_status;
747 * @loopback_mode: Loopback status 727 * @loopback_mode: Loopback status
748 * @loopback_modes: Supported loopback mode bitmask 728 * @loopback_modes: Supported loopback mode bitmask
749 * @loopback_selftest: Offline self-test private state 729 * @loopback_selftest: Offline self-test private state
750 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained. 730 * @filter_lock: Filter table lock
731 * @filter_state: Architecture-dependent filter table state
732 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
733 * indexed by filter ID
734 * @rps_expire_index: Next index to check for expiry in @rps_flow_id
735 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
751 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 736 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
752 * Decremented when the efx_flush_rx_queue() is called. 737 * Decremented when the efx_flush_rx_queue() is called.
753 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet 738 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
@@ -771,12 +756,8 @@ struct vfdi_status;
771 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 756 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
772 * field is used by efx_test_interrupts() to verify that an 757 * field is used by efx_test_interrupts() to verify that an
773 * interrupt has occurred. 758 * interrupt has occurred.
774 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 759 * @stats_lock: Statistics update lock. Must be held when calling
775 * @mac_stats: MAC statistics. These include all statistics the MACs 760 * efx_nic_type::{update,start,stop}_stats.
776 * can provide. Generic code converts these into a standard
777 * &struct net_device_stats.
778 * @stats_lock: Statistics update lock. Serialises statistics fetches
779 * and access to @mac_stats.
780 * 761 *
781 * This is stored in the private area of the &struct net_device. 762 * This is stored in the private area of the &struct net_device.
782 */ 763 */
@@ -788,7 +769,6 @@ struct efx_nic {
788 unsigned int port_num; 769 unsigned int port_num;
789 const struct efx_nic_type *type; 770 const struct efx_nic_type *type;
790 int legacy_irq; 771 int legacy_irq;
791 bool legacy_irq_enabled;
792 bool eeh_disabled_legacy_irq; 772 bool eeh_disabled_legacy_irq;
793 struct workqueue_struct *workqueue; 773 struct workqueue_struct *workqueue;
794 char workqueue_name[16]; 774 char workqueue_name[16];
@@ -806,7 +786,7 @@ struct efx_nic {
806 unsigned long reset_pending; 786 unsigned long reset_pending;
807 787
808 struct efx_channel *channel[EFX_MAX_CHANNELS]; 788 struct efx_channel *channel[EFX_MAX_CHANNELS];
809 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; 789 struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
810 const struct efx_channel_type * 790 const struct efx_channel_type *
811 extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; 791 extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
812 792
@@ -819,6 +799,8 @@ struct efx_nic {
819 unsigned rx_dc_base; 799 unsigned rx_dc_base;
820 unsigned sram_lim_qw; 800 unsigned sram_lim_qw;
821 unsigned next_buffer_table; 801 unsigned next_buffer_table;
802
803 unsigned int max_channels;
822 unsigned n_channels; 804 unsigned n_channels;
823 unsigned n_rx_channels; 805 unsigned n_rx_channels;
824 unsigned rss_spread; 806 unsigned rss_spread;
@@ -830,6 +812,9 @@ struct efx_nic {
830 unsigned int rx_page_buf_step; 812 unsigned int rx_page_buf_step;
831 unsigned int rx_bufs_per_page; 813 unsigned int rx_bufs_per_page;
832 unsigned int rx_pages_per_batch; 814 unsigned int rx_pages_per_batch;
815 unsigned int rx_prefix_size;
816 int rx_packet_hash_offset;
817 int rx_packet_len_offset;
833 u8 rx_hash_key[40]; 818 u8 rx_hash_key[40];
834 u32 rx_indir_table[128]; 819 u32 rx_indir_table[128];
835 bool rx_scatter; 820 bool rx_scatter;
@@ -837,6 +822,7 @@ struct efx_nic {
837 unsigned int_error_count; 822 unsigned int_error_count;
838 unsigned long int_error_expire; 823 unsigned long int_error_expire;
839 824
825 bool irq_soft_enabled;
840 struct efx_buffer irq_status; 826 struct efx_buffer irq_status;
841 unsigned irq_zero_count; 827 unsigned irq_zero_count;
842 unsigned irq_level; 828 unsigned irq_level;
@@ -847,6 +833,7 @@ struct efx_nic {
847#endif 833#endif
848 834
849 void *nic_data; 835 void *nic_data;
836 struct efx_mcdi_data *mcdi;
850 837
851 struct mutex mac_lock; 838 struct mutex mac_lock;
852 struct work_struct mac_work; 839 struct work_struct mac_work;
@@ -868,7 +855,7 @@ struct efx_nic {
868 struct efx_link_state link_state; 855 struct efx_link_state link_state;
869 unsigned int n_link_state_changes; 856 unsigned int n_link_state_changes;
870 857
871 bool promiscuous; 858 bool unicast_filter;
872 union efx_multicast_hash multicast_hash; 859 union efx_multicast_hash multicast_hash;
873 u8 wanted_fc; 860 u8 wanted_fc;
874 unsigned fc_disable; 861 unsigned fc_disable;
@@ -879,9 +866,14 @@ struct efx_nic {
879 866
880 void *loopback_selftest; 867 void *loopback_selftest;
881 868
882 struct efx_filter_state *filter_state; 869 spinlock_t filter_lock;
870 void *filter_state;
871#ifdef CONFIG_RFS_ACCEL
872 u32 *rps_flow_id;
873 unsigned int rps_expire_index;
874#endif
883 875
884 atomic_t drain_pending; 876 atomic_t active_queues;
885 atomic_t rxq_flush_pending; 877 atomic_t rxq_flush_pending;
886 atomic_t rxq_flush_outstanding; 878 atomic_t rxq_flush_outstanding;
887 wait_queue_head_t flush_wq; 879 wait_queue_head_t flush_wq;
@@ -907,8 +899,6 @@ struct efx_nic {
907 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 899 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
908 spinlock_t biu_lock; 900 spinlock_t biu_lock;
909 int last_irq_cpu; 901 int last_irq_cpu;
910 unsigned n_rx_nodesc_drop_cnt;
911 struct efx_mac_stats mac_stats;
912 spinlock_t stats_lock; 902 spinlock_t stats_lock;
913}; 903};
914 904
@@ -922,8 +912,17 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
922 return efx->port_num; 912 return efx->port_num;
923} 913}
924 914
915struct efx_mtd_partition {
916 struct list_head node;
917 struct mtd_info mtd;
918 const char *dev_type_name;
919 const char *type_name;
920 char name[IFNAMSIZ + 20];
921};
922
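For illustration only, an mtd_rename hook (see @mtd_rename below) might derive the partition name from the interface name roughly as follows, assuming mtd.priv points back at the owning efx_nic and that the NIC's name is available as efx->name (neither assumption is shown in this header):

static void example_mtd_rename(struct efx_mtd_partition *part)
{
        struct efx_nic *efx = part->mtd.priv;

        snprintf(part->name, sizeof(part->name), "%s %s",
                 efx->name, part->type_name);
}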
925/** 923/**
926 * struct efx_nic_type - Efx device type definition 924 * struct efx_nic_type - Efx device type definition
925 * @mem_map_size: Get memory BAR mapped size
927 * @probe: Probe the controller 926 * @probe: Probe the controller
928 * @remove: Free resources allocated by probe() 927 * @remove: Free resources allocated by probe()
929 * @init: Initialise the controller 928 * @init: Initialise the controller
@@ -938,47 +937,118 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
938 * @probe_port: Probe the MAC and PHY 937 * @probe_port: Probe the MAC and PHY
939 * @remove_port: Free resources allocated by probe_port() 938 * @remove_port: Free resources allocated by probe_port()
940 * @handle_global_event: Handle a "global" event (may be %NULL) 939 * @handle_global_event: Handle a "global" event (may be %NULL)
940 * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
941 * @prepare_flush: Prepare the hardware for flushing the DMA queues 941 * @prepare_flush: Prepare the hardware for flushing the DMA queues
942 * @finish_flush: Clean up after flushing the DMA queues 942 * (for Falcon architecture)
943 * @update_stats: Update statistics not provided by event handling 943 * @finish_flush: Clean up after flushing the DMA queues (for Falcon
944 * architecture)
945 * @describe_stats: Describe statistics for ethtool
946 * @update_stats: Update statistics not provided by event handling.
947 * Either argument may be %NULL.
944 * @start_stats: Start the regular fetching of statistics 948 * @start_stats: Start the regular fetching of statistics
945 * @stop_stats: Stop the regular fetching of statistics 949 * @stop_stats: Stop the regular fetching of statistics
946 * @set_id_led: Set state of identifying LED or revert to automatic function 950 * @set_id_led: Set state of identifying LED or revert to automatic function
947 * @push_irq_moderation: Apply interrupt moderation value 951 * @push_irq_moderation: Apply interrupt moderation value
948 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY 952 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
953 * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
949 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings 954 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
950 * to the hardware. Serialised by the mac_lock. 955 * to the hardware. Serialised by the mac_lock.
951 * @check_mac_fault: Check MAC fault state. True if fault present. 956 * @check_mac_fault: Check MAC fault state. True if fault present.
952 * @get_wol: Get WoL configuration from driver state 957 * @get_wol: Get WoL configuration from driver state
953 * @set_wol: Push WoL configuration to the NIC 958 * @set_wol: Push WoL configuration to the NIC
954 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 959 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
955 * @test_chip: Test registers. Should use efx_nic_test_registers(), and is 960 * @test_chip: Test registers. May use efx_farch_test_registers(), and is
956 * expected to reset the NIC. 961 * expected to reset the NIC.
957 * @test_nvram: Test validity of NVRAM contents 962 * @test_nvram: Test validity of NVRAM contents
963 * @mcdi_request: Send an MCDI request with the given header and SDU.
964 * The SDU length may be any value from 0 up to the protocol-
965 * defined maximum, but its buffer will be padded to a multiple
966 * of 4 bytes.
967 * @mcdi_poll_response: Test whether an MCDI response is available.
968 * @mcdi_read_response: Read the MCDI response PDU. The offset will
969 * be a multiple of 4. The length may not be, but the buffer
970 * will be padded so it is safe to round up.
971 * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
972 * return an appropriate error code for aborting any current
973 * request; otherwise return 0.
974 * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
975 * be separately enabled after this.
976 * @irq_test_generate: Generate a test IRQ
977 * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
978 * queue must be separately disabled before this.
979 * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
980 * a pointer to the &struct efx_msi_context for the channel.
981 * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
982 * is a pointer to the &struct efx_nic.
983 * @tx_probe: Allocate resources for TX queue
984 * @tx_init: Initialise TX queue on the NIC
985 * @tx_remove: Free resources for TX queue
986 * @tx_write: Write TX descriptors and doorbell
987 * @rx_push_indir_table: Write RSS indirection table to the NIC
988 * @rx_probe: Allocate resources for RX queue
989 * @rx_init: Initialise RX queue on the NIC
990 * @rx_remove: Free resources for RX queue
991 * @rx_write: Write RX descriptors and doorbell
992 * @rx_defer_refill: Generate a refill reminder event
993 * @ev_probe: Allocate resources for event queue
994 * @ev_init: Initialise event queue on the NIC
995 * @ev_fini: Deinitialise event queue on the NIC
996 * @ev_remove: Free resources for event queue
997 * @ev_process: Process events for a queue, up to the given NAPI quota
998 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
999 * @ev_test_generate: Generate a test event
1000 * @filter_table_probe: Probe filter capabilities and set up filter software state
1001 * @filter_table_restore: Restore filters removed from hardware
1002 * @filter_table_remove: Remove filters from hardware and tear down software state
1003 * @filter_update_rx_scatter: Update filters after change to rx scatter setting
1004 * @filter_insert: add or replace a filter
1005 * @filter_remove_safe: remove a filter by ID, carefully
1006 * @filter_get_safe: retrieve a filter by ID, carefully
1007 * @filter_clear_rx: remove RX filters by priority
1008 * @filter_count_rx_used: Get the number of filters in use at a given priority
1009 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
1010 * @filter_get_rx_ids: Get list of RX filters at a given priority
1011 * @filter_rfs_insert: Add or replace a filter for RFS. This must be
1012 * atomic. The hardware change may be asynchronous but should
1013 * not be delayed for long. It may fail if this can't be done
1014 * atomically.
1015 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
1016 * This must check whether the specified table entry is used by RFS
1017 * and that rps_may_expire_flow() returns true for it.
1018 * @mtd_probe: Probe and add MTD partitions associated with this net device,
1019 * using efx_mtd_add()
1020 * @mtd_rename: Set an MTD partition name using the net device name
1021 * @mtd_read: Read from an MTD partition
1022 * @mtd_erase: Erase part of an MTD partition
1023 * @mtd_write: Write to an MTD partition
1024 * @mtd_sync: Wait for write-back to complete on MTD partition. This
1025 * also notifies the driver that a writer has finished using this
1026 * partition.
958 * @revision: Hardware architecture revision 1027 * @revision: Hardware architecture revision
959 * @mem_map_size: Memory BAR mapped size
960 * @txd_ptr_tbl_base: TX descriptor ring base address 1028 * @txd_ptr_tbl_base: TX descriptor ring base address
961 * @rxd_ptr_tbl_base: RX descriptor ring base address 1029 * @rxd_ptr_tbl_base: RX descriptor ring base address
962 * @buf_tbl_base: Buffer table base address 1030 * @buf_tbl_base: Buffer table base address
963 * @evq_ptr_tbl_base: Event queue pointer table base address 1031 * @evq_ptr_tbl_base: Event queue pointer table base address
964 * @evq_rptr_tbl_base: Event queue read-pointer table base address 1032 * @evq_rptr_tbl_base: Event queue read-pointer table base address
965 * @max_dma_mask: Maximum possible DMA mask 1033 * @max_dma_mask: Maximum possible DMA mask
966 * @rx_buffer_hash_size: Size of hash at start of RX packet 1034 * @rx_prefix_size: Size of RX prefix before packet data
1035 * @rx_hash_offset: Offset of RX flow hash within prefix
967 * @rx_buffer_padding: Size of padding at end of RX packet 1036 * @rx_buffer_padding: Size of padding at end of RX packet
968 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers 1037 * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
1038 * @always_rx_scatter: NIC will always scatter packets to multiple buffers
969 * @max_interrupt_mode: Highest capability interrupt mode supported 1039 * @max_interrupt_mode: Highest capability interrupt mode supported
970 * from &enum efx_init_mode. 1040 * from &enum efx_init_mode.
971 * @phys_addr_channels: Number of channels with physically addressed
972 * descriptors
973 * @timer_period_max: Maximum period of interrupt timer (in ticks) 1041 * @timer_period_max: Maximum period of interrupt timer (in ticks)
974 * @offload_features: net_device feature flags for protocol offload 1042 * @offload_features: net_device feature flags for protocol offload
975 * features implemented in hardware 1043 * features implemented in hardware
1044 * @mcdi_max_ver: Maximum MCDI version supported
976 */ 1045 */
977struct efx_nic_type { 1046struct efx_nic_type {
1047 unsigned int (*mem_map_size)(struct efx_nic *efx);
978 int (*probe)(struct efx_nic *efx); 1048 int (*probe)(struct efx_nic *efx);
979 void (*remove)(struct efx_nic *efx); 1049 void (*remove)(struct efx_nic *efx);
980 int (*init)(struct efx_nic *efx); 1050 int (*init)(struct efx_nic *efx);
981 void (*dimension_resources)(struct efx_nic *efx); 1051 int (*dimension_resources)(struct efx_nic *efx);
982 void (*fini)(struct efx_nic *efx); 1052 void (*fini)(struct efx_nic *efx);
983 void (*monitor)(struct efx_nic *efx); 1053 void (*monitor)(struct efx_nic *efx);
984 enum reset_type (*map_reset_reason)(enum reset_type reason); 1054 enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -987,14 +1057,18 @@ struct efx_nic_type {
987 int (*probe_port)(struct efx_nic *efx); 1057 int (*probe_port)(struct efx_nic *efx);
988 void (*remove_port)(struct efx_nic *efx); 1058 void (*remove_port)(struct efx_nic *efx);
989 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); 1059 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
1060 int (*fini_dmaq)(struct efx_nic *efx);
990 void (*prepare_flush)(struct efx_nic *efx); 1061 void (*prepare_flush)(struct efx_nic *efx);
991 void (*finish_flush)(struct efx_nic *efx); 1062 void (*finish_flush)(struct efx_nic *efx);
992 void (*update_stats)(struct efx_nic *efx); 1063 size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
1064 size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
1065 struct rtnl_link_stats64 *core_stats);
993 void (*start_stats)(struct efx_nic *efx); 1066 void (*start_stats)(struct efx_nic *efx);
994 void (*stop_stats)(struct efx_nic *efx); 1067 void (*stop_stats)(struct efx_nic *efx);
995 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); 1068 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
996 void (*push_irq_moderation)(struct efx_channel *channel); 1069 void (*push_irq_moderation)(struct efx_channel *channel);
997 int (*reconfigure_port)(struct efx_nic *efx); 1070 int (*reconfigure_port)(struct efx_nic *efx);
1071 void (*prepare_enable_fc_tx)(struct efx_nic *efx);
998 int (*reconfigure_mac)(struct efx_nic *efx); 1072 int (*reconfigure_mac)(struct efx_nic *efx);
999 bool (*check_mac_fault)(struct efx_nic *efx); 1073 bool (*check_mac_fault)(struct efx_nic *efx);
1000 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); 1074 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
@@ -1002,22 +1076,90 @@ struct efx_nic_type {
1002 void (*resume_wol)(struct efx_nic *efx); 1076 void (*resume_wol)(struct efx_nic *efx);
1003 int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); 1077 int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
1004 int (*test_nvram)(struct efx_nic *efx); 1078 int (*test_nvram)(struct efx_nic *efx);
1079 void (*mcdi_request)(struct efx_nic *efx,
1080 const efx_dword_t *hdr, size_t hdr_len,
1081 const efx_dword_t *sdu, size_t sdu_len);
1082 bool (*mcdi_poll_response)(struct efx_nic *efx);
1083 void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
1084 size_t pdu_offset, size_t pdu_len);
1085 int (*mcdi_poll_reboot)(struct efx_nic *efx);
1086 void (*irq_enable_master)(struct efx_nic *efx);
1087 void (*irq_test_generate)(struct efx_nic *efx);
1088 void (*irq_disable_non_ev)(struct efx_nic *efx);
1089 irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
1090 irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
1091 int (*tx_probe)(struct efx_tx_queue *tx_queue);
1092 void (*tx_init)(struct efx_tx_queue *tx_queue);
1093 void (*tx_remove)(struct efx_tx_queue *tx_queue);
1094 void (*tx_write)(struct efx_tx_queue *tx_queue);
1095 void (*rx_push_indir_table)(struct efx_nic *efx);
1096 int (*rx_probe)(struct efx_rx_queue *rx_queue);
1097 void (*rx_init)(struct efx_rx_queue *rx_queue);
1098 void (*rx_remove)(struct efx_rx_queue *rx_queue);
1099 void (*rx_write)(struct efx_rx_queue *rx_queue);
1100 void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
1101 int (*ev_probe)(struct efx_channel *channel);
1102 int (*ev_init)(struct efx_channel *channel);
1103 void (*ev_fini)(struct efx_channel *channel);
1104 void (*ev_remove)(struct efx_channel *channel);
1105 int (*ev_process)(struct efx_channel *channel, int quota);
1106 void (*ev_read_ack)(struct efx_channel *channel);
1107 void (*ev_test_generate)(struct efx_channel *channel);
1108 int (*filter_table_probe)(struct efx_nic *efx);
1109 void (*filter_table_restore)(struct efx_nic *efx);
1110 void (*filter_table_remove)(struct efx_nic *efx);
1111 void (*filter_update_rx_scatter)(struct efx_nic *efx);
1112 s32 (*filter_insert)(struct efx_nic *efx,
1113 struct efx_filter_spec *spec, bool replace);
1114 int (*filter_remove_safe)(struct efx_nic *efx,
1115 enum efx_filter_priority priority,
1116 u32 filter_id);
1117 int (*filter_get_safe)(struct efx_nic *efx,
1118 enum efx_filter_priority priority,
1119 u32 filter_id, struct efx_filter_spec *);
1120 void (*filter_clear_rx)(struct efx_nic *efx,
1121 enum efx_filter_priority priority);
1122 u32 (*filter_count_rx_used)(struct efx_nic *efx,
1123 enum efx_filter_priority priority);
1124 u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
1125 s32 (*filter_get_rx_ids)(struct efx_nic *efx,
1126 enum efx_filter_priority priority,
1127 u32 *buf, u32 size);
1128#ifdef CONFIG_RFS_ACCEL
1129 s32 (*filter_rfs_insert)(struct efx_nic *efx,
1130 struct efx_filter_spec *spec);
1131 bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
1132 unsigned int index);
1133#endif
1134#ifdef CONFIG_SFC_MTD
1135 int (*mtd_probe)(struct efx_nic *efx);
1136 void (*mtd_rename)(struct efx_mtd_partition *part);
1137 int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
1138 size_t *retlen, u8 *buffer);
1139 int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
1140 int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
1141 size_t *retlen, const u8 *buffer);
1142 int (*mtd_sync)(struct mtd_info *mtd);
1143#endif
1144 void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
1005 1145
1006 int revision; 1146 int revision;
1007 unsigned int mem_map_size;
1008 unsigned int txd_ptr_tbl_base; 1147 unsigned int txd_ptr_tbl_base;
1009 unsigned int rxd_ptr_tbl_base; 1148 unsigned int rxd_ptr_tbl_base;
1010 unsigned int buf_tbl_base; 1149 unsigned int buf_tbl_base;
1011 unsigned int evq_ptr_tbl_base; 1150 unsigned int evq_ptr_tbl_base;
1012 unsigned int evq_rptr_tbl_base; 1151 unsigned int evq_rptr_tbl_base;
1013 u64 max_dma_mask; 1152 u64 max_dma_mask;
1014 unsigned int rx_buffer_hash_size; 1153 unsigned int rx_prefix_size;
1154 unsigned int rx_hash_offset;
1015 unsigned int rx_buffer_padding; 1155 unsigned int rx_buffer_padding;
1016 bool can_rx_scatter; 1156 bool can_rx_scatter;
1157 bool always_rx_scatter;
1017 unsigned int max_interrupt_mode; 1158 unsigned int max_interrupt_mode;
1018 unsigned int phys_addr_channels;
1019 unsigned int timer_period_max; 1159 unsigned int timer_period_max;
1020 netdev_features_t offload_features; 1160 netdev_features_t offload_features;
1161 int mcdi_max_ver;
1162 unsigned int max_rx_ip_filters;
1021}; 1163};
1022 1164
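A hedged sketch of how the new MCDI hooks compose into a response poll (the function name is a placeholder; timeout handling and locking are elided, so this shows the shape, not the driver's implementation):

static int example_mcdi_wait(struct efx_nic *efx)
{
        for (;;) {
                int rc = efx->type->mcdi_poll_reboot(efx);

                if (rc)
                        return rc;      /* MC rebooted: abort the request */
                if (efx->type->mcdi_poll_response(efx))
                        return 0;       /* response PDU may now be read */
                udelay(10);             /* crude busy-wait, for the sketch */
        }
}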
1023/************************************************************************** 1165/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 56ed3bc71e00..e7dbd2dd202e 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -19,295 +19,22 @@
19#include "bitfield.h" 19#include "bitfield.h"
20#include "efx.h" 20#include "efx.h"
21#include "nic.h" 21#include "nic.h"
22#include "regs.h" 22#include "farch_regs.h"
23#include "io.h" 23#include "io.h"
24#include "workarounds.h" 24#include "workarounds.h"
25 25
26/************************************************************************** 26/**************************************************************************
27 * 27 *
28 * Configurable values
29 *
30 **************************************************************************
31 */
32
33/* This is set to 16 for a good reason. In summary, if larger than
34 * 16, the descriptor cache holds more than a default socket
35 * buffer's worth of packets (for UDP we can only have at most one
36 * socket buffer's worth outstanding). This combined with the fact
37 * that we only get 1 TX event per descriptor cache means the NIC
38 * goes idle.
39 */
40#define TX_DC_ENTRIES 16
41#define TX_DC_ENTRIES_ORDER 1
42
43#define RX_DC_ENTRIES 64
44#define RX_DC_ENTRIES_ORDER 3
45
46/* If EFX_MAX_INT_ERRORS internal errors occur within
47 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
48 * disable it.
49 */
50#define EFX_INT_ERROR_EXPIRE 3600
51#define EFX_MAX_INT_ERRORS 5
52
53/* Depth of RX flush request fifo */
54#define EFX_RX_FLUSH_COUNT 4
55
56/* Driver generated events */
57#define _EFX_CHANNEL_MAGIC_TEST 0x000101
58#define _EFX_CHANNEL_MAGIC_FILL 0x000102
59#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
60#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
61
62#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
63#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
64
65#define EFX_CHANNEL_MAGIC_TEST(_channel) \
66 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
67#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
69 efx_rx_queue_index(_rx_queue))
70#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
71 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
72 efx_rx_queue_index(_rx_queue))
73#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
74 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
75 (_tx_queue)->queue)
76
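A worked example of the encoding: a test event for channel 3 is _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, 3) = (0x000101 << 8) | 3 = 0x00010103, and _EFX_CHANNEL_MAGIC_CODE(0x00010103) = 0x00010103 >> 8 = 0x000101 recovers the code for dispatch.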
77static void efx_magic_event(struct efx_channel *channel, u32 magic);
78
79/**************************************************************************
80 *
81 * Solarstorm hardware access
82 *
83 **************************************************************************/
84
85static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
86 unsigned int index)
87{
88 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
89 value, index);
90}
91
92/* Read the current event from the event queue */
93static inline efx_qword_t *efx_event(struct efx_channel *channel,
94 unsigned int index)
95{
96 return ((efx_qword_t *) (channel->eventq.addr)) +
97 (index & channel->eventq_mask);
98}
99
100/* See if an event is present
101 *
102 * We check both the high and low dword of the event for all ones. We
103 * wrote all ones when we cleared the event, and no valid event can
104 * have all ones in either its high or low dwords. This approach is
105 * robust against reordering.
106 *
107 * Note that using a single 64-bit comparison is incorrect; even
108 * though the CPU read will be atomic, the DMA write may not be.
109 */
110static inline int efx_event_present(efx_qword_t *event)
111{
112 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
113 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
114}
115
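To make the all-ones convention concrete, an event-draining loop built on the two helpers above could look like the following (an illustrative sketch, not the driver's own dispatch loop):

static int example_drain_events(struct efx_channel *channel, int quota)
{
        unsigned int read_ptr = channel->eventq_read_ptr;
        int spent = 0;

        while (spent < quota) {
                efx_qword_t *p_event = efx_event(channel, read_ptr);

                if (!efx_event_present(p_event))
                        break;          /* ring is empty */
                /* ... decode and handle *p_event, then mark it
                 * consumed by writing all ones back to it ...
                 */
                ++read_ptr;
                ++spent;
        }
        channel->eventq_read_ptr = read_ptr;
        return spent;
}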
116static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
117 const efx_oword_t *mask)
118{
119 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
120 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
121}
122
123int efx_nic_test_registers(struct efx_nic *efx,
124 const struct efx_nic_register_test *regs,
125 size_t n_regs)
126{
127 unsigned address = 0, i, j;
128 efx_oword_t mask, imask, original, reg, buf;
129
130 for (i = 0; i < n_regs; ++i) {
131 address = regs[i].address;
132 mask = imask = regs[i].mask;
133 EFX_INVERT_OWORD(imask);
134
135 efx_reado(efx, &original, address);
136
137 /* bit sweep on and off */
138 for (j = 0; j < 128; j++) {
139 if (!EFX_EXTRACT_OWORD32(mask, j, j))
140 continue;
141
142 /* Test this testable bit can be set in isolation */
143 EFX_AND_OWORD(reg, original, mask);
144 EFX_SET_OWORD32(reg, j, j, 1);
145
146 efx_writeo(efx, &reg, address);
147 efx_reado(efx, &buf, address);
148
149 if (efx_masked_compare_oword(&reg, &buf, &mask))
150 goto fail;
151
152 /* Test this testable bit can be cleared in isolation */
153 EFX_OR_OWORD(reg, original, mask);
154 EFX_SET_OWORD32(reg, j, j, 0);
155
156 efx_writeo(efx, &reg, address);
157 efx_reado(efx, &buf, address);
158
159 if (efx_masked_compare_oword(&reg, &buf, &mask))
160 goto fail;
161 }
162
163 efx_writeo(efx, &original, address);
164 }
165
166 return 0;
167
168fail:
169 netif_err(efx, hw, efx->net_dev,
170 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
171 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
172 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
173 return -EIO;
174}
175
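Each table entry pairs a register address with a mask of the bits that are safe to sweep. In the spirit of the driver's Falcon test tables (treat the exact values here as illustrative):

static const struct efx_nic_register_test example_register_tests[] = {
        { FR_AZ_RX_CFG, EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0, 0) },
};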
176/**************************************************************************
177 *
178 * Special buffer handling
179 * Special buffers are used for event queues and the TX and RX
180 * descriptor rings.
181 *
182 *************************************************************************/
183
184/*
185 * Initialise a special buffer
186 *
187 * This will define a buffer (previously allocated via
188 * efx_alloc_special_buffer()) in the buffer table, allowing
189 * it to be used for event queues, descriptor rings etc.
190 */
191static void
192efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
193{
194 efx_qword_t buf_desc;
195 unsigned int index;
196 dma_addr_t dma_addr;
197 int i;
198
199 EFX_BUG_ON_PARANOID(!buffer->addr);
200
201 /* Write buffer descriptors to NIC */
202 for (i = 0; i < buffer->entries; i++) {
203 index = buffer->index + i;
204 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
205 netif_dbg(efx, probe, efx->net_dev,
206 "mapping special buffer %d at %llx\n",
207 index, (unsigned long long)dma_addr);
208 EFX_POPULATE_QWORD_3(buf_desc,
209 FRF_AZ_BUF_ADR_REGION, 0,
210 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
211 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
212 efx_write_buf_tbl(efx, &buf_desc, index);
213 }
214}
215
216/* Unmaps a buffer and clears the buffer table entries */
217static void
218efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
219{
220 efx_oword_t buf_tbl_upd;
221 unsigned int start = buffer->index;
222 unsigned int end = (buffer->index + buffer->entries - 1);
223
224 if (!buffer->entries)
225 return;
226
227 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
228 buffer->index, buffer->index + buffer->entries - 1);
229
230 EFX_POPULATE_OWORD_4(buf_tbl_upd,
231 FRF_AZ_BUF_UPD_CMD, 0,
232 FRF_AZ_BUF_CLR_CMD, 1,
233 FRF_AZ_BUF_CLR_END_ID, end,
234 FRF_AZ_BUF_CLR_START_ID, start);
235 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
236}
237
238/*
239 * Allocate a new special buffer
240 *
241 * This allocates memory for a new buffer, clears it and allocates a
242 * new buffer ID range. It does not write into the buffer table.
243 *
244 * This call will allocate 4KB buffers, since 8KB buffers can't be
245 * used for event queues and descriptor rings.
246 */
247static int efx_alloc_special_buffer(struct efx_nic *efx,
248 struct efx_special_buffer *buffer,
249 unsigned int len)
250{
251 len = ALIGN(len, EFX_BUF_SIZE);
252
253 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
254 &buffer->dma_addr, GFP_KERNEL);
255 if (!buffer->addr)
256 return -ENOMEM;
257 buffer->len = len;
258 buffer->entries = len / EFX_BUF_SIZE;
259 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
260
261 /* Select new buffer ID */
262 buffer->index = efx->next_buffer_table;
263 efx->next_buffer_table += buffer->entries;
264#ifdef CONFIG_SFC_SRIOV
265 BUG_ON(efx_sriov_enabled(efx) &&
266 efx->vf_buftbl_base < efx->next_buffer_table);
267#endif
268
269 netif_dbg(efx, probe, efx->net_dev,
270 "allocating special buffers %d-%d at %llx+%x "
271 "(virt %p phys %llx)\n", buffer->index,
272 buffer->index + buffer->entries - 1,
273 (u64)buffer->dma_addr, len,
274 buffer->addr, (u64)virt_to_phys(buffer->addr));
275
276 return 0;
277}
278
279static void
280efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
281{
282 if (!buffer->addr)
283 return;
284
285 netif_dbg(efx, hw, efx->net_dev,
286 "deallocating special buffers %d-%d at %llx+%x "
287 "(virt %p phys %llx)\n", buffer->index,
288 buffer->index + buffer->entries - 1,
289 (u64)buffer->dma_addr, buffer->len,
290 buffer->addr, (u64)virt_to_phys(buffer->addr));
291
292 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
293 buffer->dma_addr);
294 buffer->addr = NULL;
295 buffer->entries = 0;
296}
297
298/**************************************************************************
299 *
300 * Generic buffer handling 28 * Generic buffer handling
301 * These buffers are used for interrupt status, MAC stats, etc. 29 * These buffers are used for interrupt status, MAC stats, etc.
302 * 30 *
303 **************************************************************************/ 31 **************************************************************************/
304 32
305int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 33int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
306 unsigned int len) 34 unsigned int len, gfp_t gfp_flags)
307{ 35{
308 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 36 buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
309 &buffer->dma_addr, 37 &buffer->dma_addr, gfp_flags);
310 GFP_ATOMIC | __GFP_ZERO);
311 if (!buffer->addr) 38 if (!buffer->addr)
312 return -ENOMEM; 39 return -ENOMEM;
313 buffer->len = len; 40 buffer->len = len;
@@ -323,1057 +50,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
323 } 50 }
324} 51}
325 52
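With the new gfp_flags parameter the caller chooses the allocation context; a usage sketch (the buffer and size are picked for illustration):

        /* process context, allowed to sleep */
        rc = efx_nic_alloc_buffer(efx, &efx->irq_status,
                                  sizeof(efx_oword_t), GFP_KERNEL);
        if (rc)
                return rc;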
326/**************************************************************************
327 *
328 * TX path
329 *
330 **************************************************************************/
331
332/* Returns a pointer to the specified transmit descriptor in the TX
333 * descriptor queue belonging to the specified channel.
334 */
335static inline efx_qword_t *
336efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
337{
338 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
339}
340
341/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
342static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
343{
344 unsigned write_ptr;
345 efx_dword_t reg;
346
347 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
348 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
349 efx_writed_page(tx_queue->efx, &reg,
350 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
351}
352
353/* Write pointer and first descriptor for TX descriptor ring */
354static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
355 const efx_qword_t *txd)
356{
357 unsigned write_ptr;
358 efx_oword_t reg;
359
360 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
361 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
362
363 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
364 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
365 FRF_AZ_TX_DESC_WPTR, write_ptr);
366 reg.qword[0] = *txd;
367 efx_writeo_page(tx_queue->efx, &reg,
368 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
369}
370
371static inline bool
372efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
373{
374 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
375
376 if (empty_read_count == 0)
377 return false;
378
379 tx_queue->empty_read_count = 0;
380 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
381 && tx_queue->write_count - write_count == 1;
382}
383
384/* For each entry inserted into the software descriptor ring, create a
385 * descriptor in the hardware TX descriptor ring (in host memory), and
386 * write a doorbell.
387 */
388void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
389{
390
391 struct efx_tx_buffer *buffer;
392 efx_qword_t *txd;
393 unsigned write_ptr;
394 unsigned old_write_count = tx_queue->write_count;
395
396 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
397
398 do {
399 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
400 buffer = &tx_queue->buffer[write_ptr];
401 txd = efx_tx_desc(tx_queue, write_ptr);
402 ++tx_queue->write_count;
403
404 /* Create TX descriptor ring entry */
405 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
406 EFX_POPULATE_QWORD_4(*txd,
407 FSF_AZ_TX_KER_CONT,
408 buffer->flags & EFX_TX_BUF_CONT,
409 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
410 FSF_AZ_TX_KER_BUF_REGION, 0,
411 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
412 } while (tx_queue->write_count != tx_queue->insert_count);
413
414 wmb(); /* Ensure descriptors are written before they are fetched */
415
416 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
417 txd = efx_tx_desc(tx_queue,
418 old_write_count & tx_queue->ptr_mask);
419 efx_push_tx_desc(tx_queue, txd);
420 ++tx_queue->pushes;
421 } else {
422 efx_notify_tx_desc(tx_queue);
423 }
424}
425
426/* Allocate hardware resources for a TX queue */
427int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
428{
429 struct efx_nic *efx = tx_queue->efx;
430 unsigned entries;
431
432 entries = tx_queue->ptr_mask + 1;
433 return efx_alloc_special_buffer(efx, &tx_queue->txd,
434 entries * sizeof(efx_qword_t));
435}
436
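A worked sizing example (illustrative): a TX ring with ptr_mask 511 has 512 entries, so the special buffer is 512 * sizeof(efx_qword_t) = 512 * 8 = 4096 bytes, exactly one 4KB chunk and hence a single buffer-table entry.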
437void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
438{
439 struct efx_nic *efx = tx_queue->efx;
440 efx_oword_t reg;
441
442 /* Pin TX descriptor ring */
443 efx_init_special_buffer(efx, &tx_queue->txd);
444
445 /* Push TX descriptor ring to card */
446 EFX_POPULATE_OWORD_10(reg,
447 FRF_AZ_TX_DESCQ_EN, 1,
448 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
449 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
450 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
451 FRF_AZ_TX_DESCQ_EVQ_ID,
452 tx_queue->channel->channel,
453 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
454 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
455 FRF_AZ_TX_DESCQ_SIZE,
456 __ffs(tx_queue->txd.entries),
457 FRF_AZ_TX_DESCQ_TYPE, 0,
458 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
459
460 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
461 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
462 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
463 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
464 !csum);
465 }
466
467 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
468 tx_queue->queue);
469
470 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
471 /* Only 128 bits in this register */
472 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
473
474 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
475 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
476 __clear_bit_le(tx_queue->queue, &reg);
477 else
478 __set_bit_le(tx_queue->queue, &reg);
479 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
480 }
481
482 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
483 EFX_POPULATE_OWORD_1(reg,
484 FRF_BZ_TX_PACE,
485 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
486 FFE_BZ_TX_PACE_OFF :
487 FFE_BZ_TX_PACE_RESERVED);
488 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
489 tx_queue->queue);
490 }
491}
492
493static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
494{
495 struct efx_nic *efx = tx_queue->efx;
496 efx_oword_t tx_flush_descq;
497
498 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
499 atomic_set(&tx_queue->flush_outstanding, 1);
500
501 EFX_POPULATE_OWORD_2(tx_flush_descq,
502 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
503 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
504 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
505}
506
507void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
508{
509 struct efx_nic *efx = tx_queue->efx;
510 efx_oword_t tx_desc_ptr;
511
512 /* Remove TX descriptor ring from card */
513 EFX_ZERO_OWORD(tx_desc_ptr);
514 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
515 tx_queue->queue);
516
517 /* Unpin TX descriptor ring */
518 efx_fini_special_buffer(efx, &tx_queue->txd);
519}
520
521/* Free buffers backing TX queue */
522void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
523{
524 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
525}
526
527/**************************************************************************
528 *
529 * RX path
530 *
531 **************************************************************************/
532
533/* Returns a pointer to the specified descriptor in the RX descriptor queue */
534static inline efx_qword_t *
535efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
536{
537 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
538}
539
540/* This creates an entry in the RX descriptor queue */
541static inline void
542efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
543{
544 struct efx_rx_buffer *rx_buf;
545 efx_qword_t *rxd;
546
547 rxd = efx_rx_desc(rx_queue, index);
548 rx_buf = efx_rx_buffer(rx_queue, index);
549 EFX_POPULATE_QWORD_3(*rxd,
550 FSF_AZ_RX_KER_BUF_SIZE,
551 rx_buf->len -
552 rx_queue->efx->type->rx_buffer_padding,
553 FSF_AZ_RX_KER_BUF_REGION, 0,
554 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
555}
556
557/* This writes to the RX_DESC_WPTR register for the specified receive
558 * descriptor ring.
559 */
560void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
561{
562 struct efx_nic *efx = rx_queue->efx;
563 efx_dword_t reg;
564 unsigned write_ptr;
565
566 while (rx_queue->notified_count != rx_queue->added_count) {
567 efx_build_rx_desc(
568 rx_queue,
569 rx_queue->notified_count & rx_queue->ptr_mask);
570 ++rx_queue->notified_count;
571 }
572
573 wmb();
574 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
575 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
576 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
577 efx_rx_queue_index(rx_queue));
578}
579
580int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
581{
582 struct efx_nic *efx = rx_queue->efx;
583 unsigned entries;
584
585 entries = rx_queue->ptr_mask + 1;
586 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
587 entries * sizeof(efx_qword_t));
588}
589
590void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
591{
592 efx_oword_t rx_desc_ptr;
593 struct efx_nic *efx = rx_queue->efx;
594 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
595 bool iscsi_digest_en = is_b0;
596 bool jumbo_en;
597
598 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
599 * DMA to continue after a PCIe page boundary (and scattering
600 * is not possible). In Falcon B0 and Siena, it enables
601 * scatter.
602 */
603 jumbo_en = !is_b0 || efx->rx_scatter;
604
605 netif_dbg(efx, hw, efx->net_dev,
606 "RX queue %d ring in special buffers %d-%d\n",
607 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
608 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
609
610 rx_queue->scatter_n = 0;
611
612 /* Pin RX descriptor ring */
613 efx_init_special_buffer(efx, &rx_queue->rxd);
614
615 /* Push RX descriptor ring to card */
616 EFX_POPULATE_OWORD_10(rx_desc_ptr,
617 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
618 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
619 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
620 FRF_AZ_RX_DESCQ_EVQ_ID,
621 efx_rx_queue_channel(rx_queue)->channel,
622 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
623 FRF_AZ_RX_DESCQ_LABEL,
624 efx_rx_queue_index(rx_queue),
625 FRF_AZ_RX_DESCQ_SIZE,
626 __ffs(rx_queue->rxd.entries),
627 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
628 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
629 FRF_AZ_RX_DESCQ_EN, 1);
630 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
631 efx_rx_queue_index(rx_queue));
632}
633
634static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
635{
636 struct efx_nic *efx = rx_queue->efx;
637 efx_oword_t rx_flush_descq;
638
639 EFX_POPULATE_OWORD_2(rx_flush_descq,
640 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
641 FRF_AZ_RX_FLUSH_DESCQ,
642 efx_rx_queue_index(rx_queue));
643 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
644}
645
646void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
647{
648 efx_oword_t rx_desc_ptr;
649 struct efx_nic *efx = rx_queue->efx;
650
651 /* Remove RX descriptor ring from card */
652 EFX_ZERO_OWORD(rx_desc_ptr);
653 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
654 efx_rx_queue_index(rx_queue));
655
656 /* Unpin RX descriptor ring */
657 efx_fini_special_buffer(efx, &rx_queue->rxd);
658}
659
660/* Free buffers backing RX queue */
661void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
662{
663 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
664}
665
666/**************************************************************************
667 *
668 * Flush handling
669 *
670 **************************************************************************/
671
672/* efx_nic_flush_queues() must be woken up when all flushes are completed,
673 * or more RX flushes can be kicked off.
674 */
675static bool efx_flush_wake(struct efx_nic *efx)
676{
677 /* Ensure that all updates are visible to efx_nic_flush_queues() */
678 smp_mb();
679
680 return (atomic_read(&efx->drain_pending) == 0 ||
681 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
682 && atomic_read(&efx->rxq_flush_pending) > 0));
683}
684
685static bool efx_check_tx_flush_complete(struct efx_nic *efx)
686{
687 bool i = true;
688 efx_oword_t txd_ptr_tbl;
689 struct efx_channel *channel;
690 struct efx_tx_queue *tx_queue;
691
692 efx_for_each_channel(channel, efx) {
693 efx_for_each_channel_tx_queue(tx_queue, channel) {
694 efx_reado_table(efx, &txd_ptr_tbl,
695 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
696 if (EFX_OWORD_FIELD(txd_ptr_tbl,
697 FRF_AZ_TX_DESCQ_FLUSH) ||
698 EFX_OWORD_FIELD(txd_ptr_tbl,
699 FRF_AZ_TX_DESCQ_EN)) {
700 netif_dbg(efx, hw, efx->net_dev,
701 "flush did not complete on TXQ %d\n",
702 tx_queue->queue);
703 i = false;
704 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
705 1, 0)) {
706 /* The flush is complete, but we didn't
707 * receive a flush completion event
708 */
709 netif_dbg(efx, hw, efx->net_dev,
710 "flush complete on TXQ %d, so drain "
711 "the queue\n", tx_queue->queue);
712 /* Don't need to increment drain_pending as it
713 * has already been incremented for the queues
714 * which did not drain
715 */
716 efx_magic_event(channel,
717 EFX_CHANNEL_MAGIC_TX_DRAIN(
718 tx_queue));
719 }
720 }
721 }
722
723 return i;
724}
725
726/* Flush all the transmit queues, and continue flushing receive queues until
727 * they're all flushed. Wait for the DRAIN events to be received so that there
728 * are no more RX and TX events left on any channel. */
729int efx_nic_flush_queues(struct efx_nic *efx)
730{
731 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
732 struct efx_channel *channel;
733 struct efx_rx_queue *rx_queue;
734 struct efx_tx_queue *tx_queue;
735 int rc = 0;
736
737 efx->type->prepare_flush(efx);
738
739 efx_for_each_channel(channel, efx) {
740 efx_for_each_channel_tx_queue(tx_queue, channel) {
741 atomic_inc(&efx->drain_pending);
742 efx_flush_tx_queue(tx_queue);
743 }
744 efx_for_each_channel_rx_queue(rx_queue, channel) {
745 atomic_inc(&efx->drain_pending);
746 rx_queue->flush_pending = true;
747 atomic_inc(&efx->rxq_flush_pending);
748 }
749 }
750
751 while (timeout && atomic_read(&efx->drain_pending) > 0) {
752 /* If SRIOV is enabled, then offload receive queue flushing to
753 * the firmware (though we will still have to poll for
754 * completion). If that fails, fall back to the old scheme.
755 */
756 if (efx_sriov_enabled(efx)) {
757 rc = efx_mcdi_flush_rxqs(efx);
758 if (!rc)
759 goto wait;
760 }
761
762 /* The hardware supports four concurrent rx flushes, each of
763 * which may need to be retried if there is an outstanding
764 * descriptor fetch
765 */
766 efx_for_each_channel(channel, efx) {
767 efx_for_each_channel_rx_queue(rx_queue, channel) {
768 if (atomic_read(&efx->rxq_flush_outstanding) >=
769 EFX_RX_FLUSH_COUNT)
770 break;
771
772 if (rx_queue->flush_pending) {
773 rx_queue->flush_pending = false;
774 atomic_dec(&efx->rxq_flush_pending);
775 atomic_inc(&efx->rxq_flush_outstanding);
776 efx_flush_rx_queue(rx_queue);
777 }
778 }
779 }
780
781 wait:
782 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
783 timeout);
784 }
785
786 if (atomic_read(&efx->drain_pending) &&
787 !efx_check_tx_flush_complete(efx)) {
788 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
789 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
790 atomic_read(&efx->rxq_flush_outstanding),
791 atomic_read(&efx->rxq_flush_pending));
792 rc = -ETIMEDOUT;
793
794 atomic_set(&efx->drain_pending, 0);
795 atomic_set(&efx->rxq_flush_pending, 0);
796 atomic_set(&efx->rxq_flush_outstanding, 0);
797 }
798
799 efx->type->finish_flush(efx);
800
801 return rc;
802}
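
The function above is the whole teardown handshake: kick every flush, then sleep on flush_wq until the drain/flush counters say the datapath is quiet. For orientation, a minimal sketch of how a caller might sequence this while stopping the datapath; the my_* helper names are hypothetical, only efx_nic_flush_queues() is from this file:

	/* Simplified, hypothetical datapath-stop sequence */
	static void my_stop_datapath(struct efx_nic *efx)
	{
		my_quiesce_tx_rx(efx);			/* hypothetical: stop queueing new DMA work */
		if (efx_nic_flush_queues(efx))		/* 0 on success, -ETIMEDOUT on failure */
			netif_err(efx, drv, efx->net_dev,
				  "failed to flush queues\n");
		my_free_queue_buffers(efx);		/* hypothetical: rings are now idle */
	}
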
803
804/**************************************************************************
805 *
806 * Event queue processing
807 * Event queues are processed by per-channel tasklets.
808 *
809 **************************************************************************/
810
811/* Update a channel's event queue's read pointer (RPTR) register
812 *
813 * This writes the EVQ_RPTR_REG register for the specified channel's
814 * event queue.
815 */
816void efx_nic_eventq_read_ack(struct efx_channel *channel)
817{
818 efx_dword_t reg;
819 struct efx_nic *efx = channel->efx;
820
821 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
822 channel->eventq_read_ptr & channel->eventq_mask);
823
824 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
825 * of 4 bytes, but it is really 16 bytes just like later revisions.
826 */
827 efx_writed(efx, &reg,
828 efx->type->evq_rptr_tbl_base +
829 FR_BZ_EVQ_RPTR_STEP * channel->channel);
830}
831
832/* Use HW to insert a SW defined event */
833void efx_generate_event(struct efx_nic *efx, unsigned int evq,
834 efx_qword_t *event)
835{
836 efx_oword_t drv_ev_reg;
837
838 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
839 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
840 drv_ev_reg.u32[0] = event->u32[0];
841 drv_ev_reg.u32[1] = event->u32[1];
842 drv_ev_reg.u32[2] = 0;
843 drv_ev_reg.u32[3] = 0;
844 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
845 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
846}
847
848static void efx_magic_event(struct efx_channel *channel, u32 magic)
849{
850 efx_qword_t event;
851
852 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
853 FSE_AZ_EV_CODE_DRV_GEN_EV,
854 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
855 efx_generate_event(channel->efx, channel->channel, &event);
856}
857
858/* Handle a transmit completion event
859 *
860 * The NIC batches TX completion events; the message we receive is of
861 * the form "complete all TX events up to this index".
862 */
863static int
864efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
865{
866 unsigned int tx_ev_desc_ptr;
867 unsigned int tx_ev_q_label;
868 struct efx_tx_queue *tx_queue;
869 struct efx_nic *efx = channel->efx;
870 int tx_packets = 0;
871
872 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
873 return 0;
874
875 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
876 /* Transmit completion */
877 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
878 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
879 tx_queue = efx_channel_get_tx_queue(
880 channel, tx_ev_q_label % EFX_TXQ_TYPES);
881 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
882 tx_queue->ptr_mask);
883 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
884 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
885 /* Rewrite the FIFO write pointer */
886 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
887 tx_queue = efx_channel_get_tx_queue(
888 channel, tx_ev_q_label % EFX_TXQ_TYPES);
889
890 netif_tx_lock(efx->net_dev);
891 efx_notify_tx_desc(tx_queue);
892 netif_tx_unlock(efx->net_dev);
893 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
894 EFX_WORKAROUND_10727(efx)) {
895 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
896 } else {
897 netif_err(efx, tx_err, efx->net_dev,
898 "channel %d unexpected TX event "
899 EFX_QWORD_FMT"\n", channel->channel,
900 EFX_QWORD_VAL(*event));
901 }
902
903 return tx_packets;
904}
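
A quick worked example of the wraparound arithmetic above, with illustrative values: on a 512-entry ring (ptr_mask = 511), read_count = 500 and a completion event carrying tx_ev_desc_ptr = 10 gives tx_packets = (10 - 500) & 511 = 22, i.e. the event acknowledges entries 500..511 and 0..9 across the ring wrap.
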
905
906/* Detect errors included in the rx_ev_pkt_ok bit. */
907static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
908 const efx_qword_t *event)
909{
910 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
911 struct efx_nic *efx = rx_queue->efx;
912 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
913 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
914 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
915 bool rx_ev_other_err, rx_ev_pause_frm;
916 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
917 unsigned rx_ev_pkt_type;
918
919 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
920 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
921 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
922 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
923 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
924 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
925 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
926 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
927 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
928 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
929 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
930 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
931 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
932 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
933 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
934
935 /* Every error apart from tobe_disc and pause_frm */
936 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
937 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
938 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
939
940 /* Count errors that are not in MAC stats. Ignore expected
941 * checksum errors during self-test. */
942 if (rx_ev_frm_trunc)
943 ++channel->n_rx_frm_trunc;
944 else if (rx_ev_tobe_disc)
945 ++channel->n_rx_tobe_disc;
946 else if (!efx->loopback_selftest) {
947 if (rx_ev_ip_hdr_chksum_err)
948 ++channel->n_rx_ip_hdr_chksum_err;
949 else if (rx_ev_tcp_udp_chksum_err)
950 ++channel->n_rx_tcp_udp_chksum_err;
951 }
952
953 /* TOBE_DISC is expected on unicast mismatches; don't print out an
954 * error message. FRM_TRUNC indicates RXDP dropped the packet due
955 * to a FIFO overflow.
956 */
957#ifdef DEBUG
958 if (rx_ev_other_err && net_ratelimit()) {
959 netif_dbg(efx, rx_err, efx->net_dev,
960 " RX queue %d unexpected RX event "
961 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
962 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
963 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
964 rx_ev_ip_hdr_chksum_err ?
965 " [IP_HDR_CHKSUM_ERR]" : "",
966 rx_ev_tcp_udp_chksum_err ?
967 " [TCP_UDP_CHKSUM_ERR]" : "",
968 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
969 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
970 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
971 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
972 rx_ev_pause_frm ? " [PAUSE]" : "");
973 }
974#endif
975
976 /* The frame must be discarded if any of these are true. */
977 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
978 rx_ev_tobe_disc | rx_ev_pause_frm) ?
979 EFX_RX_PKT_DISCARD : 0;
980}
981
982/* Handle receive events that are not in-order. Return true if this
983 * can be handled as a partial packet discard, false if it's more
984 * serious.
985 */
986static bool
987efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
988{
989 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
990 struct efx_nic *efx = rx_queue->efx;
991 unsigned expected, dropped;
992
993 if (rx_queue->scatter_n &&
994 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
995 rx_queue->ptr_mask)) {
996 ++channel->n_rx_nodesc_trunc;
997 return true;
998 }
999
1000 expected = rx_queue->removed_count & rx_queue->ptr_mask;
1001 dropped = (index - expected) & rx_queue->ptr_mask;
1002 netif_info(efx, rx_err, efx->net_dev,
1003 "dropped %d events (index=%d expected=%d)\n",
1004 dropped, index, expected);
1005
1006 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
1007 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1008 return false;
1009}
1010
1011/* Handle a packet received event
1012 *
1013 * The NIC gives a "discard" flag if it's a unicast packet with the
1014 * wrong destination address.
1015 * Also "is multicast" and "matches multicast filter" flags can be used to
1016 * discard non-matching multicast packets.
1017 */
1018static void
1019efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
1020{
1021 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
1022 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
1023 unsigned expected_ptr;
1024 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
1025 u16 flags;
1026 struct efx_rx_queue *rx_queue;
1027 struct efx_nic *efx = channel->efx;
1028
1029 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1030 return;
1031
1032 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
1033 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
1034 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
1035 channel->channel);
1036
1037 rx_queue = efx_channel_get_rx_queue(channel);
1038
1039 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1040 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
1041 rx_queue->ptr_mask);
1042
1043 /* Check for partial drops and other errors */
1044 if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
1045 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
1046 if (rx_ev_desc_ptr != expected_ptr &&
1047 !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1048 return;
1049
1050 /* Discard all pending fragments */
1051 if (rx_queue->scatter_n) {
1052 efx_rx_packet(
1053 rx_queue,
1054 rx_queue->removed_count & rx_queue->ptr_mask,
1055 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
1056 rx_queue->removed_count += rx_queue->scatter_n;
1057 rx_queue->scatter_n = 0;
1058 }
1059
1060 /* Return if there is no new fragment */
1061 if (rx_ev_desc_ptr != expected_ptr)
1062 return;
1063
1064 /* Discard new fragment if not SOP */
1065 if (!rx_ev_sop) {
1066 efx_rx_packet(
1067 rx_queue,
1068 rx_queue->removed_count & rx_queue->ptr_mask,
1069 1, 0, EFX_RX_PKT_DISCARD);
1070 ++rx_queue->removed_count;
1071 return;
1072 }
1073 }
1074
1075 ++rx_queue->scatter_n;
1076 if (rx_ev_cont)
1077 return;
1078
1079 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1080 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1081 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1082
1083 if (likely(rx_ev_pkt_ok)) {
1084 /* If packet is marked as OK then we can rely on the
1085 * hardware checksum and classification.
1086 */
1087 flags = 0;
1088 switch (rx_ev_hdr_type) {
1089 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1090 flags |= EFX_RX_PKT_TCP;
1091 /* fall through */
1092 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1093 flags |= EFX_RX_PKT_CSUMMED;
1094 /* fall through */
1095 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1096 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1097 break;
1098 }
1099 } else {
1100 flags = efx_handle_rx_not_ok(rx_queue, event);
1101 }
1102
1103 /* Detect multicast packets that didn't match the filter */
1104 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1105 if (rx_ev_mcast_pkt) {
1106 unsigned int rx_ev_mcast_hash_match =
1107 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1108
1109 if (unlikely(!rx_ev_mcast_hash_match)) {
1110 ++channel->n_rx_mcast_mismatch;
1111 flags |= EFX_RX_PKT_DISCARD;
1112 }
1113 }
1114
1115 channel->irq_mod_score += 2;
1116
1117 /* Handle received packet */
1118 efx_rx_packet(rx_queue,
1119 rx_queue->removed_count & rx_queue->ptr_mask,
1120 rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1121 rx_queue->removed_count += rx_queue->scatter_n;
1122 rx_queue->scatter_n = 0;
1123}
1124
1125/* If this flush done event corresponds to a &struct efx_tx_queue, then
1126 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1127 * of all transmit completions.
1128 */
1129static void
1130efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1131{
1132 struct efx_tx_queue *tx_queue;
1133 int qid;
1134
1135 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1136 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1137 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1138 qid % EFX_TXQ_TYPES);
1139 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1140 efx_magic_event(tx_queue->channel,
1141 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1142 }
1143 }
1144}
1145
1146/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1147 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1148 * the RX queue back to the mask of RX queues in need of flushing.
1149 */
1150static void
1151efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1152{
1153 struct efx_channel *channel;
1154 struct efx_rx_queue *rx_queue;
1155 int qid;
1156 bool failed;
1157
1158 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1159 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1160 if (qid >= efx->n_channels)
1161 return;
1162 channel = efx_get_channel(efx, qid);
1163 if (!efx_channel_has_rx_queue(channel))
1164 return;
1165 rx_queue = efx_channel_get_rx_queue(channel);
1166
1167 if (failed) {
1168 netif_info(efx, hw, efx->net_dev,
1169 "RXQ %d flush retry\n", qid);
1170 rx_queue->flush_pending = true;
1171 atomic_inc(&efx->rxq_flush_pending);
1172 } else {
1173 efx_magic_event(efx_rx_queue_channel(rx_queue),
1174 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1175 }
1176 atomic_dec(&efx->rxq_flush_outstanding);
1177 if (efx_flush_wake(efx))
1178 wake_up(&efx->flush_wq);
1179}
1180
1181static void
1182efx_handle_drain_event(struct efx_channel *channel)
1183{
1184 struct efx_nic *efx = channel->efx;
1185
1186 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1187 atomic_dec(&efx->drain_pending);
1188 if (efx_flush_wake(efx))
1189 wake_up(&efx->flush_wq);
1190}
1191
1192static void
1193efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
1194{
1195 struct efx_nic *efx = channel->efx;
1196 struct efx_rx_queue *rx_queue =
1197 efx_channel_has_rx_queue(channel) ?
1198 efx_channel_get_rx_queue(channel) : NULL;
1199 unsigned magic, code;
1200
1201 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1202 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1203
1204 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1205 channel->event_test_cpu = raw_smp_processor_id();
1206 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1207 /* The queue must be empty, so we won't receive any rx
1208 * events, so efx_process_channel() won't refill the
1209 * queue. Refill it here */
1210 efx_fast_push_rx_descriptors(rx_queue);
1211 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1212 rx_queue->enabled = false;
1213 efx_handle_drain_event(channel);
1214 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1215 efx_handle_drain_event(channel);
1216 } else {
1217 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1218 "generated event "EFX_QWORD_FMT"\n",
1219 channel->channel, EFX_QWORD_VAL(*event));
1220 }
1221}
1222
1223static void
1224efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1225{
1226 struct efx_nic *efx = channel->efx;
1227 unsigned int ev_sub_code;
1228 unsigned int ev_sub_data;
1229
1230 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1231 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1232
1233 switch (ev_sub_code) {
1234 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1235 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1236 channel->channel, ev_sub_data);
1237 efx_handle_tx_flush_done(efx, event);
1238 efx_sriov_tx_flush_done(efx, event);
1239 break;
1240 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1241 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1242 channel->channel, ev_sub_data);
1243 efx_handle_rx_flush_done(efx, event);
1244 efx_sriov_rx_flush_done(efx, event);
1245 break;
1246 case FSE_AZ_EVQ_INIT_DONE_EV:
1247 netif_dbg(efx, hw, efx->net_dev,
1248 "channel %d EVQ %d initialised\n",
1249 channel->channel, ev_sub_data);
1250 break;
1251 case FSE_AZ_SRM_UPD_DONE_EV:
1252 netif_vdbg(efx, hw, efx->net_dev,
1253 "channel %d SRAM update done\n", channel->channel);
1254 break;
1255 case FSE_AZ_WAKE_UP_EV:
1256 netif_vdbg(efx, hw, efx->net_dev,
1257 "channel %d RXQ %d wakeup event\n",
1258 channel->channel, ev_sub_data);
1259 break;
1260 case FSE_AZ_TIMER_EV:
1261 netif_vdbg(efx, hw, efx->net_dev,
1262 "channel %d RX queue %d timer expired\n",
1263 channel->channel, ev_sub_data);
1264 break;
1265 case FSE_AA_RX_RECOVER_EV:
1266 netif_err(efx, rx_err, efx->net_dev,
1267 "channel %d seen DRIVER RX_RESET event. "
1268 "Resetting.\n", channel->channel);
1269 atomic_inc(&efx->rx_reset);
1270 efx_schedule_reset(efx,
1271 EFX_WORKAROUND_6555(efx) ?
1272 RESET_TYPE_RX_RECOVERY :
1273 RESET_TYPE_DISABLE);
1274 break;
1275 case FSE_BZ_RX_DSC_ERROR_EV:
1276 if (ev_sub_data < EFX_VI_BASE) {
1277 netif_err(efx, rx_err, efx->net_dev,
1278 "RX DMA Q %d reports descriptor fetch error."
1279 " RX Q %d is disabled.\n", ev_sub_data,
1280 ev_sub_data);
1281 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1282 } else
1283 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1284 break;
1285 case FSE_BZ_TX_DSC_ERROR_EV:
1286 if (ev_sub_data < EFX_VI_BASE) {
1287 netif_err(efx, tx_err, efx->net_dev,
1288 "TX DMA Q %d reports descriptor fetch error."
1289 " TX Q %d is disabled.\n", ev_sub_data,
1290 ev_sub_data);
1291 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1292 } else
1293 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1294 break;
1295 default:
1296 netif_vdbg(efx, hw, efx->net_dev,
1297 "channel %d unknown driver event code %d "
1298 "data %04x\n", channel->channel, ev_sub_code,
1299 ev_sub_data);
1300 break;
1301 }
1302}
1303
1304int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1305{
1306 struct efx_nic *efx = channel->efx;
1307 unsigned int read_ptr;
1308 efx_qword_t event, *p_event;
1309 int ev_code;
1310 int tx_packets = 0;
1311 int spent = 0;
1312
1313 read_ptr = channel->eventq_read_ptr;
1314
1315 for (;;) {
1316 p_event = efx_event(channel, read_ptr);
1317 event = *p_event;
1318
1319 if (!efx_event_present(&event))
1320 /* End of events */
1321 break;
1322
1323 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1324 "channel %d event is "EFX_QWORD_FMT"\n",
1325 channel->channel, EFX_QWORD_VAL(event));
1326
1327 /* Clear this event by marking it all ones */
1328 EFX_SET_QWORD(*p_event);
1329
1330 ++read_ptr;
1331
1332 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1333
1334 switch (ev_code) {
1335 case FSE_AZ_EV_CODE_RX_EV:
1336 efx_handle_rx_event(channel, &event);
1337 if (++spent == budget)
1338 goto out;
1339 break;
1340 case FSE_AZ_EV_CODE_TX_EV:
1341 tx_packets += efx_handle_tx_event(channel, &event);
1342 if (tx_packets > efx->txq_entries) {
1343 spent = budget;
1344 goto out;
1345 }
1346 break;
1347 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1348 efx_handle_generated_event(channel, &event);
1349 break;
1350 case FSE_AZ_EV_CODE_DRIVER_EV:
1351 efx_handle_driver_event(channel, &event);
1352 break;
1353 case FSE_CZ_EV_CODE_USER_EV:
1354 efx_sriov_event(channel, &event);
1355 break;
1356 case FSE_CZ_EV_CODE_MCDI_EV:
1357 efx_mcdi_process_event(channel, &event);
1358 break;
1359 case FSE_AZ_EV_CODE_GLOBAL_EV:
1360 if (efx->type->handle_global_event &&
1361 efx->type->handle_global_event(channel, &event))
1362 break;
1363 /* else fall through */
1364 default:
1365 netif_err(channel->efx, hw, channel->efx->net_dev,
1366 "channel %d unknown event type %d (data "
1367 EFX_QWORD_FMT ")\n", channel->channel,
1368 ev_code, EFX_QWORD_VAL(event));
1369 }
1370 }
1371
1372out:
1373 channel->eventq_read_ptr = read_ptr;
1374 return spent;
1375}
1376
1377/* Check whether an event is present in the eventq at the current
1378 * read pointer. Only useful for self-test.
1379 */
@@ -1382,323 +58,18 @@ bool efx_nic_event_present(struct efx_channel *channel)
1382 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1383}
1384
1385/* Allocate buffer table entries for event queue */
1386int efx_nic_probe_eventq(struct efx_channel *channel)
1387{
1388 struct efx_nic *efx = channel->efx;
1389 unsigned entries;
1390
1391 entries = channel->eventq_mask + 1;
1392 return efx_alloc_special_buffer(efx, &channel->eventq,
1393 entries * sizeof(efx_qword_t));
1394}
1395
1396void efx_nic_init_eventq(struct efx_channel *channel)
1397{
1398 efx_oword_t reg;
1399 struct efx_nic *efx = channel->efx;
1400
1401 netif_dbg(efx, hw, efx->net_dev,
1402 "channel %d event queue in special buffers %d-%d\n",
1403 channel->channel, channel->eventq.index,
1404 channel->eventq.index + channel->eventq.entries - 1);
1405
1406 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1407 EFX_POPULATE_OWORD_3(reg,
1408 FRF_CZ_TIMER_Q_EN, 1,
1409 FRF_CZ_HOST_NOTIFY_MODE, 0,
1410 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1411 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1412 }
1413
1414 /* Pin event queue buffer */
1415 efx_init_special_buffer(efx, &channel->eventq);
1416
1417 /* Fill event queue with all ones (i.e. empty events) */
1418 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1419
1420 /* Push event queue to card */
1421 EFX_POPULATE_OWORD_3(reg,
1422 FRF_AZ_EVQ_EN, 1,
1423 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1424 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1425 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1426 channel->channel);
1427
1428 efx->type->push_irq_moderation(channel);
1429}
1430
1431void efx_nic_fini_eventq(struct efx_channel *channel)
1432{
1433 efx_oword_t reg;
1434 struct efx_nic *efx = channel->efx;
1435
1436 /* Remove event queue from card */
1437 EFX_ZERO_OWORD(reg);
1438 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1439 channel->channel);
1440 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1441 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1442
1443 /* Unpin event queue */
1444 efx_fini_special_buffer(efx, &channel->eventq);
1445}
1446
1447/* Free buffers backing event queue */
1448void efx_nic_remove_eventq(struct efx_channel *channel)
1449{
1450 efx_free_special_buffer(channel->efx, &channel->eventq);
1451}
1452
1453
1454void efx_nic_event_test_start(struct efx_channel *channel)
1455{
1456 channel->event_test_cpu = -1;
1457 smp_wmb();
1458 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
  65 channel->efx->type->ev_test_generate(channel);
1459}
1460
1461void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1462{
1463 efx_magic_event(efx_rx_queue_channel(rx_queue),
1464 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1465}
1466
1467/**************************************************************************
1468 *
1469 * Hardware interrupts
1470 * The hardware interrupt handler does very little work; all the event
1471 * queue processing is carried out by per-channel tasklets.
1472 *
1473 **************************************************************************/
1474
1475/* Enable/disable/generate interrupts */
1476static inline void efx_nic_interrupts(struct efx_nic *efx,
1477 bool enabled, bool force)
1478{
1479 efx_oword_t int_en_reg_ker;
1480
1481 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1482 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1483 FRF_AZ_KER_INT_KER, force,
1484 FRF_AZ_DRV_INT_EN_KER, enabled);
1485 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1486}
1487
1488void efx_nic_enable_interrupts(struct efx_nic *efx)
1489{
1490 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1491 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1492
1493 efx_nic_interrupts(efx, true, false);
1494}
1495
1496void efx_nic_disable_interrupts(struct efx_nic *efx)
1497{
1498 /* Disable interrupts */
1499 efx_nic_interrupts(efx, false, false);
1500}
1501
1502/* Generate a test interrupt
1503 * Interrupt must already have been enabled, otherwise nasty things
1504 * may happen.
1505 */
1506void efx_nic_irq_test_start(struct efx_nic *efx)
1507{
1508 efx->last_irq_cpu = -1;
1509 smp_wmb();
1510 efx_nic_interrupts(efx, true, true);
  72 efx->type->irq_test_generate(efx);
1511}
1512
1513/* Process a fatal interrupt
1514 * Disable bus mastering ASAP and schedule a reset
1515 */
1516irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1517{
1518 struct falcon_nic_data *nic_data = efx->nic_data;
1519 efx_oword_t *int_ker = efx->irq_status.addr;
1520 efx_oword_t fatal_intr;
1521 int error, mem_perr;
1522
1523 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1524 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1525
1526 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1527 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1528 EFX_OWORD_VAL(fatal_intr),
1529 error ? "disabling bus mastering" : "no recognised error");
1530
1531 /* If this is a memory parity error dump which blocks are offending */
1532 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1533 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1534 if (mem_perr) {
1535 efx_oword_t reg;
1536 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1537 netif_err(efx, hw, efx->net_dev,
1538 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1539 EFX_OWORD_VAL(reg));
1540 }
1541
1542 /* Disable both devices */
1543 pci_clear_master(efx->pci_dev);
1544 if (efx_nic_is_dual_func(efx))
1545 pci_clear_master(nic_data->pci_dev2);
1546 efx_nic_disable_interrupts(efx);
1547
1548 /* Count errors and reset or disable the NIC accordingly */
1549 if (efx->int_error_count == 0 ||
1550 time_after(jiffies, efx->int_error_expire)) {
1551 efx->int_error_count = 0;
1552 efx->int_error_expire =
1553 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1554 }
1555 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1556 netif_err(efx, hw, efx->net_dev,
1557 "SYSTEM ERROR - reset scheduled\n");
1558 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1559 } else {
1560 netif_err(efx, hw, efx->net_dev,
1561 "SYSTEM ERROR - max number of errors seen."
1562 "NIC will be disabled\n");
1563 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1564 }
1565
1566 return IRQ_HANDLED;
1567}
1568
1569/* Handle a legacy interrupt
1570 * Acknowledges the interrupt and schedules event queue processing.
1571 */
1572static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1573{
1574 struct efx_nic *efx = dev_id;
1575 efx_oword_t *int_ker = efx->irq_status.addr;
1576 irqreturn_t result = IRQ_NONE;
1577 struct efx_channel *channel;
1578 efx_dword_t reg;
1579 u32 queues;
1580 int syserr;
1581
1582 /* Could this be ours? If interrupts are disabled then the
1583 * channel state may not be valid.
1584 */
1585 if (!efx->legacy_irq_enabled)
1586 return result;
1587
1588 /* Read the ISR which also ACKs the interrupts */
1589 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1590 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1591
1592 /* Legacy interrupts are disabled too late by the EEH kernel
1593 * code. Disable them earlier.
1594 * If an EEH error occurred, the read will have returned all ones.
1595 */
1596 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1597 !efx->eeh_disabled_legacy_irq) {
1598 disable_irq_nosync(efx->legacy_irq);
1599 efx->eeh_disabled_legacy_irq = true;
1600 }
1601
1602 /* Handle non-event-queue sources */
1603 if (queues & (1U << efx->irq_level)) {
1604 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1605 if (unlikely(syserr))
1606 return efx_nic_fatal_interrupt(efx);
1607 efx->last_irq_cpu = raw_smp_processor_id();
1608 }
1609
1610 if (queues != 0) {
1611 if (EFX_WORKAROUND_15783(efx))
1612 efx->irq_zero_count = 0;
1613
1614 /* Schedule processing of any interrupting queues */
1615 efx_for_each_channel(channel, efx) {
1616 if (queues & 1)
1617 efx_schedule_channel_irq(channel);
1618 queues >>= 1;
1619 }
1620 result = IRQ_HANDLED;
1621
1622 } else if (EFX_WORKAROUND_15783(efx)) {
1623 efx_qword_t *event;
1624
1625 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1626 * because this might be a shared interrupt. */
1627 if (efx->irq_zero_count++ == 0)
1628 result = IRQ_HANDLED;
1629
1630 /* Ensure we schedule or rearm all event queues */
1631 efx_for_each_channel(channel, efx) {
1632 event = efx_event(channel, channel->eventq_read_ptr);
1633 if (efx_event_present(event))
1634 efx_schedule_channel_irq(channel);
1635 else
1636 efx_nic_eventq_read_ack(channel);
1637 }
1638 }
1639
1640 if (result == IRQ_HANDLED)
1641 netif_vdbg(efx, intr, efx->net_dev,
1642 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1643 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1644
1645 return result;
1646}
1647
1648/* Handle an MSI interrupt
1649 *
1650 * Handle an MSI hardware interrupt. This routine schedules event
1651 * queue processing. No interrupt acknowledgement cycle is necessary.
1652 * Also, we never need to check that the interrupt is for us, since
1653 * MSI interrupts cannot be shared.
1654 */
1655static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1656{
1657 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1658 struct efx_nic *efx = channel->efx;
1659 efx_oword_t *int_ker = efx->irq_status.addr;
1660 int syserr;
1661
1662 netif_vdbg(efx, intr, efx->net_dev,
1663 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1664 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1665
1666 /* Handle non-event-queue sources */
1667 if (channel->channel == efx->irq_level) {
1668 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1669 if (unlikely(syserr))
1670 return efx_nic_fatal_interrupt(efx);
1671 efx->last_irq_cpu = raw_smp_processor_id();
1672 }
1673
1674 /* Schedule processing of the channel */
1675 efx_schedule_channel_irq(channel);
1676
1677 return IRQ_HANDLED;
1678}
1679
1680
1681/* Setup RSS indirection table.
1682 * This maps from the hash value of the packet to RXQ
1683 */
1684void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1685{
1686 size_t i = 0;
1687 efx_dword_t dword;
1688
1689 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1690 return;
1691
1692 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1693 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1694
1695 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1696 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1697 efx->rx_indir_table[i]);
1698 efx_writed(efx, &dword,
1699 FR_BZ_RX_INDIRECTION_TBL +
1700 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1701 }
1702}
  73}
1703
1704/* Hook interrupt handler(s)
@@ -1711,13 +82,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1711 int rc;
1712
1713 if (!EFX_INT_MODE_USE_MSI(efx)) {
1714 irq_handler_t handler;
1715 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1716 handler = efx_legacy_interrupt;
1717 else
1718 handler = falcon_legacy_interrupt_a1;
1719
1720 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
  85 rc = request_irq(efx->legacy_irq,
  86 efx->type->irq_handle_legacy, IRQF_SHARED,
1721 efx->name, efx);
1722 if (rc) {
1723 netif_err(efx, drv, efx->net_dev,
@@ -1742,10 +108,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1742 /* Hook MSI or MSI-X interrupt */
1743 n_irqs = 0;
1744 efx_for_each_channel(channel, efx) {
1745 rc = request_irq(channel->irq, efx_msi_interrupt,
 111 rc = request_irq(channel->irq, efx->type->irq_handle_msi,
1746 IRQF_PROBE_SHARED, /* Not shared */
1747 efx->channel_name[channel->channel],
 113 efx->msi_context[channel->channel].name,
1748 &efx->channel[channel->channel]);
 114 &efx->msi_context[channel->channel]);
1749 if (rc) {
1750 netif_err(efx, drv, efx->net_dev,
1751 "failed to hook IRQ %d\n", channel->irq);
@@ -1774,7 +140,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1774 efx_for_each_channel(channel, efx) {
1775 if (n_irqs-- == 0)
1776 break;
1777 free_irq(channel->irq, &efx->channel[channel->channel]);
 143 free_irq(channel->irq, &efx->msi_context[channel->channel]);
1778 }
1779 fail1:
1780 return rc;
@@ -1783,7 +149,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1783void efx_nic_fini_interrupt(struct efx_nic *efx)
1784{
1785 struct efx_channel *channel;
1786 efx_oword_t reg;
1787
1788#ifdef CONFIG_RFS_ACCEL
1789 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
@@ -1792,167 +157,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
1792
1793 /* Disable MSI/MSI-X interrupts */
1794 efx_for_each_channel(channel, efx)
1795 free_irq(channel->irq, &efx->channel[channel->channel]);
 160 free_irq(channel->irq, &efx->msi_context[channel->channel]);
1796
1797 /* ACK legacy interrupt */
1798 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1799 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1800 else
1801 falcon_irq_ack_a1(efx);
1802
1803 /* Disable legacy interrupt */
1804 if (efx->legacy_irq)
1805 free_irq(efx->legacy_irq, efx);
1806}
1807
1808/* Looks at available SRAM resources and works out how many queues we
1809 * can support, and where things like descriptor caches should live.
1810 *
1811 * SRAM is split up as follows:
1812 * 0 buftbl entries for channels
1813 * efx->vf_buftbl_base buftbl entries for SR-IOV
1814 * efx->rx_dc_base RX descriptor caches
1815 * efx->tx_dc_base TX descriptor caches
1816 */
1817void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1818{
1819 unsigned vi_count, buftbl_min;
1820
1821 /* Account for the buffer table entries backing the datapath channels
1822 * and the descriptor caches for those channels.
1823 */
1824 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1825 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1826 efx->n_channels * EFX_MAX_EVQ_SIZE)
1827 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1828 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1829
1830#ifdef CONFIG_SFC_SRIOV
1831 if (efx_sriov_wanted(efx)) {
1832 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1833
1834 efx->vf_buftbl_base = buftbl_min;
1835
1836 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1837 vi_count = max(vi_count, EFX_VI_BASE);
1838 buftbl_free = (sram_lim_qw - buftbl_min -
1839 vi_count * vi_dc_entries);
1840
1841 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1842 efx_vf_size(efx));
1843 vf_limit = min(buftbl_free / entries_per_vf,
1844 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1845
1846 if (efx->vf_count > vf_limit) {
1847 netif_err(efx, probe, efx->net_dev,
1848 "Reducing VF count from from %d to %d\n",
1849 efx->vf_count, vf_limit);
1850 efx->vf_count = vf_limit;
1851 }
1852 vi_count += efx->vf_count * efx_vf_size(efx);
1853 }
1854#endif
1855
1856 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1857 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1858}
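
To make the sizing arithmetic above concrete, a worked example with illustrative values (4 channels, each with one RX queue and EFX_TXQ_TYPES = 4 TX queues; assuming EFX_MAX_DMAQ_SIZE = 4096, EFX_MAX_EVQ_SIZE = 32768, sizeof(efx_qword_t) = 8 and EFX_BUF_SIZE = 4096):

	buftbl_min = (4*4096 + 4*4*4096 + 4*32768) * 8 / 4096
	           = (16384 + 65536 + 131072) * 8 / 4096
	           = 416 buffer-table entries

	vi_count   = max(4, 4 * 4) = 16

so without SR-IOV the descriptor caches occupy the top 16 * (TX_DC_ENTRIES + RX_DC_ENTRIES) qwords of SRAM, directly below sram_lim_qw.
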
1859
1860u32 efx_nic_fpga_ver(struct efx_nic *efx)
1861{
1862 efx_oword_t altera_build;
1863 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1864 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1865}
1866
1867void efx_nic_init_common(struct efx_nic *efx)
1868{
1869 efx_oword_t temp;
1870
1871 /* Set positions of descriptor caches in SRAM. */
1872 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1873 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1874 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1875 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1876
1877 /* Set TX descriptor cache size. */
1878 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1879 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1880 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1881
1882 /* Set RX descriptor cache size. Set low watermark to size-8, as
1883 * this allows most efficient prefetching.
1884 */
1885 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1886 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1887 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1888 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1889 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1890
1891 /* Program INT_KER address */
1892 EFX_POPULATE_OWORD_2(temp,
1893 FRF_AZ_NORM_INT_VEC_DIS_KER,
1894 EFX_INT_MODE_USE_MSI(efx),
1895 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1896 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1897
1898 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1899 /* Use an interrupt level unused by event queues */
1900 efx->irq_level = 0x1f;
1901 else
1902 /* Use a valid MSI-X vector */
1903 efx->irq_level = 0;
1904
1905 /* Enable all the genuinely fatal interrupts. (They are still
1906 * masked by the overall interrupt mask, controlled by
1907 * falcon_interrupts()).
1908 *
1909 * Note: All other fatal interrupts are enabled
1910 */
1911 EFX_POPULATE_OWORD_3(temp,
1912 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1913 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1914 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1915 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1916 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1917 EFX_INVERT_OWORD(temp);
1918 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1919
1920 efx_nic_push_rx_indir_table(efx);
1921
1922 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1923 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1924 */
1925 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1926 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1927 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1928 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1929 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1930 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1931 /* Enable SW_EV to inherit in char driver - assume harmless here */
1932 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1933 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1934 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1935 /* Disable hardware watchdog which can misfire */
1936 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1937 /* Squash TX of packets of 16 bytes or less */
1938 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1939 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1940 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1941
1942 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1943 EFX_POPULATE_OWORD_4(temp,
1944 /* Default values */
1945 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1946 FRF_BZ_TX_PACE_SB_AF, 0xb,
1947 FRF_BZ_TX_PACE_FB_BASE, 0,
1948 /* Allow large pace values in the
1949 * fast bin. */
1950 FRF_BZ_TX_PACE_BIN_TH,
1951 FFE_BZ_TX_PACE_RESERVED);
1952 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1953 }
1954}
1955
1956/* Register dump */
1957
1958#define REGISTER_REVISION_A 1
@@ -2217,3 +428,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
2217 }
2218 }
2219}
431
432/**
433 * efx_nic_describe_stats - Describe supported statistics for ethtool
434 * @desc: Array of &struct efx_hw_stat_desc describing the statistics
435 * @count: Length of the @desc array
436 * @mask: Bitmask of which elements of @desc are enabled
437 * @names: Buffer to copy names to, or %NULL. The names are copied
438 * starting at intervals of %ETH_GSTRING_LEN bytes.
439 *
440 * Returns the number of visible statistics, i.e. the number of set
441 * bits in the first @count bits of @mask for which a name is defined.
442 */
443size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
444 const unsigned long *mask, u8 *names)
445{
446 size_t visible = 0;
447 size_t index;
448
449 for_each_set_bit(index, mask, count) {
450 if (desc[index].name) {
451 if (names) {
452 strlcpy(names, desc[index].name,
453 ETH_GSTRING_LEN);
454 names += ETH_GSTRING_LEN;
455 }
456 ++visible;
457 }
458 }
459
460 return visible;
461}
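
As a usage illustration, ethtool's get_sset_count()/get_strings() pair would drive this helper roughly as follows. This is a hedged sketch: my_stat_desc, MY_STAT_COUNT and my_stat_mask are hypothetical per-NIC tables; only efx_nic_describe_stats() and the standard ethtool symbols are real:

	static int my_get_sset_count(struct net_device *net_dev, int string_set)
	{
		return string_set == ETH_SS_STATS
			? efx_nic_describe_stats(my_stat_desc, MY_STAT_COUNT,
						 my_stat_mask, NULL)
			: -EINVAL;
	}

	static void my_get_strings(struct net_device *net_dev, u32 string_set,
				   u8 *strings)
	{
		if (string_set == ETH_SS_STATS)
			efx_nic_describe_stats(my_stat_desc, MY_STAT_COUNT,
					       my_stat_mask, strings);
	}

Calling it once with names == NULL sizes the string set; the second call fills in one ETH_GSTRING_LEN slot per visible statistic, in the same order later used for the values.
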
462
463/**
464 * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
465 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
466 * layout. DMA widths of 0, 16, 32 and 64 are supported; where
467 * the width is specified as 0 the corresponding element of
468 * @stats is not updated.
469 * @count: Length of the @desc array
470 * @mask: Bitmask of which elements of @desc are enabled
471 * @stats: Buffer to update with the converted statistics. The length
472 * of this array must be at least the number of set bits in the
473 * first @count bits of @mask.
474 * @dma_buf: DMA buffer containing hardware statistics
475 * @accumulate: If set, the converted values will be added rather than
476 * directly stored to the corresponding elements of @stats
477 */
478void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
479 const unsigned long *mask,
480 u64 *stats, const void *dma_buf, bool accumulate)
481{
482 size_t index;
483
484 for_each_set_bit(index, mask, count) {
485 if (desc[index].dma_width) {
486 const void *addr = dma_buf + desc[index].offset;
487 u64 val;
488
489 switch (desc[index].dma_width) {
490 case 16:
491 val = le16_to_cpup((__le16 *)addr);
492 break;
493 case 32:
494 val = le32_to_cpup((__le32 *)addr);
495 break;
496 case 64:
497 val = le64_to_cpup((__le64 *)addr);
498 break;
499 default:
500 WARN_ON(1);
501 val = 0;
502 break;
503 }
504
505 if (accumulate)
506 *stats += val;
507 else
508 *stats = val;
509 }
510
511 ++stats;
512 }
513}
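
A per-NIC-type statistics refresh is then a thin wrapper around this converter. A sketch under assumed names (my_stat_desc/my_stat_mask as above, plus a hypothetical nic_data holding the stats array and the DMA buffer the hardware fills in):

	static void my_update_stats(struct efx_nic *efx)
	{
		struct my_nic_data *nic_data = efx->nic_data;

		/* accumulate == false: overwrite with the latest DMA snapshot */
		efx_nic_update_stats(my_stat_desc, MY_STAT_COUNT, my_stat_mask,
				     nic_data->stats, nic_data->stats_buf.addr,
				     false);
	}

Passing accumulate == true instead suits hardware that clears its counters when it DMAs them, so each snapshot must be added to the running totals.
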
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index d63c2991a751..4b1e188f7a2f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -1,7 +1,7 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc.
4 * Copyright 2006-2013 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
@@ -16,17 +16,13 @@
16#include "net_driver.h" 16#include "net_driver.h"
17#include "efx.h" 17#include "efx.h"
18#include "mcdi.h" 18#include "mcdi.h"
19#include "spi.h"
20
21/*
22 * Falcon hardware control
23 */
24 19
25enum {
26 EFX_REV_FALCON_A0 = 0,
27 EFX_REV_FALCON_A1 = 1,
28 EFX_REV_FALCON_B0 = 2,
29 EFX_REV_SIENA_A0 = 3,
25 EFX_REV_HUNT_A0 = 4,
30};
31
32static inline int efx_nic_rev(struct efx_nic *efx)
@@ -34,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
34 return efx->type->revision;
35}
36
37extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
33extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
38
39/* NIC has two interlinked PCI functions for the same port. */
40static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -42,6 +38,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
42 return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
43}
44
41/* Read the current event from the event queue */
42static inline efx_qword_t *efx_event(struct efx_channel *channel,
43 unsigned int index)
44{
45 return ((efx_qword_t *) (channel->eventq.buf.addr)) +
46 (index & channel->eventq_mask);
47}
48
49/* See if an event is present
50 *
51 * We check both the high and low dword of the event for all ones. We
52 * wrote all ones when we cleared the event, and no valid event can
53 * have all ones in either its high or low dwords. This approach is
54 * robust against reordering.
55 *
56 * Note that using a single 64-bit comparison is incorrect; even
57 * though the CPU read will be atomic, the DMA write may not be.
58 */
59static inline int efx_event_present(efx_qword_t *event)
60{
61 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
62 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
63}
64
65/* Returns a pointer to the specified transmit descriptor in the TX
66 * descriptor queue belonging to the specified channel.
67 */
68static inline efx_qword_t *
69efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
70{
71 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
72}
73
74/* Decide whether to push a TX descriptor to the NIC vs merely writing
75 * the doorbell. This can reduce latency when we are adding a single
76 * descriptor to an empty queue, but is otherwise pointless. Further,
77 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
78 * triggered if we don't check this.
79 */
80static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
81 unsigned int write_count)
82{
83 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
84
85 if (empty_read_count == 0)
86 return false;
87
88 tx_queue->empty_read_count = 0;
89 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
90 && tx_queue->write_count - write_count == 1;
91}
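
For context, a TX doorbell path would consult this helper after queueing new descriptors, roughly as below (my_push_tx_desc/my_notify_tx_desc are hypothetical stand-ins for the per-architecture write routines):

	unsigned old_write_count = tx_queue->write_count;	/* sampled before queueing */
	...
	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count))
		my_push_tx_desc(tx_queue, txd);	/* descriptor + doorbell in one write */
	else
		my_notify_tx_desc(tx_queue);	/* doorbell-only write */
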
92
93/* Returns a pointer to the specified descriptor in the RX descriptor queue */
94static inline efx_qword_t *
95efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
96{
97 return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
98}
99
45enum {
46 PHY_TYPE_NONE = 0,
47 PHY_TYPE_TXC43128 = 1,
@@ -59,9 +114,6 @@ enum {
59 (1 << LOOPBACK_XGXS) | \ 114 (1 << LOOPBACK_XGXS) | \
60 (1 << LOOPBACK_XAUI)) 115 (1 << LOOPBACK_XAUI))
61 116
62#define FALCON_GMAC_LOOPBACKS \
63 (1 << LOOPBACK_GMAC)
64
65/* Alignment of PCIe DMA boundaries (4KB) */
66#define EFX_PAGE_SIZE 4096
67/* Size and alignment of buffer table entries (same) */
@@ -105,13 +157,96 @@ struct falcon_board {
105};
106
107/**
160 * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
161 * @device_id: Controller's id for the device
162 * @size: Size (in bytes)
163 * @addr_len: Number of address bytes in read/write commands
164 * @munge_address: Flag whether addresses should be munged.
165 * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
166 * use bit 3 of the command byte as address bit A8, rather
167 * than having a two-byte address. If this flag is set, then
168 * commands should be munged in this way.
169 * @erase_command: Erase command (or 0 if sector erase not needed).
170 * @erase_size: Erase sector size (in bytes)
171 * Erase commands affect sectors with this size and alignment.
172 * This must be a power of two.
173 * @block_size: Write block size (in bytes).
174 * Write commands are limited to blocks with this size and alignment.
175 */
176struct falcon_spi_device {
177 int device_id;
178 unsigned int size;
179 unsigned int addr_len;
180 unsigned int munge_address:1;
181 u8 erase_command;
182 unsigned int erase_size;
183 unsigned int block_size;
184};
185
186static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
187{
188 return spi->size != 0;
189}
190
191enum {
192 FALCON_STAT_tx_bytes,
193 FALCON_STAT_tx_packets,
194 FALCON_STAT_tx_pause,
195 FALCON_STAT_tx_control,
196 FALCON_STAT_tx_unicast,
197 FALCON_STAT_tx_multicast,
198 FALCON_STAT_tx_broadcast,
199 FALCON_STAT_tx_lt64,
200 FALCON_STAT_tx_64,
201 FALCON_STAT_tx_65_to_127,
202 FALCON_STAT_tx_128_to_255,
203 FALCON_STAT_tx_256_to_511,
204 FALCON_STAT_tx_512_to_1023,
205 FALCON_STAT_tx_1024_to_15xx,
206 FALCON_STAT_tx_15xx_to_jumbo,
207 FALCON_STAT_tx_gtjumbo,
208 FALCON_STAT_tx_non_tcpudp,
209 FALCON_STAT_tx_mac_src_error,
210 FALCON_STAT_tx_ip_src_error,
211 FALCON_STAT_rx_bytes,
212 FALCON_STAT_rx_good_bytes,
213 FALCON_STAT_rx_bad_bytes,
214 FALCON_STAT_rx_packets,
215 FALCON_STAT_rx_good,
216 FALCON_STAT_rx_bad,
217 FALCON_STAT_rx_pause,
218 FALCON_STAT_rx_control,
219 FALCON_STAT_rx_unicast,
220 FALCON_STAT_rx_multicast,
221 FALCON_STAT_rx_broadcast,
222 FALCON_STAT_rx_lt64,
223 FALCON_STAT_rx_64,
224 FALCON_STAT_rx_65_to_127,
225 FALCON_STAT_rx_128_to_255,
226 FALCON_STAT_rx_256_to_511,
227 FALCON_STAT_rx_512_to_1023,
228 FALCON_STAT_rx_1024_to_15xx,
229 FALCON_STAT_rx_15xx_to_jumbo,
230 FALCON_STAT_rx_gtjumbo,
231 FALCON_STAT_rx_bad_lt64,
232 FALCON_STAT_rx_bad_gtjumbo,
233 FALCON_STAT_rx_overflow,
234 FALCON_STAT_rx_symbol_error,
235 FALCON_STAT_rx_align_error,
236 FALCON_STAT_rx_length_error,
237 FALCON_STAT_rx_internal_error,
238 FALCON_STAT_rx_nodesc_drop_cnt,
239 FALCON_STAT_COUNT
240};
241
242/**
108 * struct falcon_nic_data - Falcon NIC state
109 * @pci_dev2: Secondary function of Falcon A
110 * @board: Board state and functions
246 * @stats: Hardware statistics
111 * @stats_disable_count: Nest count for disabling statistics fetches
112 * @stats_pending: Is there a pending DMA of MAC statistics.
113 * @stats_timer: A timer for regularly fetching MAC statistics.
114 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
115 * @spi_flash: SPI flash device
116 * @spi_eeprom: SPI EEPROM device
117 * @spi_lock: SPI bus lock
@@ -121,12 +256,12 @@ struct falcon_board {
121struct falcon_nic_data {
122 struct pci_dev *pci_dev2;
123 struct falcon_board board;
259 u64 stats[FALCON_STAT_COUNT];
124 unsigned int stats_disable_count;
125 bool stats_pending;
126 struct timer_list stats_timer;
127 u32 *stats_dma_done;
128 struct efx_spi_device spi_flash;
129 struct efx_spi_device spi_eeprom;
263 struct falcon_spi_device spi_flash;
264 struct falcon_spi_device spi_eeprom;
130 struct mutex spi_lock;
131 struct mutex mdio_lock;
132 bool xmac_poll_required;
@@ -138,29 +273,148 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
138 return &data->board; 273 return &data->board;
139} 274}
140 275
276enum {
277 SIENA_STAT_tx_bytes,
278 SIENA_STAT_tx_good_bytes,
279 SIENA_STAT_tx_bad_bytes,
280 SIENA_STAT_tx_packets,
281 SIENA_STAT_tx_bad,
282 SIENA_STAT_tx_pause,
283 SIENA_STAT_tx_control,
284 SIENA_STAT_tx_unicast,
285 SIENA_STAT_tx_multicast,
286 SIENA_STAT_tx_broadcast,
287 SIENA_STAT_tx_lt64,
288 SIENA_STAT_tx_64,
289 SIENA_STAT_tx_65_to_127,
290 SIENA_STAT_tx_128_to_255,
291 SIENA_STAT_tx_256_to_511,
292 SIENA_STAT_tx_512_to_1023,
293 SIENA_STAT_tx_1024_to_15xx,
294 SIENA_STAT_tx_15xx_to_jumbo,
295 SIENA_STAT_tx_gtjumbo,
296 SIENA_STAT_tx_collision,
297 SIENA_STAT_tx_single_collision,
298 SIENA_STAT_tx_multiple_collision,
299 SIENA_STAT_tx_excessive_collision,
300 SIENA_STAT_tx_deferred,
301 SIENA_STAT_tx_late_collision,
302 SIENA_STAT_tx_excessive_deferred,
303 SIENA_STAT_tx_non_tcpudp,
304 SIENA_STAT_tx_mac_src_error,
305 SIENA_STAT_tx_ip_src_error,
306 SIENA_STAT_rx_bytes,
307 SIENA_STAT_rx_good_bytes,
308 SIENA_STAT_rx_bad_bytes,
309 SIENA_STAT_rx_packets,
310 SIENA_STAT_rx_good,
311 SIENA_STAT_rx_bad,
312 SIENA_STAT_rx_pause,
313 SIENA_STAT_rx_control,
314 SIENA_STAT_rx_unicast,
315 SIENA_STAT_rx_multicast,
316 SIENA_STAT_rx_broadcast,
317 SIENA_STAT_rx_lt64,
318 SIENA_STAT_rx_64,
319 SIENA_STAT_rx_65_to_127,
320 SIENA_STAT_rx_128_to_255,
321 SIENA_STAT_rx_256_to_511,
322 SIENA_STAT_rx_512_to_1023,
323 SIENA_STAT_rx_1024_to_15xx,
324 SIENA_STAT_rx_15xx_to_jumbo,
325 SIENA_STAT_rx_gtjumbo,
326 SIENA_STAT_rx_bad_gtjumbo,
327 SIENA_STAT_rx_overflow,
328 SIENA_STAT_rx_false_carrier,
329 SIENA_STAT_rx_symbol_error,
330 SIENA_STAT_rx_align_error,
331 SIENA_STAT_rx_length_error,
332 SIENA_STAT_rx_internal_error,
333 SIENA_STAT_rx_nodesc_drop_cnt,
334 SIENA_STAT_COUNT
335};
336
141/**
142 * struct siena_nic_data - Siena NIC state
143 * @mcdi: Management-Controller-to-Driver Interface
144 * @wol_filter_id: Wake-on-LAN packet filter id
145 * @hwmon: Hardware monitor state
340 * @stats: Hardware statistics
146 */
147struct siena_nic_data {
148 struct efx_mcdi_iface mcdi;
149 int wol_filter_id;
150#ifdef CONFIG_SFC_MCDI_MON
151 struct efx_mcdi_mon hwmon;
152#endif
344 u64 stats[SIENA_STAT_COUNT];
153};
154
155#ifdef CONFIG_SFC_MCDI_MON
156static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
157{
158 struct siena_nic_data *nic_data;
159 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
160 nic_data = efx->nic_data;
161 return &nic_data->hwmon;
162}
163#endif
347enum {
348 EF10_STAT_tx_bytes,
349 EF10_STAT_tx_packets,
350 EF10_STAT_tx_pause,
351 EF10_STAT_tx_control,
352 EF10_STAT_tx_unicast,
353 EF10_STAT_tx_multicast,
354 EF10_STAT_tx_broadcast,
355 EF10_STAT_tx_lt64,
356 EF10_STAT_tx_64,
357 EF10_STAT_tx_65_to_127,
358 EF10_STAT_tx_128_to_255,
359 EF10_STAT_tx_256_to_511,
360 EF10_STAT_tx_512_to_1023,
361 EF10_STAT_tx_1024_to_15xx,
362 EF10_STAT_tx_15xx_to_jumbo,
363 EF10_STAT_rx_bytes,
364 EF10_STAT_rx_bytes_minus_good_bytes,
365 EF10_STAT_rx_good_bytes,
366 EF10_STAT_rx_bad_bytes,
367 EF10_STAT_rx_packets,
368 EF10_STAT_rx_good,
369 EF10_STAT_rx_bad,
370 EF10_STAT_rx_pause,
371 EF10_STAT_rx_control,
372 EF10_STAT_rx_unicast,
373 EF10_STAT_rx_multicast,
374 EF10_STAT_rx_broadcast,
375 EF10_STAT_rx_lt64,
376 EF10_STAT_rx_64,
377 EF10_STAT_rx_65_to_127,
378 EF10_STAT_rx_128_to_255,
379 EF10_STAT_rx_256_to_511,
380 EF10_STAT_rx_512_to_1023,
381 EF10_STAT_rx_1024_to_15xx,
382 EF10_STAT_rx_15xx_to_jumbo,
383 EF10_STAT_rx_gtjumbo,
384 EF10_STAT_rx_bad_gtjumbo,
385 EF10_STAT_rx_overflow,
386 EF10_STAT_rx_align_error,
387 EF10_STAT_rx_length_error,
388 EF10_STAT_rx_nodesc_drops,
389 EF10_STAT_COUNT
390};
391
392/**
393 * struct efx_ef10_nic_data - EF10 architecture NIC state
394 * @mcdi_buf: DMA buffer for MCDI
395 * @warm_boot_count: Last seen MC warm boot count
396 * @vi_base: Absolute index of first VI in this function
397 * @n_allocated_vis: Number of VIs allocated to this function
398 * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
399 * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
400 * @rx_rss_context: Firmware handle for our RSS context
401 * @stats: Hardware statistics
402 * @workaround_35388: Flag: firmware supports workaround for bug 35388
403 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
404 * %MC_CMD_GET_CAPABILITIES response)
405 */
406struct efx_ef10_nic_data {
407 struct efx_buffer mcdi_buf;
408 u16 warm_boot_count;
409 unsigned int vi_base;
410 unsigned int n_allocated_vis;
411 bool must_realloc_vis;
412 bool must_restore_filters;
413 u32 rx_rss_context;
414 u64 stats[EF10_STAT_COUNT];
415 bool workaround_35388;
416 u32 datapath_caps;
417};
164 418
165/* 419/*
166 * On the SFC9000 family each port is associated with 1 PCI physical 420 * On the SFC9000 family each port is associated with 1 PCI physical
@@ -263,6 +517,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
263extern const struct efx_nic_type falcon_a1_nic_type; 517extern const struct efx_nic_type falcon_a1_nic_type;
264extern const struct efx_nic_type falcon_b0_nic_type; 518extern const struct efx_nic_type falcon_b0_nic_type;
265extern const struct efx_nic_type siena_a0_nic_type; 519extern const struct efx_nic_type siena_a0_nic_type;
520extern const struct efx_nic_type efx_hunt_a0_nic_type;
266 521
267/************************************************************************** 522/**************************************************************************
268 * 523 *
@@ -274,35 +529,123 @@ extern const struct efx_nic_type siena_a0_nic_type;
274extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); 529extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
275 530
276/* TX data path */ 531/* TX data path */
277extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); 532static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
278extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); 533{
279extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); 534 return tx_queue->efx->type->tx_probe(tx_queue);
280extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); 535}
281extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); 536static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
537{
538 tx_queue->efx->type->tx_init(tx_queue);
539}
540static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
541{
542 tx_queue->efx->type->tx_remove(tx_queue);
543}
544static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
545{
546 tx_queue->efx->type->tx_write(tx_queue);
547}
282 548
283/* RX data path */ 549/* RX data path */
284extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); 550static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
285extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); 551{
286extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); 552 return rx_queue->efx->type->rx_probe(rx_queue);
287extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); 553}
288extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); 554static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
289extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue); 555{
556 rx_queue->efx->type->rx_init(rx_queue);
557}
558static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
559{
560 rx_queue->efx->type->rx_remove(rx_queue);
561}
562static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
563{
564 rx_queue->efx->type->rx_write(rx_queue);
565}
566static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
567{
568 rx_queue->efx->type->rx_defer_refill(rx_queue);
569}
290 570
291/* Event data path */ 571/* Event data path */
292extern int efx_nic_probe_eventq(struct efx_channel *channel); 572static inline int efx_nic_probe_eventq(struct efx_channel *channel)
293extern void efx_nic_init_eventq(struct efx_channel *channel); 573{
294extern void efx_nic_fini_eventq(struct efx_channel *channel); 574 return channel->efx->type->ev_probe(channel);
295extern void efx_nic_remove_eventq(struct efx_channel *channel); 575}
296extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); 576static inline int efx_nic_init_eventq(struct efx_channel *channel)
297extern void efx_nic_eventq_read_ack(struct efx_channel *channel); 577{
298extern bool efx_nic_event_present(struct efx_channel *channel); 578 return channel->efx->type->ev_init(channel);
579}
580static inline void efx_nic_fini_eventq(struct efx_channel *channel)
581{
582 channel->efx->type->ev_fini(channel);
583}
584static inline void efx_nic_remove_eventq(struct efx_channel *channel)
585{
586 channel->efx->type->ev_remove(channel);
587}
588static inline int
589efx_nic_process_eventq(struct efx_channel *channel, int quota)
590{
591 return channel->efx->type->ev_process(channel, quota);
592}
593static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
594{
595 channel->efx->type->ev_read_ack(channel);
596}
597extern void efx_nic_event_test_start(struct efx_channel *channel);
299 598
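Every wrapper above has the same one-line shape: the old monolithic efx_nic_* entry points now dispatch through the per-generation method table, struct efx_nic_type. The slice of that table these wrappers imply can be read off directly from their bodies; a reconstructed excerpt follows (the authoritative definition lives in net_driver.h):

	/* Excerpt inferred from the wrappers above - see net_driver.h */
	struct efx_nic_type {
		/* ... */
		int (*tx_probe)(struct efx_tx_queue *tx_queue);
		void (*tx_init)(struct efx_tx_queue *tx_queue);
		void (*tx_remove)(struct efx_tx_queue *tx_queue);
		void (*tx_write)(struct efx_tx_queue *tx_queue);
		int (*ev_probe)(struct efx_channel *channel);
		int (*ev_init)(struct efx_channel *channel);
		void (*ev_fini)(struct efx_channel *channel);
		void (*ev_remove)(struct efx_channel *channel);
		int (*ev_process)(struct efx_channel *channel, int quota);
		void (*ev_read_ack)(struct efx_channel *channel);
		/* ... */
	};

For Falcon and Siena these slots point at the efx_farch_* implementations declared just below; the new EF10 type supplies its own.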
300/* MAC/PHY */ 599/* Falcon/Siena queue operations */
301extern void falcon_drain_tx_fifo(struct efx_nic *efx); 600extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
302extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 601extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
303extern bool falcon_xmac_check_fault(struct efx_nic *efx); 602extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
304extern int falcon_reconfigure_xmac(struct efx_nic *efx); 603extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
305extern void falcon_update_stats_xmac(struct efx_nic *efx); 604extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
605extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
606extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
607extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
608extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
609extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
610extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
611extern int efx_farch_ev_probe(struct efx_channel *channel);
612extern int efx_farch_ev_init(struct efx_channel *channel);
613extern void efx_farch_ev_fini(struct efx_channel *channel);
614extern void efx_farch_ev_remove(struct efx_channel *channel);
615extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
616extern void efx_farch_ev_read_ack(struct efx_channel *channel);
617extern void efx_farch_ev_test_generate(struct efx_channel *channel);
618
619/* Falcon/Siena filter operations */
620extern int efx_farch_filter_table_probe(struct efx_nic *efx);
621extern void efx_farch_filter_table_restore(struct efx_nic *efx);
622extern void efx_farch_filter_table_remove(struct efx_nic *efx);
623extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
624extern s32 efx_farch_filter_insert(struct efx_nic *efx,
625 struct efx_filter_spec *spec, bool replace);
626extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
627 enum efx_filter_priority priority,
628 u32 filter_id);
629extern int efx_farch_filter_get_safe(struct efx_nic *efx,
630 enum efx_filter_priority priority,
631 u32 filter_id, struct efx_filter_spec *);
632extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
633 enum efx_filter_priority priority);
634extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
635 enum efx_filter_priority priority);
636extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
637extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
638 enum efx_filter_priority priority,
639 u32 *buf, u32 size);
640#ifdef CONFIG_RFS_ACCEL
641extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
642 struct efx_filter_spec *spec);
643extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
644 unsigned int index);
645#endif
646extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
647
648extern bool efx_nic_event_present(struct efx_channel *channel);
306 649
307/* Some statistics are computed as A - B where A and B each increase 650/* Some statistics are computed as A - B where A and B each increase
308 * linearly with some hardware counter(s) and the counters are read 651 * linearly with some hardware counter(s) and the counters are read
@@ -322,16 +665,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
322 *stat = diff; 665 *stat = diff;
323} 666}
324 667
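The hunk shows only the tail of efx_update_diff_stat(); the whole helper is a one-branch monotonicity guard, reproduced here from nic.h for reference:

	static inline void efx_update_diff_stat(u64 *stat, u64 diff)
	{
		/* Take the new difference only if it is ahead of the old
		 * one; a backward step means A and B were sampled
		 * inconsistently, and the signed comparison also tolerates
		 * wrap-around of the 64-bit counters.
		 */
		if ((s64)(diff - *stat) > 0)
			*stat = diff;
	}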
325/* Interrupts and test events */ 668/* Interrupts */
326extern int efx_nic_init_interrupt(struct efx_nic *efx); 669extern int efx_nic_init_interrupt(struct efx_nic *efx);
327extern void efx_nic_enable_interrupts(struct efx_nic *efx);
328extern void efx_nic_event_test_start(struct efx_channel *channel);
329extern void efx_nic_irq_test_start(struct efx_nic *efx); 670extern void efx_nic_irq_test_start(struct efx_nic *efx);
330extern void efx_nic_disable_interrupts(struct efx_nic *efx);
331extern void efx_nic_fini_interrupt(struct efx_nic *efx); 671extern void efx_nic_fini_interrupt(struct efx_nic *efx);
332extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); 672
333extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); 673/* Falcon/Siena interrupts */
334extern void falcon_irq_ack_a1(struct efx_nic *efx); 674extern void efx_farch_irq_enable_master(struct efx_nic *efx);
675extern void efx_farch_irq_test_generate(struct efx_nic *efx);
676extern void efx_farch_irq_disable_master(struct efx_nic *efx);
677extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
678extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
679extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
335 680
336static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) 681static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
337{ 682{
@@ -345,69 +690,47 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
345/* Global Resources */ 690/* Global Resources */
346extern int efx_nic_flush_queues(struct efx_nic *efx); 691extern int efx_nic_flush_queues(struct efx_nic *efx);
347extern void siena_prepare_flush(struct efx_nic *efx); 692extern void siena_prepare_flush(struct efx_nic *efx);
693extern int efx_farch_fini_dmaq(struct efx_nic *efx);
348extern void siena_finish_flush(struct efx_nic *efx); 694extern void siena_finish_flush(struct efx_nic *efx);
349extern void falcon_start_nic_stats(struct efx_nic *efx); 695extern void falcon_start_nic_stats(struct efx_nic *efx);
350extern void falcon_stop_nic_stats(struct efx_nic *efx); 696extern void falcon_stop_nic_stats(struct efx_nic *efx);
351extern void falcon_setup_xaui(struct efx_nic *efx);
352extern int falcon_reset_xaui(struct efx_nic *efx); 697extern int falcon_reset_xaui(struct efx_nic *efx);
353extern void 698extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
354efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); 699extern void efx_farch_init_common(struct efx_nic *efx);
355extern void efx_nic_init_common(struct efx_nic *efx); 700extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
356extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); 701static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
702{
703 efx->type->rx_push_indir_table(efx);
704}
705extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
357 706
358int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 707int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
359 unsigned int len); 708 unsigned int len, gfp_t gfp_flags);
360void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); 709void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
361 710
362/* Tests */ 711/* Tests */
363struct efx_nic_register_test { 712struct efx_farch_register_test {
364 unsigned address; 713 unsigned address;
365 efx_oword_t mask; 714 efx_oword_t mask;
366}; 715};
367extern int efx_nic_test_registers(struct efx_nic *efx, 716extern int efx_farch_test_registers(struct efx_nic *efx,
368 const struct efx_nic_register_test *regs, 717 const struct efx_farch_register_test *regs,
369 size_t n_regs); 718 size_t n_regs);
370 719
371extern size_t efx_nic_get_regs_len(struct efx_nic *efx); 720extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
372extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); 721extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
373 722
374/************************************************************************** 723extern size_t
375 * 724efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
376 * Falcon MAC stats 725 const unsigned long *mask, u8 *names);
377 * 726extern void
378 ************************************************************************** 727efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
379 */ 728 const unsigned long *mask,
729 u64 *stats, const void *dma_buf, bool accumulate);
730
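These two prototypes replace the per-chip stats machinery that follows below on the old side (FALCON_STAT and friends) with one table-driven scheme: each generation supplies an array of descriptors plus a bitmask of the entries it implements, efx_nic_describe_stats() emits the ethtool names, and efx_nic_update_stats() folds the DMA'd statistics block into the u64 array. A sketch of the descriptor, inferred from these prototypes (check nic.h for the real field names):

	/* Sketch only - see the definition in nic.h */
	struct efx_hw_stat_desc {
		const char *name;  /* ethtool name; NULL if not exported */
		u16 dma_width;     /* width in the DMA block; 0 if computed in software */
		u16 offset;        /* byte offset within the DMA block */
	};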
731#define EFX_MAX_FLUSH_TIME 5000
380 732
381#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) 733extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
382#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) 734 efx_qword_t *event);
383
384/* Retrieve statistic from statistics block */
385#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
386 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
387 (efx)->mac_stats.efx_stat += le16_to_cpu( \
388 *((__force __le16 *) \
389 (efx->stats_buffer.addr + \
390 FALCON_STAT_OFFSET(falcon_stat)))); \
391 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
392 (efx)->mac_stats.efx_stat += le32_to_cpu( \
393 *((__force __le32 *) \
394 (efx->stats_buffer.addr + \
395 FALCON_STAT_OFFSET(falcon_stat)))); \
396 else \
397 (efx)->mac_stats.efx_stat += le64_to_cpu( \
398 *((__force __le64 *) \
399 (efx->stats_buffer.addr + \
400 FALCON_STAT_OFFSET(falcon_stat)))); \
401 } while (0)
402
403#define FALCON_MAC_STATS_SIZE 0x100
404
405#define MAC_DATA_LBN 0
406#define MAC_DATA_WIDTH 32
407
408extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
409 efx_qword_t *event);
410
411extern void falcon_poll_xmac(struct efx_nic *efx);
412 735
413#endif /* EFX_NIC_H */ 736#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 11d148cd8441..45eeb7075156 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2010 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
@@ -47,21 +47,4 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); 47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); 48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
49 49
50/****************************************************************************
51 * Siena managed PHYs
52 */
53extern const struct efx_phy_operations efx_mcdi_phy_ops;
54
55extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
56 unsigned int prtad, unsigned int devad,
57 u16 addr, u16 *value_out, u32 *status_out);
58extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
59 unsigned int prtad, unsigned int devad,
60 u16 addr, u16 value, u32 *status_out);
61extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
62 struct efx_link_state *link_state,
63 u32 speed, u32 flags, u32 fcntl);
64extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
65extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
66
67#endif 50#endif
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b495394a6dfa..03acf57df045 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc. 3 * Copyright 2011-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -46,7 +46,7 @@
46#include "mcdi.h" 46#include "mcdi.h"
47#include "mcdi_pcol.h" 47#include "mcdi_pcol.h"
48#include "io.h" 48#include "io.h"
49#include "regs.h" 49#include "farch_regs.h"
50#include "nic.h" 50#include "nic.h"
51 51
52/* Maximum number of events expected to make up a PTP event */ 52/* Maximum number of events expected to make up a PTP event */
@@ -294,8 +294,7 @@ struct efx_ptp_data {
294 struct work_struct pps_work; 294 struct work_struct pps_work;
295 struct workqueue_struct *pps_workwq; 295 struct workqueue_struct *pps_workwq;
296 bool nic_ts_enabled; 296 bool nic_ts_enabled;
297 u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN( 297 MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
298 MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
299 struct efx_ptp_timeset 298 struct efx_ptp_timeset
300 timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; 299 timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
301}; 300};
@@ -311,9 +310,10 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
311/* Enable MCDI PTP support. */ 310/* Enable MCDI PTP support. */
312static int efx_ptp_enable(struct efx_nic *efx) 311static int efx_ptp_enable(struct efx_nic *efx)
313{ 312{
314 u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN]; 313 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
315 314
316 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); 315 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
316 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
317 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, 317 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
318 efx->ptp_data->channel->channel); 318 efx->ptp_data->channel->channel);
319 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); 319 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
@@ -329,9 +329,10 @@ static int efx_ptp_enable(struct efx_nic *efx)
329 */ 329 */
330static int efx_ptp_disable(struct efx_nic *efx) 330static int efx_ptp_disable(struct efx_nic *efx)
331{ 331{
332 u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN]; 332 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
333 333
334 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); 334 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
335 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
335 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 336 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
336 NULL, 0, NULL); 337 NULL, 0, NULL);
337} 338}
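Two changes recur through the rest of this file. First, bare u8 request/response arrays become MCDI_DECLARE_BUF(), which pads the length up to whole 32-bit MCDI words and guarantees dword alignment; its shape is roughly the following (the authoritative definition is in mcdi.h):

	/* Approximate shape - check mcdi.h for the real definition */
	#define MCDI_DECLARE_BUF(_name, _len)				\
		efx_dword_t _name[DIV_ROUND_UP(_len, 4)]

Second, every PTP request now sets PTP_IN_PERIPH_ID explicitly; presumably this field addresses the PTP peripheral per PCI function on EF10, with 0 remaining the correct value for Siena.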
@@ -389,14 +390,14 @@ static void efx_ptp_send_times(struct efx_nic *efx,
389 host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | 390 host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
390 now.ts_real.tv_nsec); 391 now.ts_real.tv_nsec);
391 /* Update host time in NIC memory */ 392 /* Update host time in NIC memory */
392 _efx_writed(efx, cpu_to_le32(host_time), 393 efx->type->ptp_write_host_time(efx, host_time);
393 FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
394 } 394 }
395 *last_time = now; 395 *last_time = now;
396} 396}
397 397
398/* Read a timeset from the MC's results and partial process. */ 398/* Read a timeset from the MC's results and partial process. */
399static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset) 399static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
400 struct efx_ptp_timeset *timeset)
400{ 401{
401 unsigned start_ns, end_ns; 402 unsigned start_ns, end_ns;
402 403
@@ -425,12 +426,14 @@ static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
425 * busy. A number of readings are taken so that, hopefully, at least one good 426 * busy. A number of readings are taken so that, hopefully, at least one good
426 * synchronisation will be seen in the results. 427 * synchronisation will be seen in the results.
427 */ 428 */
428static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, 429static int
429 size_t response_length, 430efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
430 const struct pps_event_time *last_time) 431 size_t response_length,
432 const struct pps_event_time *last_time)
431{ 433{
432 unsigned number_readings = (response_length / 434 unsigned number_readings =
433 MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); 435 MCDI_VAR_ARRAY_LEN(response_length,
436 PTP_OUT_SYNCHRONIZE_TIMESET);
434 unsigned i; 437 unsigned i;
435 unsigned total; 438 unsigned total;
436 unsigned ngood = 0; 439 unsigned ngood = 0;
@@ -447,8 +450,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
447 * appear to be erroneous. 450
448 */ 451 */
449 for (i = 0; i < number_readings; i++) { 452 for (i = 0; i < number_readings; i++) {
450 efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); 453 efx_ptp_read_timeset(
451 synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; 454 MCDI_ARRAY_STRUCT_PTR(synch_buf,
455 PTP_OUT_SYNCHRONIZE_TIMESET, i),
456 &ptp->timeset[i]);
452 } 457 }
453 458
454 /* Find the last good host-MC synchronization result. The MC times 459 /* Find the last good host-MC synchronization result. The MC times
@@ -518,7 +523,7 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
518static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) 523static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
519{ 524{
520 struct efx_ptp_data *ptp = efx->ptp_data; 525 struct efx_ptp_data *ptp = efx->ptp_data;
521 u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX]; 526 MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
522 size_t response_length; 527 size_t response_length;
523 int rc; 528 int rc;
524 unsigned long timeout; 529 unsigned long timeout;
@@ -527,17 +532,17 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
527 int *start = ptp->start.addr; 532 int *start = ptp->start.addr;
528 533
529 MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); 534 MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
535 MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
530 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, 536 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
531 num_readings); 537 num_readings);
532 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO, 538 MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
533 (u32)ptp->start.dma_addr); 539 ptp->start.dma_addr);
534 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
535 (u32)((u64)ptp->start.dma_addr >> 32));
536 540
537 /* Clear flag that signals MC ready */ 541 /* Clear flag that signals MC ready */
538 ACCESS_ONCE(*start) = 0; 542 ACCESS_ONCE(*start) = 0;
539 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, 543 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
540 MC_CMD_PTP_IN_SYNCHRONIZE_LEN); 544 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
545 EFX_BUG_ON_PARANOID(rc);
541 546
542 /* Wait for start from MCDI (or timeout) */ 547 /* Wait for start from MCDI (or timeout) */
543 timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); 548 timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
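The polling loop that consumes this timeout falls outside the hunk; for orientation, it is a plain busy-wait on the DMA'd flag, roughly as follows (a sketch, not the verbatim source):

	while (!ACCESS_ONCE(*start) && time_before(jiffies, timeout))
		udelay(20);	/* the MC normally sets the flag quickly */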
@@ -564,15 +569,15 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
564/* Transmit a PTP packet, via the MCDI interface, to the wire. */ 569/* Transmit a PTP packet, via the MCDI interface, to the wire. */
565static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) 570static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
566{ 571{
567 u8 *txbuf = efx->ptp_data->txbuf; 572 struct efx_ptp_data *ptp_data = efx->ptp_data;
568 struct skb_shared_hwtstamps timestamps; 573 struct skb_shared_hwtstamps timestamps;
569 int rc = -EIO; 574 int rc = -EIO;
570 /* MCDI driver requires word aligned lengths */ 575 MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
571 size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4); 576 size_t len;
572 u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
573 577
574 MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); 578 MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
575 MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); 579 MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
580 MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
576 if (skb_shinfo(skb)->nr_frags != 0) { 581 if (skb_shinfo(skb)->nr_frags != 0) {
577 rc = skb_linearize(skb); 582 rc = skb_linearize(skb);
578 if (rc != 0) 583 if (rc != 0)
@@ -585,10 +590,12 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
585 goto fail; 590 goto fail;
586 } 591 }
587 skb_copy_from_linear_data(skb, 592 skb_copy_from_linear_data(skb,
588 &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST], 593 MCDI_PTR(ptp_data->txbuf,
589 len); 594 PTP_IN_TRANSMIT_PACKET),
590 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime, 595 skb->len);
591 sizeof(txtime), &len); 596 rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
597 ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
598 txtime, sizeof(txtime), &len);
592 if (rc != 0) 599 if (rc != 0)
593 goto fail; 600 goto fail;
594 601
@@ -872,7 +879,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
872 if (!efx->ptp_data) 879 if (!efx->ptp_data)
873 return -ENOMEM; 880 return -ENOMEM;
874 881
875 rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int)); 882 rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
876 if (rc != 0) 883 if (rc != 0)
877 goto fail1; 884 goto fail1;
878 885
@@ -1359,7 +1366,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1359 struct efx_ptp_data, 1366 struct efx_ptp_data,
1360 phc_clock_info); 1367 phc_clock_info);
1361 struct efx_nic *efx = ptp_data->channel->efx; 1368 struct efx_nic *efx = ptp_data->channel->efx;
1362 u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN]; 1369 MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
1363 s64 adjustment_ns; 1370 s64 adjustment_ns;
1364 int rc; 1371 int rc;
1365 1372
@@ -1373,9 +1380,8 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1373 (PPB_EXTRA_BITS + MAX_PPB_BITS)); 1380 (PPB_EXTRA_BITS + MAX_PPB_BITS));
1374 1381
1375 MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); 1382 MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
1376 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns); 1383 MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
1377 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI, 1384 MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
1378 (u32)(adjustment_ns >> 32));
1379 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); 1385 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
1380 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); 1386 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
1381 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), 1387 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
@@ -1394,11 +1400,11 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
1394 phc_clock_info); 1400 phc_clock_info);
1395 struct efx_nic *efx = ptp_data->channel->efx; 1401 struct efx_nic *efx = ptp_data->channel->efx;
1396 struct timespec delta_ts = ns_to_timespec(delta); 1402 struct timespec delta_ts = ns_to_timespec(delta);
1397 u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN]; 1403 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
1398 1404
1399 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); 1405 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
1400 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0); 1406 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
1401 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0); 1407 MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
1402 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); 1408 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
1403 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); 1409 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
1404 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 1410 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1411,11 +1417,12 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
1411 struct efx_ptp_data, 1417 struct efx_ptp_data,
1412 phc_clock_info); 1418 phc_clock_info);
1413 struct efx_nic *efx = ptp_data->channel->efx; 1419 struct efx_nic *efx = ptp_data->channel->efx;
1414 u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN]; 1420 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
1415 u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN]; 1421 MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
1416 int rc; 1422 int rc;
1417 1423
1418 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); 1424 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
1425 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
1419 1426
1420 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 1427 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
1421 outbuf, sizeof(outbuf), NULL); 1428 outbuf, sizeof(outbuf), NULL);
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 326a28637f3c..efa3612affca 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 6af9cfda50fb..4a596725023f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
21#include <net/checksum.h> 21#include <net/checksum.h>
22#include "net_driver.h" 22#include "net_driver.h"
23#include "efx.h" 23#include "efx.h"
24#include "filter.h"
24#include "nic.h" 25#include "nic.h"
25#include "selftest.h" 26#include "selftest.h"
26#include "workarounds.h" 27#include "workarounds.h"
@@ -60,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
60 return page_address(buf->page) + buf->page_offset; 61 return page_address(buf->page) + buf->page_offset;
61} 62}
62 63
63static inline u32 efx_rx_buf_hash(const u8 *eh) 64static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
64{ 65{
65 /* The ethernet header is always directly after any hash. */ 66#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
66#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 67 return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
67 return __le32_to_cpup((const __le32 *)(eh - 4));
68#else 68#else
69 const u8 *data = eh - 4; 69 const u8 *data = eh + efx->rx_packet_hash_offset;
70 return (u32)data[0] | 70 return (u32)data[0] |
71 (u32)data[1] << 8 | 71 (u32)data[1] << 8 |
72 (u32)data[2] << 16 | 72 (u32)data[2] << 16 |
@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
326 unsigned int fill_level, batch_size; 326 unsigned int fill_level, batch_size;
327 int space, rc = 0; 327 int space, rc = 0;
328 328
329 if (!rx_queue->refill_enabled)
330 return;
331
329 /* Calculate current fill level, and exit if we don't need to fill */ 332 /* Calculate current fill level, and exit if we don't need to fill */
330 fill_level = (rx_queue->added_count - rx_queue->removed_count); 333 fill_level = (rx_queue->added_count - rx_queue->removed_count);
331 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); 334 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
@@ -435,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
435 } 438 }
436 439
437 if (efx->net_dev->features & NETIF_F_RXHASH) 440 if (efx->net_dev->features & NETIF_F_RXHASH)
438 skb->rxhash = efx_rx_buf_hash(eh); 441 skb->rxhash = efx_rx_buf_hash(efx, eh);
439 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 442 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
440 CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 443 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
441 444
@@ -523,10 +526,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
523 526
524 /* Validate the number of fragments and completed length */ 527 /* Validate the number of fragments and completed length */
525 if (n_frags == 1) { 528 if (n_frags == 1) {
526 efx_rx_packet__check_len(rx_queue, rx_buf, len); 529 if (!(flags & EFX_RX_PKT_PREFIX_LEN))
530 efx_rx_packet__check_len(rx_queue, rx_buf, len);
527 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || 531 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
528 unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) || 532 unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
529 unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) || 533 unlikely(len > n_frags * efx->rx_dma_len) ||
530 unlikely(!efx->rx_scatter)) { 534 unlikely(!efx->rx_scatter)) {
531 /* If this isn't an explicit discard request, either 535 /* If this isn't an explicit discard request, either
532 * the hardware or the driver is broken. 536 * the hardware or the driver is broken.
@@ -551,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
551 return; 555 return;
552 } 556 }
553 557
554 if (n_frags == 1) 558 if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
555 rx_buf->len = len; 559 rx_buf->len = len;
556 560
557 /* Release and/or sync the DMA mapping - assumes all RX buffers 561 /* Release and/or sync the DMA mapping - assumes all RX buffers
@@ -564,8 +568,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
564 */ 568 */
565 prefetch(efx_rx_buf_va(rx_buf)); 569 prefetch(efx_rx_buf_va(rx_buf));
566 570
567 rx_buf->page_offset += efx->type->rx_buffer_hash_size; 571 rx_buf->page_offset += efx->rx_prefix_size;
568 rx_buf->len -= efx->type->rx_buffer_hash_size; 572 rx_buf->len -= efx->rx_prefix_size;
569 573
570 if (n_frags > 1) { 574 if (n_frags > 1) {
571 /* Release/sync DMA mapping for additional fragments. 575 /* Release/sync DMA mapping for additional fragments.
@@ -577,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
577 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 581 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
578 if (--tail_frags == 0) 582 if (--tail_frags == 0)
579 break; 583 break;
580 efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE); 584 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
581 } 585 }
582 rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE; 586 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
583 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); 587 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
584 } 588 }
585 589
@@ -630,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel)
630 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); 634 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
631 u8 *eh = efx_rx_buf_va(rx_buf); 635 u8 *eh = efx_rx_buf_va(rx_buf);
632 636
637 /* Read length from the prefix if necessary. This already
638 * excludes the length of the prefix itself.
639 */
640 if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
641 rx_buf->len = le16_to_cpup((__le16 *)
642 (eh + efx->rx_packet_len_offset));
643
633 /* If we're in loopback test, then pass the packet directly to the 644 /* If we're in loopback test, then pass the packet directly to the
634 * loopback layer, and free the rx_buf here 645 * loopback layer, and free the rx_buf here
635 */ 646 */
@@ -738,9 +749,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
738 749
739 rx_queue->max_fill = max_fill; 750 rx_queue->max_fill = max_fill;
740 rx_queue->fast_fill_trigger = trigger; 751 rx_queue->fast_fill_trigger = trigger;
752 rx_queue->refill_enabled = true;
741 753
742 /* Set up RX descriptor ring */ 754 /* Set up RX descriptor ring */
743 rx_queue->enabled = true;
744 efx_nic_init_rx(rx_queue); 755 efx_nic_init_rx(rx_queue);
745} 756}
746 757
@@ -753,11 +764,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
753 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 764 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
754 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 765 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
755 766
756 /* A flush failure might have left rx_queue->enabled */
757 rx_queue->enabled = false;
758
759 del_timer_sync(&rx_queue->slow_fill); 767 del_timer_sync(&rx_queue->slow_fill);
760 efx_nic_fini_rx(rx_queue);
761 768
762 /* Release RX buffers from the current read ptr to the write ptr */ 769 /* Release RX buffers from the current read ptr to the write ptr */
763 if (rx_queue->buffer) { 770 if (rx_queue->buffer) {
@@ -803,3 +810,130 @@ module_param(rx_refill_threshold, uint, 0444);
803MODULE_PARM_DESC(rx_refill_threshold, 810MODULE_PARM_DESC(rx_refill_threshold,
804 "RX descriptor ring refill threshold (%)"); 811 "RX descriptor ring refill threshold (%)");
805 812
813#ifdef CONFIG_RFS_ACCEL
814
815int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
816 u16 rxq_index, u32 flow_id)
817{
818 struct efx_nic *efx = netdev_priv(net_dev);
819 struct efx_channel *channel;
820 struct efx_filter_spec spec;
821 const struct iphdr *ip;
822 const __be16 *ports;
823 int nhoff;
824 int rc;
825
826 nhoff = skb_network_offset(skb);
827
828 if (skb->protocol == htons(ETH_P_8021Q)) {
829 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
830 nhoff + sizeof(struct vlan_hdr));
831 if (((const struct vlan_hdr *)(skb->data + nhoff))->
832 h_vlan_encapsulated_proto != htons(ETH_P_IP))
833 return -EPROTONOSUPPORT;
834
835 /* This is IP over 802.1q VLAN. We can't filter on the
836 * IP 5-tuple and the vlan together, so just strip the
837 * vlan header and filter on the IP part.
838 */
839 nhoff += sizeof(struct vlan_hdr);
840 } else if (skb->protocol != htons(ETH_P_IP)) {
841 return -EPROTONOSUPPORT;
842 }
843
844 /* RFS must validate the IP header length before calling us */
845 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
846 ip = (const struct iphdr *)(skb->data + nhoff);
847 if (ip_is_fragment(ip))
848 return -EPROTONOSUPPORT;
849 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
850 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
851
852 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
853 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
854 rxq_index);
855 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
856 ip->daddr, ports[1], ip->saddr, ports[0]);
857 if (rc)
858 return rc;
859
860 rc = efx->type->filter_rfs_insert(efx, &spec);
861 if (rc < 0)
862 return rc;
863
864 /* Remember this so we can check whether to expire the filter later */
865 efx->rps_flow_id[rc] = flow_id;
866 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
867 ++channel->rfs_filters_added;
868
869 netif_info(efx, rx_status, efx->net_dev,
870 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
871 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
872 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
873 rxq_index, flow_id, rc);
874
875 return rc;
876}
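efx_filter_rfs() is the driver's accelerated-RFS entry point: the core calls it through the ndo_rx_flow_steer hook whenever a flow's consuming CPU changes, and the driver answers with a steering filter for that flow's 5-tuple. The hookup in sfc's netdev ops (in efx.c) looks like this:

	#ifdef CONFIG_RFS_ACCEL
		.ndo_rx_flow_steer	= efx_filter_rfs,
	#endif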
877
878bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
879{
880 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
881 unsigned int index, size;
882 u32 flow_id;
883
884 if (!spin_trylock_bh(&efx->filter_lock))
885 return false;
886
887 expire_one = efx->type->filter_rfs_expire_one;
888 index = efx->rps_expire_index;
889 size = efx->type->max_rx_ip_filters;
890 while (quota--) {
891 flow_id = efx->rps_flow_id[index];
892 if (expire_one(efx, flow_id, index))
893 netif_info(efx, rx_status, efx->net_dev,
894 "expired filter %d [flow %u]\n",
895 index, flow_id);
896 if (++index == size)
897 index = 0;
898 }
899 efx->rps_expire_index = index;
900
901 spin_unlock_bh(&efx->filter_lock);
902 return true;
903}
904
905#endif /* CONFIG_RFS_ACCEL */
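__efx_filter_rfs_expire() is driven from NAPI poll with a small quota, so the scan of the RX filter table is amortised across many interrupts and the trylock above never stalls the datapath. The caller in efx_poll() is approximately the following (threshold and quota quoted from memory; verify against efx.c):

	#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		if (channel->rfs_filters_added >= 60 &&
		    __efx_filter_rfs_expire(efx, 100))
			channel->rfs_filters_added -= 60;
	#endif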
906
907/**
908 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
909 * @spec: Specification to test
910 *
911 * Return: %true if the specification is a non-drop RX filter that
912 * matches a local MAC address I/G bit value of 1 or matches a local
913 * IPv4 or IPv6 address value in the respective multicast address
914 * range. Otherwise %false.
915 */
916bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
917{
918 if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
919 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
920 return false;
921
922 if (spec->match_flags &
923 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
924 is_multicast_ether_addr(spec->loc_mac))
925 return true;
926
927 if ((spec->match_flags &
928 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
929 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
930 if (spec->ether_type == htons(ETH_P_IP) &&
931 ipv4_is_multicast(spec->loc_host[0]))
932 return true;
933 if (spec->ether_type == htons(ETH_P_IPV6) &&
934 ((const u8 *)spec->loc_host)[0] == 0xff)
935 return true;
936 }
937
938 return false;
939}
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 2069f51b2aa9..144bbff5a4ae 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2012 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
447static int efx_poll_loopback(struct efx_nic *efx) 447static int efx_poll_loopback(struct efx_nic *efx)
448{ 448{
449 struct efx_loopback_state *state = efx->loopback_selftest; 449 struct efx_loopback_state *state = efx->loopback_selftest;
450 struct efx_channel *channel;
451 450
452 /* NAPI polling is not enabled, so process channels
453 * synchronously */
454 efx_for_each_channel(channel, efx) {
455 if (channel->work_pending)
456 efx_process_channel_now(channel);
457 }
458 return atomic_read(&state->rx_good) == state->packet_count; 451 return atomic_read(&state->rx_good) == state->packet_count;
459} 452}
460 453
@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
586 mutex_lock(&efx->mac_lock); 579 mutex_lock(&efx->mac_lock);
587 efx->type->monitor(efx); 580 efx->type->monitor(efx);
588 mutex_unlock(&efx->mac_lock); 581 mutex_unlock(&efx->mac_lock);
589 } else {
590 struct efx_channel *channel = efx_get_channel(efx, 0);
591 if (channel->work_pending)
592 efx_process_channel_now(channel);
593 } 582 }
594 583
595 mutex_lock(&efx->mac_lock); 584 mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index aed24b736059..87698ae0bf75 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2012 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 8c91775e3c5f..d034bcd124ef 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -18,8 +18,7 @@
18#include "bitfield.h" 18#include "bitfield.h"
19#include "efx.h" 19#include "efx.h"
20#include "nic.h" 20#include "nic.h"
21#include "spi.h" 21#include "farch_regs.h"
22#include "regs.h"
23#include "io.h" 22#include "io.h"
24#include "phy.h" 23#include "phy.h"
25#include "workarounds.h" 24#include "workarounds.h"
@@ -30,7 +29,6 @@
30/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ 29/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
31 30
32static void siena_init_wol(struct efx_nic *efx); 31static void siena_init_wol(struct efx_nic *efx);
33static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
34 32
35 33
36static void siena_push_irq_moderation(struct efx_channel *channel) 34static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -52,81 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
52 channel->channel); 50 channel->channel);
53} 51}
54 52
55static int siena_mdio_write(struct net_device *net_dev,
56 int prtad, int devad, u16 addr, u16 value)
57{
58 struct efx_nic *efx = netdev_priv(net_dev);
59 uint32_t status;
60 int rc;
61
62 rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
63 addr, value, &status);
64 if (rc)
65 return rc;
66 if (status != MC_CMD_MDIO_STATUS_GOOD)
67 return -EIO;
68
69 return 0;
70}
71
72static int siena_mdio_read(struct net_device *net_dev,
73 int prtad, int devad, u16 addr)
74{
75 struct efx_nic *efx = netdev_priv(net_dev);
76 uint16_t value;
77 uint32_t status;
78 int rc;
79
80 rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
81 addr, &value, &status);
82 if (rc)
83 return rc;
84 if (status != MC_CMD_MDIO_STATUS_GOOD)
85 return -EIO;
86
87 return (int)value;
88}
89
90/* This call is responsible for hooking in the MAC and PHY operations */
91static int siena_probe_port(struct efx_nic *efx)
92{
93 int rc;
94
95 /* Hook in PHY operations table */
96 efx->phy_op = &efx_mcdi_phy_ops;
97
98 /* Set up MDIO structure for PHY */
99 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
100 efx->mdio.mdio_read = siena_mdio_read;
101 efx->mdio.mdio_write = siena_mdio_write;
102
103 /* Fill out MDIO structure, loopback modes, and initial link state */
104 rc = efx->phy_op->probe(efx);
105 if (rc != 0)
106 return rc;
107
108 /* Allocate buffer for stats */
109 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
110 MC_CMD_MAC_NSTATS * sizeof(u64));
111 if (rc)
112 return rc;
113 netif_dbg(efx, probe, efx->net_dev,
114 "stats buffer at %llx (virt %p phys %llx)\n",
115 (u64)efx->stats_buffer.dma_addr,
116 efx->stats_buffer.addr,
117 (u64)virt_to_phys(efx->stats_buffer.addr));
118
119 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
120
121 return 0;
122}
123
124static void siena_remove_port(struct efx_nic *efx)
125{
126 efx->phy_op->remove(efx);
127 efx_nic_free_buffer(efx, &efx->stats_buffer);
128}
129
130void siena_prepare_flush(struct efx_nic *efx) 53void siena_prepare_flush(struct efx_nic *efx)
131{ 54{
132 if (efx->fc_disable++ == 0) 55 if (efx->fc_disable++ == 0)
@@ -139,7 +62,7 @@ void siena_finish_flush(struct efx_nic *efx)
139 efx_mcdi_set_mac(efx); 62 efx_mcdi_set_mac(efx);
140} 63}
141 64
142static const struct efx_nic_register_test siena_register_tests[] = { 65static const struct efx_farch_register_test siena_register_tests[] = {
143 { FR_AZ_ADR_REGION, 66 { FR_AZ_ADR_REGION,
144 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, 67 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
145 { FR_CZ_USR_EV_CFG, 68 { FR_CZ_USR_EV_CFG,
@@ -178,16 +101,16 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
178 /* Reset the chip immediately so that it is completely 101 /* Reset the chip immediately so that it is completely
179 * quiescent regardless of what any VF driver does. 102 * quiescent regardless of what any VF driver does.
180 */ 103 */
181 rc = siena_reset_hw(efx, reset_method); 104 rc = efx_mcdi_reset(efx, reset_method);
182 if (rc) 105 if (rc)
183 goto out; 106 goto out;
184 107
185 tests->registers = 108 tests->registers =
186 efx_nic_test_registers(efx, siena_register_tests, 109 efx_farch_test_registers(efx, siena_register_tests,
187 ARRAY_SIZE(siena_register_tests)) 110 ARRAY_SIZE(siena_register_tests))
188 ? -1 : 1; 111 ? -1 : 1;
189 112
190 rc = siena_reset_hw(efx, reset_method); 113 rc = efx_mcdi_reset(efx, reset_method);
191out: 114out:
192 rc2 = efx_reset_up(efx, reset_method, rc == 0); 115 rc2 = efx_reset_up(efx, reset_method, rc == 0);
193 return rc ? rc : rc2; 116 return rc ? rc : rc2;
@@ -200,11 +123,6 @@ out:
200 ************************************************************************** 123 **************************************************************************
201 */ 124 */
202 125
203static enum reset_type siena_map_reset_reason(enum reset_type reason)
204{
205 return RESET_TYPE_RECOVER_OR_ALL;
206}
207
208static int siena_map_reset_flags(u32 *flags) 126static int siena_map_reset_flags(u32 *flags)
209{ 127{
210 enum { 128 enum {
@@ -230,21 +148,6 @@ static int siena_map_reset_flags(u32 *flags)
230 return -EINVAL; 148 return -EINVAL;
231} 149}
232 150
233static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
234{
235 int rc;
236
237 /* Recover from a failed assertion pre-reset */
238 rc = efx_mcdi_handle_assertion(efx);
239 if (rc)
240 return rc;
241
242 if (method == RESET_TYPE_WORLD)
243 return efx_mcdi_reset_mc(efx);
244 else
245 return efx_mcdi_reset_port(efx);
246}
247
248#ifdef CONFIG_EEH 151#ifdef CONFIG_EEH
249/* When a PCI device is isolated from the bus, a subsequent MMIO read is 152/* When a PCI device is isolated from the bus, a subsequent MMIO read is
250 * required for the kernel EEH mechanisms to notice. As the Solarflare driver 153 * required for the kernel EEH mechanisms to notice. As the Solarflare driver
@@ -274,19 +177,25 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
274 return rc; 177 return rc;
275} 178}
276 179
277static void siena_dimension_resources(struct efx_nic *efx) 180static int siena_dimension_resources(struct efx_nic *efx)
278{ 181{
279 /* Each port has a small block of internal SRAM dedicated to 182 /* Each port has a small block of internal SRAM dedicated to
280 * the buffer table and descriptor caches. In theory we can 183 * the buffer table and descriptor caches. In theory we can
281 * map both blocks to one port, but we don't. 184 * map both blocks to one port, but we don't.
282 */ 185 */
283 efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); 186 efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
187 return 0;
188}
189
190static unsigned int siena_mem_map_size(struct efx_nic *efx)
191{
192 return FR_CZ_MC_TREG_SMEM +
193 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
284} 194}
285 195
286static int siena_probe_nic(struct efx_nic *efx) 196static int siena_probe_nic(struct efx_nic *efx)
287{ 197{
288 struct siena_nic_data *nic_data; 198 struct siena_nic_data *nic_data;
289 bool already_attached = false;
290 efx_oword_t reg; 199 efx_oword_t reg;
291 int rc; 200 int rc;
292 201
@@ -296,38 +205,24 @@ static int siena_probe_nic(struct efx_nic *efx)
296 return -ENOMEM; 205 return -ENOMEM;
297 efx->nic_data = nic_data; 206 efx->nic_data = nic_data;
298 207
299 if (efx_nic_fpga_ver(efx) != 0) { 208 if (efx_farch_fpga_ver(efx) != 0) {
300 netif_err(efx, probe, efx->net_dev, 209 netif_err(efx, probe, efx->net_dev,
301 "Siena FPGA not supported\n"); 210 "Siena FPGA not supported\n");
302 rc = -ENODEV; 211 rc = -ENODEV;
303 goto fail1; 212 goto fail1;
304 } 213 }
305 214
215 efx->max_channels = EFX_MAX_CHANNELS;
216
306 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 217 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
307 efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 218 efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
308 219
309 efx_mcdi_init(efx); 220 rc = efx_mcdi_init(efx);
310
311 /* Recover from a failed assertion before probing */
312 rc = efx_mcdi_handle_assertion(efx);
313 if (rc) 221 if (rc)
314 goto fail1; 222 goto fail1;
315 223
316 /* Let the BMC know that the driver is now in charge of link and
317 * filter settings. We must do this before we reset the NIC */
318 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
319 if (rc) {
320 netif_err(efx, probe, efx->net_dev,
321 "Unable to register driver with MCPU\n");
322 goto fail2;
323 }
324 if (already_attached)
325 /* Not a fatal error */
326 netif_err(efx, probe, efx->net_dev,
327 "Host already registered with MCPU\n");
328
329 /* Now we can reset the NIC */ 224 /* Now we can reset the NIC */
330 rc = siena_reset_hw(efx, RESET_TYPE_ALL); 225 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
331 if (rc) { 226 if (rc) {
332 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); 227 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
333 goto fail3; 228 goto fail3;
@@ -336,7 +231,8 @@ static int siena_probe_nic(struct efx_nic *efx)
336 siena_init_wol(efx); 231 siena_init_wol(efx);
337 232
338 /* Allocate memory for INT_KER */ 233 /* Allocate memory for INT_KER */
339 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); 234 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
235 GFP_KERNEL);
340 if (rc) 236 if (rc)
341 goto fail4; 237 goto fail4;
342 BUG_ON(efx->irq_status.dma_addr & 0x0f); 238 BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -371,8 +267,7 @@ fail5:
371 efx_nic_free_buffer(efx, &efx->irq_status); 267 efx_nic_free_buffer(efx, &efx->irq_status);
372fail4: 268fail4:
373fail3: 269fail3:
374 efx_mcdi_drv_attach(efx, false, NULL); 270 efx_mcdi_fini(efx);
375fail2:
376fail1: 271fail1:
377 kfree(efx->nic_data); 272 kfree(efx->nic_data);
378 return rc; 273 return rc;
@@ -448,7 +343,7 @@ static int siena_init_nic(struct efx_nic *efx)
448 EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); 343 EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
449 efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); 344 efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
450 345
451 efx_nic_init_common(efx); 346 efx_farch_init_common(efx);
452 return 0; 347 return 0;
453} 348}
454 349
@@ -458,144 +353,192 @@ static void siena_remove_nic(struct efx_nic *efx)
458 353
459 efx_nic_free_buffer(efx, &efx->irq_status); 354 efx_nic_free_buffer(efx, &efx->irq_status);
460 355
461 siena_reset_hw(efx, RESET_TYPE_ALL); 356 efx_mcdi_reset(efx, RESET_TYPE_ALL);
462 357
463 /* Relinquish the device back to the BMC */ 358 efx_mcdi_fini(efx);
464 efx_mcdi_drv_attach(efx, false, NULL);
465 359
466 /* Tear down the private nic state */ 360 /* Tear down the private nic state */
467 kfree(efx->nic_data); 361 kfree(efx->nic_data);
468 efx->nic_data = NULL; 362 efx->nic_data = NULL;
469} 363}
470 364
471#define STATS_GENERATION_INVALID ((__force __le64)(-1)) 365#define SIENA_DMA_STAT(ext_name, mcdi_name) \
366 [SIENA_STAT_ ## ext_name] = \
367 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
368#define SIENA_OTHER_STAT(ext_name) \
369 [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
370
371static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
372 SIENA_DMA_STAT(tx_bytes, TX_BYTES),
373 SIENA_OTHER_STAT(tx_good_bytes),
374 SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
375 SIENA_DMA_STAT(tx_packets, TX_PKTS),
376 SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
377 SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
378 SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
379 SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
380 SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
381 SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
382 SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
383 SIENA_DMA_STAT(tx_64, TX_64_PKTS),
384 SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
385 SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
386 SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
387 SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
388 SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
389 SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
390 SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
391 SIENA_OTHER_STAT(tx_collision),
392 SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
393 SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
394 SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
395 SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
396 SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
397 SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
398 SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
399 SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
400 SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
401 SIENA_DMA_STAT(rx_bytes, RX_BYTES),
402 SIENA_OTHER_STAT(rx_good_bytes),
403 SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
404 SIENA_DMA_STAT(rx_packets, RX_PKTS),
405 SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
406 SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
407 SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
408 SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
409 SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
410 SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
411 SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
412 SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
413 SIENA_DMA_STAT(rx_64, RX_64_PKTS),
414 SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
415 SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
416 SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
417 SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
418 SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
419 SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
420 SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
421 SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
422 SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
423 SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
424 SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
425 SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
426 SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
427 SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
428 SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
429};
430static const unsigned long siena_stat_mask[] = {
431 [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
432};
433
434static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
435{
436 return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
437 siena_stat_mask, names);
438}
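
A note on the pattern: the SIENA_DMA_STAT()/SIENA_OTHER_STAT() macros build a table of struct efx_hw_stat_desc entries, pairing an ethtool-visible name with the width and byte offset of the counter in the MCDI statistics buffer (each MC_CMD_MAC_* index names an 8-byte slot, hence the 8 * factor); entries with width 0 are derived in software. Generic helpers can then describe and copy every counter by walking the table, instead of open-coding one assignment per statistic as the MAC_STAT() code removed below did. A minimal sketch of such a walker, assuming a simplified descriptor with only 64-bit little-endian DMA fields (the driver's real efx_nic_update_stats() also handles other widths and a validity mask):

    struct hw_stat_desc_sketch {
            const char *name;       /* ethtool-visible name */
            u16 dma_width;          /* 0 => derived in software, no DMA field */
            u16 offset;             /* byte offset into the DMA buffer */
    };

    static void update_stats_sketch(const struct hw_stat_desc_sketch *desc,
                                    size_t count, u64 *stats,
                                    const void *dma_buf)
    {
            size_t i;

            for (i = 0; i < count; i++) {
                    if (desc[i].dma_width == 0)
                            continue;       /* e.g. tx_good_bytes, computed later */
                    stats[i] = le64_to_cpup((const __le64 *)
                                            (dma_buf + desc[i].offset));
            }
    }

The all-ones mask array (siena_stat_mask) lets other NIC types reuse the same walker while exposing only a subset of their table.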
472 439
473static int siena_try_update_nic_stats(struct efx_nic *efx) 440static int siena_try_update_nic_stats(struct efx_nic *efx)
474{ 441{
442 struct siena_nic_data *nic_data = efx->nic_data;
443 u64 *stats = nic_data->stats;
475 __le64 *dma_stats; 444 __le64 *dma_stats;
476 struct efx_mac_stats *mac_stats;
477 __le64 generation_start, generation_end; 445 __le64 generation_start, generation_end;
478 446
479 mac_stats = &efx->mac_stats;
480 dma_stats = efx->stats_buffer.addr; 447 dma_stats = efx->stats_buffer.addr;
481 448
482 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; 449 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
483 if (generation_end == STATS_GENERATION_INVALID) 450 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
484 return 0; 451 return 0;
485 rmb(); 452 rmb();
486 453 efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
487#define MAC_STAT(M, D) \ 454 stats, efx->stats_buffer.addr, false);
488 mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
489
490 MAC_STAT(tx_bytes, TX_BYTES);
491 MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
492 efx_update_diff_stat(&mac_stats->tx_good_bytes,
493 mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
494 MAC_STAT(tx_packets, TX_PKTS);
495 MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
496 MAC_STAT(tx_pause, TX_PAUSE_PKTS);
497 MAC_STAT(tx_control, TX_CONTROL_PKTS);
498 MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
499 MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
500 MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
501 MAC_STAT(tx_lt64, TX_LT64_PKTS);
502 MAC_STAT(tx_64, TX_64_PKTS);
503 MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
504 MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
505 MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
506 MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
507 MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
508 MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
509 MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
510 mac_stats->tx_collision = 0;
511 MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
512 MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
513 MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
514 MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
515 MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
516 mac_stats->tx_collision = (mac_stats->tx_single_collision +
517 mac_stats->tx_multiple_collision +
518 mac_stats->tx_excessive_collision +
519 mac_stats->tx_late_collision);
520 MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
521 MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
522 MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
523 MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
524 MAC_STAT(rx_bytes, RX_BYTES);
525 MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
526 efx_update_diff_stat(&mac_stats->rx_good_bytes,
527 mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
528 MAC_STAT(rx_packets, RX_PKTS);
529 MAC_STAT(rx_good, RX_GOOD_PKTS);
530 MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
531 MAC_STAT(rx_pause, RX_PAUSE_PKTS);
532 MAC_STAT(rx_control, RX_CONTROL_PKTS);
533 MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
534 MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
535 MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
536 MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
537 MAC_STAT(rx_64, RX_64_PKTS);
538 MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
539 MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
540 MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
541 MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
542 MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
543 MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
544 MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
545 mac_stats->rx_bad_lt64 = 0;
546 mac_stats->rx_bad_64_to_15xx = 0;
547 mac_stats->rx_bad_15xx_to_jumbo = 0;
548 MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
549 MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
550 mac_stats->rx_missed = 0;
551 MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
552 MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
553 MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
554 MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
555 MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
556 mac_stats->rx_good_lt64 = 0;
557
558 efx->n_rx_nodesc_drop_cnt =
559 le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
560
561#undef MAC_STAT
562
563 rmb(); 455 rmb();
564 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 456 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
565 if (generation_end != generation_start) 457 if (generation_end != generation_start)
566 return -EAGAIN; 458 return -EAGAIN;
567 459
460 /* Update derived statistics */
461 efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
462 stats[SIENA_STAT_tx_bytes] -
463 stats[SIENA_STAT_tx_bad_bytes]);
464 stats[SIENA_STAT_tx_collision] =
465 stats[SIENA_STAT_tx_single_collision] +
466 stats[SIENA_STAT_tx_multiple_collision] +
467 stats[SIENA_STAT_tx_excessive_collision] +
468 stats[SIENA_STAT_tx_late_collision];
469 efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
470 stats[SIENA_STAT_rx_bytes] -
471 stats[SIENA_STAT_rx_bad_bytes]);
568 return 0; 472 return 0;
569} 473}
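
The function depends on the generation counters the MC writes around each statistics DMA: read GENERATION_END, copy the counters, then read GENERATION_START; if the two differ, a DMA was in flight and the snapshot is discarded with -EAGAIN. The two rmb() calls keep the buffer reads ordered between the counter reads, exactly as in a seqlock read side. A minimal sketch of the idiom on its own, using the constants above (the real function copies via efx_nic_update_stats() rather than a plain loop):

    /* Returns 0 on a consistent snapshot, -EAGAIN if the MC was mid-update. */
    static int stats_snapshot_sketch(const __le64 *dma_stats, u64 *copy, size_t n)
    {
            __le64 gen_start, gen_end;
            size_t i;

            gen_end = dma_stats[MC_CMD_MAC_GENERATION_END];
            if (gen_end == EFX_MC_STATS_GENERATION_INVALID)
                    return 0;       /* MC has not written any stats yet */
            rmb();                  /* read the end marker before the payload */
            for (i = 0; i < n; i++)
                    copy[i] = le64_to_cpu(dma_stats[i]);
            rmb();                  /* read the payload before the start marker */
            gen_start = dma_stats[MC_CMD_MAC_GENERATION_START];
            return gen_end == gen_start ? 0 : -EAGAIN;
    }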
570 474
571static void siena_update_nic_stats(struct efx_nic *efx) 475static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
476 struct rtnl_link_stats64 *core_stats)
572{ 477{
478 struct siena_nic_data *nic_data = efx->nic_data;
479 u64 *stats = nic_data->stats;
573 int retry; 480 int retry;
574 481
575 /* If we're unlucky enough to read statistics during the DMA, wait 482 /* If we're unlucky enough to read statistics during the DMA, wait
576 * up to 10ms for it to finish (typically takes <500us) */ 483 * up to 10ms for it to finish (typically takes <500us) */
577 for (retry = 0; retry < 100; ++retry) { 484 for (retry = 0; retry < 100; ++retry) {
578 if (siena_try_update_nic_stats(efx) == 0) 485 if (siena_try_update_nic_stats(efx) == 0)
579 return; 486 break;
580 udelay(100); 487 udelay(100);
581 } 488 }
582 489
583 /* Use the old values instead */ 490 if (full_stats)
491 memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
492
493 if (core_stats) {
494 core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
495 core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
496 core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
497 core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
498 core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
499 core_stats->multicast = stats[SIENA_STAT_rx_multicast];
500 core_stats->collisions = stats[SIENA_STAT_tx_collision];
501 core_stats->rx_length_errors =
502 stats[SIENA_STAT_rx_gtjumbo] +
503 stats[SIENA_STAT_rx_length_error];
504 core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
505 core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
506 core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
507 core_stats->tx_window_errors =
508 stats[SIENA_STAT_tx_late_collision];
509
510 core_stats->rx_errors = (core_stats->rx_length_errors +
511 core_stats->rx_crc_errors +
512 core_stats->rx_frame_errors +
513 stats[SIENA_STAT_rx_symbol_error]);
514 core_stats->tx_errors = (core_stats->tx_window_errors +
515 stats[SIENA_STAT_tx_bad]);
516 }
517
518 return SIENA_STAT_COUNT;
584} 519}
585 520
586static void siena_start_nic_stats(struct efx_nic *efx) 521static int siena_mac_reconfigure(struct efx_nic *efx)
587{ 522{
588 __le64 *dma_stats = efx->stats_buffer.addr; 523 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
524 int rc;
589 525
590 dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; 526 BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN !=
527 MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
528 sizeof(efx->multicast_hash));
591 529
592 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 530 efx_farch_filter_sync_rx_mode(efx);
593 MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
594}
595 531
596static void siena_stop_nic_stats(struct efx_nic *efx) 532 WARN_ON(!mutex_is_locked(&efx->mac_lock));
597{ 533
598 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); 534 rc = efx_mcdi_set_mac(efx);
535 if (rc != 0)
536 return rc;
537
538 memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
539 efx->multicast_hash.byte, sizeof(efx->multicast_hash));
540 return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
541 inbuf, sizeof(inbuf), NULL, 0, NULL);
599} 542}
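
siena_mac_reconfigure() also illustrates the MCDI buffer helpers this series rolls out everywhere: MCDI_DECLARE_BUF() declares a correctly sized, dword-aligned request on the stack, MCDI_PTR() returns the address of a named field, and the BUILD_BUG_ON() turns a protocol assumption (the multicast hash exactly fills the request) into a compile-time check. A hedged sketch of the same shape for an arbitrary set-command; the MC_CMD_EXAMPLE* names are placeholders, not real MCDI definitions:

    /* Hypothetical command, shown only for the buffer-handling pattern. */
    static int example_mcdi_set_sketch(struct efx_nic *efx,
                                       const u8 *payload, size_t len)
    {
            MCDI_DECLARE_BUF(inbuf, MC_CMD_EXAMPLE_IN_LEN);

            if (WARN_ON(len > MC_CMD_EXAMPLE_IN_LEN))
                    return -EINVAL;

            memcpy(MCDI_PTR(inbuf, EXAMPLE_IN_PAYLOAD), payload, len);
            return efx_mcdi_rpc(efx, MC_CMD_EXAMPLE, inbuf, sizeof(inbuf),
                                NULL, 0, NULL); /* no response payload */
    }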
600 543
601/************************************************************************** 544/**************************************************************************
@@ -669,6 +612,241 @@ static void siena_init_wol(struct efx_nic *efx)
669 } 612 }
670} 613}
671 614
615/**************************************************************************
616 *
617 * MCDI
618 *
619 **************************************************************************
620 */
621
622#define MCDI_PDU(efx) \
623 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
624#define MCDI_DOORBELL(efx) \
625 (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
626#define MCDI_STATUS(efx) \
627 (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
628
629static void siena_mcdi_request(struct efx_nic *efx,
630 const efx_dword_t *hdr, size_t hdr_len,
631 const efx_dword_t *sdu, size_t sdu_len)
632{
633 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
634 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
635 unsigned int i;
636 unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
637
638 EFX_BUG_ON_PARANOID(hdr_len != 4);
639
640 efx_writed(efx, hdr, pdu);
641
642 for (i = 0; i < inlen_dw; i++)
643 efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
644
645 /* Ensure the request is written out before the doorbell */
646 wmb();
647
648 /* ring the doorbell with a distinctive value */
649 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
650}
651
652static bool siena_mcdi_poll_response(struct efx_nic *efx)
653{
654 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
655 efx_dword_t hdr;
656
657 efx_readd(efx, &hdr, pdu);
658
659 /* All 1's indicates that shared memory is in reset (and is
660 * not a valid hdr). Wait for it to come out of reset before
661 * completing the command
662 */
663 return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
664 EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
665}
666
667static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
668 size_t offset, size_t outlen)
669{
670 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
671 unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
672 int i;
673
674 for (i = 0; i < outlen_dw; i++)
675 efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
676}
677
678static int siena_mcdi_poll_reboot(struct efx_nic *efx)
679{
680 struct siena_nic_data *nic_data = efx->nic_data;
681 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
682 efx_dword_t reg;
683 u32 value;
684
685 efx_readd(efx, &reg, addr);
686 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
687
688 if (value == 0)
689 return 0;
690
691 EFX_ZERO_DWORD(reg);
692 efx_writed(efx, &reg, addr);
693
694 /* MAC statistics have been cleared on the NIC; clear the local
695 * copies that we update with efx_update_diff_stat().
696 */
697 nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
698 nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
699
700 if (value == MC_STATUS_DWORD_ASSERT)
701 return -EINTR;
702 else
703 return -EIO;
704}
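
These four hooks are Siena's MCDI transport, now abstracted behind struct efx_nic_type so that EF10 can plug in its own: mcdi_request copies the header and SDU dwords into the port's shared-memory PDU window and rings the doorbell (the wmb() guarantees the MC cannot observe the doorbell before the request body), mcdi_poll_response waits for MCDI_HEADER_RESPONSE to appear, mcdi_read_response copies the reply out, and mcdi_poll_reboot turns the status word into 0 / -EINTR / -EIO. A rough sketch of how core code might drive the hooks; the driver's real poll loop additionally handles MC reboots, error translation and an adaptive backoff:

    static int mcdi_poll_sketch(struct efx_nic *efx, efx_dword_t *outbuf,
                                size_t outlen, unsigned long timeout)
    {
            unsigned long deadline = jiffies + timeout;

            while (!efx->type->mcdi_poll_response(efx)) {
                    if (time_after(jiffies, deadline))
                            return -ETIMEDOUT;
                    udelay(10);     /* illustrative fixed backoff */
            }
            /* On Siena the header is a single dword; skip past it. */
            efx->type->mcdi_read_response(efx, outbuf, 4, outlen);
            return 0;
    }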
705
706/**************************************************************************
707 *
708 * MTD
709 *
710 **************************************************************************
711 */
712
713#ifdef CONFIG_SFC_MTD
714
715struct siena_nvram_type_info {
716 int port;
717 const char *name;
718};
719
720static const struct siena_nvram_type_info siena_nvram_types[] = {
721 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
722 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
723 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
724 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
725 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
726 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
727 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
728 [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
729 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
730 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
731 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
732 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
733 [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
734};
735
736static int siena_mtd_probe_partition(struct efx_nic *efx,
737 struct efx_mcdi_mtd_partition *part,
738 unsigned int type)
739{
740 const struct siena_nvram_type_info *info;
741 size_t size, erase_size;
742 bool protected;
743 int rc;
744
745 if (type >= ARRAY_SIZE(siena_nvram_types) ||
746 siena_nvram_types[type].name == NULL)
747 return -ENODEV;
748
749 info = &siena_nvram_types[type];
750
751 if (info->port != efx_port_num(efx))
752 return -ENODEV;
753
754 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
755 if (rc)
756 return rc;
757 if (protected)
758 return -ENODEV; /* hide it */
759
760 part->nvram_type = type;
761 part->common.dev_type_name = "Siena NVRAM manager";
762 part->common.type_name = info->name;
763
764 part->common.mtd.type = MTD_NORFLASH;
765 part->common.mtd.flags = MTD_CAP_NORFLASH;
766 part->common.mtd.size = size;
767 part->common.mtd.erasesize = erase_size;
768
769 return 0;
770}
771
772static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
773 struct efx_mcdi_mtd_partition *parts,
774 size_t n_parts)
775{
776 uint16_t fw_subtype_list[
777 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
778 size_t i;
779 int rc;
780
781 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
782 if (rc)
783 return rc;
784
785 for (i = 0; i < n_parts; i++)
786 parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
787
788 return 0;
789}
790
791static int siena_mtd_probe(struct efx_nic *efx)
792{
793 struct efx_mcdi_mtd_partition *parts;
794 u32 nvram_types;
795 unsigned int type;
796 size_t n_parts;
797 int rc;
798
799 ASSERT_RTNL();
800
801 rc = efx_mcdi_nvram_types(efx, &nvram_types);
802 if (rc)
803 return rc;
804
805 parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
806 if (!parts)
807 return -ENOMEM;
808
809 type = 0;
810 n_parts = 0;
811
812 while (nvram_types != 0) {
813 if (nvram_types & 1) {
814 rc = siena_mtd_probe_partition(efx, &parts[n_parts],
815 type);
816 if (rc == 0)
817 n_parts++;
818 else if (rc != -ENODEV)
819 goto fail;
820 }
821 type++;
822 nvram_types >>= 1;
823 }
824
825 rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
826 if (rc)
827 goto fail;
828
829 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
830fail:
831 if (rc)
832 kfree(parts);
833 return rc;
834}
835
836#endif /* CONFIG_SFC_MTD */
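
siena_mtd_probe() sizes the partition array with hweight32() (the number of set bits in nvram_types) and then walks the mask with a shift-and-test loop, treating -ENODEV from siena_mtd_probe_partition() as "skip this type" (wrong port, protected, or unnamed) rather than as failure. The same walk can be written with the kernel's for_each_set_bit() helper; a sketch, assuming the u32 mask is first widened into an unsigned long and ignoring the non-ENODEV error path for brevity:

    static size_t probe_partitions_sketch(struct efx_nic *efx,
                                          struct efx_mcdi_mtd_partition *parts,
                                          u32 nvram_types)
    {
            unsigned long mask = nvram_types;   /* widen to a bitmap word */
            unsigned int type;
            size_t n_parts = 0;

            for_each_set_bit(type, &mask, 32)
                    if (siena_mtd_probe_partition(efx, &parts[n_parts],
                                                  type) == 0)
                            n_parts++;
            return n_parts;
    }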
837
838/**************************************************************************
839 *
840 * PTP
841 *
842 **************************************************************************
843 */
844
845static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
846{
847 _efx_writed(efx, cpu_to_le32(host_time),
848 FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
849}
672 850
673/************************************************************************** 851/**************************************************************************
674 * 852 *
@@ -678,6 +856,7 @@ static void siena_init_wol(struct efx_nic *efx)
678 */ 856 */
679 857
680const struct efx_nic_type siena_a0_nic_type = { 858const struct efx_nic_type siena_a0_nic_type = {
859 .mem_map_size = siena_mem_map_size,
681 .probe = siena_probe_nic, 860 .probe = siena_probe_nic,
682 .remove = siena_remove_nic, 861 .remove = siena_remove_nic,
683 .init = siena_init_nic, 862 .init = siena_init_nic,
@@ -688,44 +867,94 @@ const struct efx_nic_type siena_a0_nic_type = {
688#else 867#else
689 .monitor = NULL, 868 .monitor = NULL,
690#endif 869#endif
691 .map_reset_reason = siena_map_reset_reason, 870 .map_reset_reason = efx_mcdi_map_reset_reason,
692 .map_reset_flags = siena_map_reset_flags, 871 .map_reset_flags = siena_map_reset_flags,
693 .reset = siena_reset_hw, 872 .reset = efx_mcdi_reset,
694 .probe_port = siena_probe_port, 873 .probe_port = efx_mcdi_port_probe,
695 .remove_port = siena_remove_port, 874 .remove_port = efx_mcdi_port_remove,
875 .fini_dmaq = efx_farch_fini_dmaq,
696 .prepare_flush = siena_prepare_flush, 876 .prepare_flush = siena_prepare_flush,
697 .finish_flush = siena_finish_flush, 877 .finish_flush = siena_finish_flush,
878 .describe_stats = siena_describe_nic_stats,
698 .update_stats = siena_update_nic_stats, 879 .update_stats = siena_update_nic_stats,
699 .start_stats = siena_start_nic_stats, 880 .start_stats = efx_mcdi_mac_start_stats,
700 .stop_stats = siena_stop_nic_stats, 881 .stop_stats = efx_mcdi_mac_stop_stats,
701 .set_id_led = efx_mcdi_set_id_led, 882 .set_id_led = efx_mcdi_set_id_led,
702 .push_irq_moderation = siena_push_irq_moderation, 883 .push_irq_moderation = siena_push_irq_moderation,
703 .reconfigure_mac = efx_mcdi_mac_reconfigure, 884 .reconfigure_mac = siena_mac_reconfigure,
704 .check_mac_fault = efx_mcdi_mac_check_fault, 885 .check_mac_fault = efx_mcdi_mac_check_fault,
705 .reconfigure_port = efx_mcdi_phy_reconfigure, 886 .reconfigure_port = efx_mcdi_port_reconfigure,
706 .get_wol = siena_get_wol, 887 .get_wol = siena_get_wol,
707 .set_wol = siena_set_wol, 888 .set_wol = siena_set_wol,
708 .resume_wol = siena_init_wol, 889 .resume_wol = siena_init_wol,
709 .test_chip = siena_test_chip, 890 .test_chip = siena_test_chip,
710 .test_nvram = efx_mcdi_nvram_test_all, 891 .test_nvram = efx_mcdi_nvram_test_all,
892 .mcdi_request = siena_mcdi_request,
893 .mcdi_poll_response = siena_mcdi_poll_response,
894 .mcdi_read_response = siena_mcdi_read_response,
895 .mcdi_poll_reboot = siena_mcdi_poll_reboot,
896 .irq_enable_master = efx_farch_irq_enable_master,
897 .irq_test_generate = efx_farch_irq_test_generate,
898 .irq_disable_non_ev = efx_farch_irq_disable_master,
899 .irq_handle_msi = efx_farch_msi_interrupt,
900 .irq_handle_legacy = efx_farch_legacy_interrupt,
901 .tx_probe = efx_farch_tx_probe,
902 .tx_init = efx_farch_tx_init,
903 .tx_remove = efx_farch_tx_remove,
904 .tx_write = efx_farch_tx_write,
905 .rx_push_indir_table = efx_farch_rx_push_indir_table,
906 .rx_probe = efx_farch_rx_probe,
907 .rx_init = efx_farch_rx_init,
908 .rx_remove = efx_farch_rx_remove,
909 .rx_write = efx_farch_rx_write,
910 .rx_defer_refill = efx_farch_rx_defer_refill,
911 .ev_probe = efx_farch_ev_probe,
912 .ev_init = efx_farch_ev_init,
913 .ev_fini = efx_farch_ev_fini,
914 .ev_remove = efx_farch_ev_remove,
915 .ev_process = efx_farch_ev_process,
916 .ev_read_ack = efx_farch_ev_read_ack,
917 .ev_test_generate = efx_farch_ev_test_generate,
918 .filter_table_probe = efx_farch_filter_table_probe,
919 .filter_table_restore = efx_farch_filter_table_restore,
920 .filter_table_remove = efx_farch_filter_table_remove,
921 .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
922 .filter_insert = efx_farch_filter_insert,
923 .filter_remove_safe = efx_farch_filter_remove_safe,
924 .filter_get_safe = efx_farch_filter_get_safe,
925 .filter_clear_rx = efx_farch_filter_clear_rx,
926 .filter_count_rx_used = efx_farch_filter_count_rx_used,
927 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
928 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
929#ifdef CONFIG_RFS_ACCEL
930 .filter_rfs_insert = efx_farch_filter_rfs_insert,
931 .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
932#endif
933#ifdef CONFIG_SFC_MTD
934 .mtd_probe = siena_mtd_probe,
935 .mtd_rename = efx_mcdi_mtd_rename,
936 .mtd_read = efx_mcdi_mtd_read,
937 .mtd_erase = efx_mcdi_mtd_erase,
938 .mtd_write = efx_mcdi_mtd_write,
939 .mtd_sync = efx_mcdi_mtd_sync,
940#endif
941 .ptp_write_host_time = siena_ptp_write_host_time,
711 942
712 .revision = EFX_REV_SIENA_A0, 943 .revision = EFX_REV_SIENA_A0,
713 .mem_map_size = (FR_CZ_MC_TREG_SMEM +
714 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
715 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 944 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
716 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 945 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
717 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 946 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
718 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, 947 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
719 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, 948 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
720 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 949 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
721 .rx_buffer_hash_size = 0x10, 950 .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
951 .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
722 .rx_buffer_padding = 0, 952 .rx_buffer_padding = 0,
723 .can_rx_scatter = true, 953 .can_rx_scatter = true,
724 .max_interrupt_mode = EFX_INT_MODE_MSIX, 954 .max_interrupt_mode = EFX_INT_MODE_MSIX,
725 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
726 * interrupt handler only supports 32
727 * channels */
728 .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, 955 .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
729 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 956 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
730 NETIF_F_RXHASH | NETIF_F_NTUPLE), 957 NETIF_F_RXHASH | NETIF_F_NTUPLE),
958 .mcdi_max_ver = 1,
959 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
731}; 960};
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 90f8d1604f5f..0c38f926871e 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2010-2011 Solarflare Communications Inc. 3 * Copyright 2010-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,7 @@
15#include "mcdi.h" 15#include "mcdi.h"
16#include "filter.h" 16#include "filter.h"
17#include "mcdi_pcol.h" 17#include "mcdi_pcol.h"
18#include "regs.h" 18#include "farch_regs.h"
19#include "vfdi.h" 19#include "vfdi.h"
20 20
21/* Number of longs required to track all the VIs in a VF */ 21/* Number of longs required to track all the VIs in a VF */
@@ -197,8 +197,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
197static int efx_sriov_cmd(struct efx_nic *efx, bool enable, 197static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
198 unsigned *vi_scale_out, unsigned *vf_total_out) 198 unsigned *vi_scale_out, unsigned *vf_total_out)
199{ 199{
200 u8 inbuf[MC_CMD_SRIOV_IN_LEN]; 200 MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
201 u8 outbuf[MC_CMD_SRIOV_OUT_LEN]; 201 MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
202 unsigned vi_scale, vf_total; 202 unsigned vi_scale, vf_total;
203 size_t outlen; 203 size_t outlen;
204 int rc; 204 int rc;
@@ -240,64 +240,55 @@ static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
240static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, 240static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
241 unsigned int count) 241 unsigned int count)
242{ 242{
243 u8 *inbuf, *record; 243 MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
244 unsigned int used; 244 MCDI_DECLARE_STRUCT_PTR(record);
245 u32 from_rid, from_hi, from_lo; 245 unsigned int index, used;
246 u64 from_addr;
247 u32 from_rid;
246 int rc; 248 int rc;
247 249
248 mb(); /* Finish writing source/reading dest before DMA starts */ 250 mb(); /* Finish writing source/reading dest before DMA starts */
249 251
250 used = MC_CMD_MEMCPY_IN_LEN(count); 252 if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
251 if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
252 return -ENOBUFS; 253 return -ENOBUFS;
254 used = MC_CMD_MEMCPY_IN_LEN(count);
253 255
254 /* Allocate room for the largest request */ 256 for (index = 0; index < count; index++) {
255 inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL); 257 record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
256 if (inbuf == NULL) 258 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
257 return -ENOMEM; 259 count);
258
259 record = inbuf;
260 MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
261 while (count-- > 0) {
262 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, 260 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
263 req->to_rid); 261 req->to_rid);
264 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO, 262 MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
265 (u32)req->to_addr); 263 req->to_addr);
266 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
267 (u32)(req->to_addr >> 32));
268 if (req->from_buf == NULL) { 264 if (req->from_buf == NULL) {
269 from_rid = req->from_rid; 265 from_rid = req->from_rid;
270 from_lo = (u32)req->from_addr; 266 from_addr = req->from_addr;
271 from_hi = (u32)(req->from_addr >> 32);
272 } else { 267 } else {
273 if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) { 268 if (WARN_ON(used + req->length >
269 MCDI_CTL_SDU_LEN_MAX_V1)) {
274 rc = -ENOBUFS; 270 rc = -ENOBUFS;
275 goto out; 271 goto out;
276 } 272 }
277 273
278 from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; 274 from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
279 from_lo = used; 275 from_addr = used;
280 from_hi = 0; 276 memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
281 memcpy(inbuf + used, req->from_buf, req->length); 277 req->length);
282 used += req->length; 278 used += req->length;
283 } 279 }
284 280
285 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid); 281 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
286 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO, 282 MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
287 from_lo); 283 from_addr);
288 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
289 from_hi);
290 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, 284 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
291 req->length); 285 req->length);
292 286
293 ++req; 287 ++req;
294 record += MC_CMD_MEMCPY_IN_RECORD_LEN;
295 } 288 }
296 289
297 rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); 290 rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
298out: 291out:
299 kfree(inbuf);
300
301 mb(); /* Don't write source/read dest before DMA is complete */ 292 mb(); /* Don't write source/read dest before DMA is complete */
302 293
303 return rc; 294 return rc;
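
The rewritten efx_sriov_memcpy() drops the kzalloc'd scratch buffer and the hand-advanced record byte pointer: MCDI_ARRAY_STRUCT_PTR() computes the address of the index'th fixed-size MEMCPY record inside the stack buffer, and MCDI_SET_QWORD() replaces the manual split of 64-bit addresses into _LO/_HI dwords. A reduced sketch of filling one record, covering only the copy-between-RIDs case (no inline data) with the fields shown in this hunk:

    static void fill_memcpy_record_sketch(efx_dword_t *inbuf, unsigned int index,
                                          unsigned int count,
                                          const struct efx_memcpy_req *req)
    {
            MCDI_DECLARE_STRUCT_PTR(record);

            record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
            MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS, count);
            MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, req->to_rid);
            MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR, req->to_addr);
            MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, req->from_rid);
            MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
                           req->from_addr);
            MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, req->length);
    }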
@@ -473,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
473 VFDI_EV_SEQ, (vf->msg_seqno & 0xff), 464 VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
474 VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); 465 VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
475 ++vf->msg_seqno; 466 ++vf->msg_seqno;
476 efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx), 467 efx_farch_generate_event(efx,
477 &event); 468 EFX_VI_BASE + vf->index * efx_vf_size(efx),
469 &event);
478} 470}
479 471
480static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, 472static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
@@ -684,16 +676,12 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
684 unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx); 676 unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
685 unsigned timeout = HZ; 677 unsigned timeout = HZ;
686 unsigned index, rxqs_count; 678 unsigned index, rxqs_count;
687 __le32 *rxqs; 679 MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
688 int rc; 680 int rc;
689 681
690 BUILD_BUG_ON(VF_MAX_RX_QUEUES > 682 BUILD_BUG_ON(VF_MAX_RX_QUEUES >
691 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); 683 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
692 684
693 rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
694 if (rxqs == NULL)
695 return VFDI_RC_ENOMEM;
696
697 rtnl_lock(); 685 rtnl_lock();
698 siena_prepare_flush(efx); 686 siena_prepare_flush(efx);
699 rtnl_unlock(); 687 rtnl_unlock();
@@ -708,14 +696,19 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
708 vf_offset + index); 696 vf_offset + index);
709 efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ); 697 efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
710 } 698 }
711 if (test_bit(index, vf->rxq_mask)) 699 if (test_bit(index, vf->rxq_mask)) {
712 rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index); 700 MCDI_SET_ARRAY_DWORD(
701 inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
702 rxqs_count, vf_offset + index);
703 rxqs_count++;
704 }
713 } 705 }
714 706
715 atomic_set(&vf->rxq_retry_count, 0); 707 atomic_set(&vf->rxq_retry_count, 0);
716 while (timeout && (vf->rxq_count || vf->txq_count)) { 708 while (timeout && (vf->rxq_count || vf->txq_count)) {
717 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs, 709 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
718 rxqs_count * sizeof(*rxqs), NULL, 0, NULL); 710 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
711 NULL, 0, NULL);
719 WARN_ON(rc < 0); 712 WARN_ON(rc < 0);
720 713
721 timeout = wait_event_timeout(vf->flush_waitq, 714 timeout = wait_event_timeout(vf->flush_waitq,
@@ -725,8 +718,10 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
725 for (index = 0; index < count; ++index) { 718 for (index = 0; index < count; ++index) {
726 if (test_and_clear_bit(index, vf->rxq_retry_mask)) { 719 if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
727 atomic_dec(&vf->rxq_retry_count); 720 atomic_dec(&vf->rxq_retry_count);
728 rxqs[rxqs_count++] = 721 MCDI_SET_ARRAY_DWORD(
729 cpu_to_le32(vf_offset + index); 722 inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
723 rxqs_count, vf_offset + index);
724 rxqs_count++;
730 } 725 }
731 } 726 }
732 } 727 }
@@ -749,7 +744,6 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
749 } 744 }
750 efx_sriov_bufs(efx, vf->buftbl_base, NULL, 745 efx_sriov_bufs(efx, vf->buftbl_base, NULL,
751 EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); 746 EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
752 kfree(rxqs);
753 efx_vfdi_flush_clear(vf); 747 efx_vfdi_flush_clear(vf);
754 748
755 vf->evq0_count = 0; 749 vf->evq0_count = 0;
@@ -1004,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
1004 struct efx_nic *efx = vf->efx; 998 struct efx_nic *efx = vf->efx;
1005 struct efx_buffer buf; 999 struct efx_buffer buf;
1006 1000
1007 if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) { 1001 if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
1008 efx_sriov_reset_vf(vf, &buf); 1002 efx_sriov_reset_vf(vf, &buf);
1009 efx_nic_free_buffer(efx, &buf); 1003 efx_nic_free_buffer(efx, &buf);
1010 } 1004 }
@@ -1248,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
1248 pci_domain_nr(pci_dev->bus), pci_dev->bus->number, 1242 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
1249 PCI_SLOT(devfn), PCI_FUNC(devfn)); 1243 PCI_SLOT(devfn), PCI_FUNC(devfn));
1250 1244
1251 rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE); 1245 rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
1246 GFP_KERNEL);
1252 if (rc) 1247 if (rc)
1253 goto fail; 1248 goto fail;
1254 1249
@@ -1280,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
1280 if (rc) 1275 if (rc)
1281 goto fail_cmd; 1276 goto fail_cmd;
1282 1277
1283 rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status)); 1278 rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
1279 GFP_KERNEL);
1284 if (rc) 1280 if (rc)
1285 goto fail_status; 1281 goto fail_status;
1286 vfdi_status = efx->vfdi_status.addr; 1282 vfdi_status = efx->vfdi_status.addr;
@@ -1535,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
1535 efx_sriov_usrev(efx, true); 1531 efx_sriov_usrev(efx, true);
1536 (void)efx_sriov_cmd(efx, true, NULL, NULL); 1532 (void)efx_sriov_cmd(efx, true, NULL, NULL);
1537 1533
1538 if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) 1534 if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
1539 return; 1535 return;
1540 1536
1541 for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { 1537 for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
deleted file mode 100644
index 5431a1bbff5c..000000000000
--- a/drivers/net/ethernet/sfc/spi.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_SPI_H
12#define EFX_SPI_H
13
14#include "net_driver.h"
15
16/**************************************************************************
17 *
18 * Basic SPI command set and bit definitions
19 *
20 *************************************************************************/
21
22#define SPI_WRSR 0x01 /* Write status register */
23#define SPI_WRITE 0x02 /* Write data to memory array */
24#define SPI_READ 0x03 /* Read data from memory array */
25#define SPI_WRDI 0x04 /* Reset write enable latch */
26#define SPI_RDSR 0x05 /* Read status register */
27#define SPI_WREN 0x06 /* Set write enable latch */
28#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
29
30#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
31#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
32#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
33#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
34#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
35#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
36
37/**
38 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
39 * @device_id: Controller's id for the device
40 * @size: Size (in bytes)
41 * @addr_len: Number of address bytes in read/write commands
42 * @munge_address: Flag whether addresses should be munged.
43 * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
44 * use bit 3 of the command byte as address bit A8, rather
45 * than having a two-byte address. If this flag is set, then
46 * commands should be munged in this way.
47 * @erase_command: Erase command (or 0 if sector erase not needed).
48 * @erase_size: Erase sector size (in bytes)
49 * Erase commands affect sectors with this size and alignment.
50 * This must be a power of two.
51 * @block_size: Write block size (in bytes).
52 * Write commands are limited to blocks with this size and alignment.
53 */
54struct efx_spi_device {
55 int device_id;
56 unsigned int size;
57 unsigned int addr_len;
58 unsigned int munge_address:1;
59 u8 erase_command;
60 unsigned int erase_size;
61 unsigned int block_size;
62};
63
64static inline bool efx_spi_present(const struct efx_spi_device *spi)
65{
66 return spi->size != 0;
67}
68
69int falcon_spi_cmd(struct efx_nic *efx,
70 const struct efx_spi_device *spi, unsigned int command,
71 int address, const void *in, void *out, size_t len);
72int falcon_spi_wait_write(struct efx_nic *efx,
73 const struct efx_spi_device *spi);
74int falcon_spi_read(struct efx_nic *efx,
75 const struct efx_spi_device *spi, loff_t start,
76 size_t len, size_t *retlen, u8 *buffer);
77int falcon_spi_write(struct efx_nic *efx,
78 const struct efx_spi_device *spi, loff_t start,
79 size_t len, size_t *retlen, const u8 *buffer);
80
81/*
82 * SFC4000 flash is partitioned into:
83 * 0-0x400 chip and board config (see falcon_hwdefs.h)
84 * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
85 * 0x8000-end boot code (mapped to PCI expansion ROM)
86 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
87 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
88 * 0-0x400 chip and board config
89 * configurable VPD
90 * 0x800-0x1800 boot config
91 * Aside from the chip and board config, all of these are optional and may
92 * be absent or truncated depending on the devices used.
93 */
94#define FALCON_NVCONFIG_END 0x400U
95#define FALCON_FLASH_BOOTCODE_START 0x8000U
96#define EFX_EEPROM_BOOTCONFIG_START 0x800U
97#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
98
99#endif /* EFX_SPI_H */
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index d37cb5017129..2c90e6b31575 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2011 Solarflare Communications Inc. 3 * Copyright 2007-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 5e090e54298e..2ac91c5b5eea 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2010 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
306 306
307 while (read_ptr != stop_index) { 307 while (read_ptr != stop_index) {
308 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 308 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
309 if (unlikely(buffer->len == 0)) { 309
310 if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
311 unlikely(buffer->len == 0)) {
310 netif_err(efx, tx_err, efx->net_dev, 312 netif_err(efx, tx_err, efx->net_dev,
311 "TX queue %d spurious TX completion id %x\n", 313 "TX queue %d spurious TX completion id %x\n",
312 tx_queue->queue, read_ptr); 314 tx_queue->queue, read_ptr);
@@ -437,6 +439,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
437 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 439 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
438 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 440 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
439 441
442 if (pkts_compl > 1)
443 ++tx_queue->merge_events;
444
440 /* See if we need to restart the netif queue. This memory 445 /* See if we need to restart the netif queue. This memory
441 * barrier ensures that we write read_count (inside 446 * barrier ensures that we write read_count (inside
442 * efx_dequeue_buffers()) before reading the queue status. 447 * efx_dequeue_buffers()) before reading the queue status.
@@ -543,10 +548,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
543 tx_queue->initialised = true; 548 tx_queue->initialised = true;
544} 549}
545 550
546void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 551void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
547{ 552{
548 struct efx_tx_buffer *buffer; 553 struct efx_tx_buffer *buffer;
549 554
555 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
556 "shutting down TX queue %d\n", tx_queue->queue);
557
550 if (!tx_queue->buffer) 558 if (!tx_queue->buffer)
551 return; 559 return;
552 560
@@ -561,22 +569,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
561 netdev_tx_reset_queue(tx_queue->core_txq); 569 netdev_tx_reset_queue(tx_queue->core_txq);
562} 570}
563 571
564void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
565{
566 if (!tx_queue->initialised)
567 return;
568
569 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
570 "shutting down TX queue %d\n", tx_queue->queue);
571
572 tx_queue->initialised = false;
573
574 /* Flush TX queue, remove descriptor ring */
575 efx_nic_fini_tx(tx_queue);
576
577 efx_release_tx_buffers(tx_queue);
578}
579
580void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 572void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
581{ 573{
582 int i; 574 int i;
@@ -708,7 +700,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
708 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; 700 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
709 701
710 if (unlikely(!page_buf->addr) && 702 if (unlikely(!page_buf->addr) &&
711 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE)) 703 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
704 GFP_ATOMIC))
712 return NULL; 705 return NULL;
713 706
714 result = (u8 *)page_buf->addr + offset; 707 result = (u8 *)page_buf->addr + offset;
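
The recurring efx_nic_alloc_buffer() change in this series threads the GFP flags through from each call site, so the allocation context is stated where it is known instead of assumed inside the helper: GFP_KERNEL from probe and init paths, GFP_ATOMIC here on the TX fast path where sleeping is forbidden, and GFP_NOIO in the VF reset paths in siena_sriov.c, where re-entering the I/O path while the NIC is being recovered could deadlock. The three flavours, condensed from call sites in this patch (locals as at each site):

    /* Probe/init: process context, may sleep and perform I/O. */
    rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
                              GFP_KERNEL);

    /* TX datapath: atomic context, must not sleep. */
    rc = efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, GFP_ATOMIC);

    /* Reset/recovery: sleepable, but must not recurse into I/O. */
    rc = efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO);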
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 29bb3f9941c0..3d5ee3259885 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index 225557caaf5a..ae044f44936a 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2010-2012 Solarflare Communications Inc. 3 * Copyright 2010-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index e4dd3a7f304b..2310b75d4ec2 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -15,27 +15,15 @@
15 * Bug numbers are from Solarflare's Bugzilla. 15 * Bug numbers are from Solarflare's Bugzilla.
16 */ 16 */
17 17
18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) 18#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) 19#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) 20#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
22#define EFX_WORKAROUND_10G(efx) 1 21#define EFX_WORKAROUND_10G(efx) 1
23 22
24/* XAUI resets if link not detected */
25#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
26/* RX PCIe double split performance issue */
27#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
28/* Bit-bashed I2C reads cause performance drop */ 23/* Bit-bashed I2C reads cause performance drop */
29#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G 24#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
30/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
31 * or a PCIe error (bug 11028) */
32#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
33/* Transmit flow control may get disabled */
34#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
35/* Truncated IPv4 packets can confuse the TX packet parser */ 25/* Truncated IPv4 packets can confuse the TX packet parser */
36#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB 26#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
37/* Legacy ISR read can return zero once */
38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
39/* Legacy interrupt storm when interrupt fifo fills */ 27/* Legacy interrupt storm when interrupt fifo fills */
40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 28#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41 29
@@ -56,4 +44,10 @@
56/* Leak overlength packets rather than free */ 44/* Leak overlength packets rather than free */
57#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 45#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
58 46
47/* Lockup when writing event block registers at gen2/gen3 */
48#define EFX_EF10_WORKAROUND_35388(efx) \
49 (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
50#define EFX_WORKAROUND_35388(efx) \
51 (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
52
59#endif /* EFX_WORKAROUNDS_H */ 53#endif /* EFX_WORKAROUNDS_H */
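
The workaround macros follow a two-level pattern: a per-chip predicate (EFX_WORKAROUND_FALCON_A() and friends) and a per-bug alias named after the Solarflare Bugzilla entry, so call sites read as a question about the bug rather than about silicon revisions. Bug 35388 adds a runtime component: the flag lives in the EF10 per-NIC data because it is discovered at probe time rather than implied by the revision alone. A hypothetical call site, with placeholder helper names:

    /* Placeholder functions; only the predicate usage is the point here. */
    if (EFX_WORKAROUND_35388(efx))
            write_evq_via_safe_path(efx);   /* hypothetical workaround path */
    else
            write_evq_directly(efx);        /* hypothetical direct MMIO path */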
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 9f5f35e041ac..770036bc2d87 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -212,9 +212,8 @@ static void meth_check_link(struct net_device *dev)
212static int meth_init_tx_ring(struct meth_private *priv) 212static int meth_init_tx_ring(struct meth_private *priv)
213{ 213{
214 /* Init TX ring */ 214 /* Init TX ring */
215 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, 215 priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
216 &priv->tx_ring_dma, 216 &priv->tx_ring_dma, GFP_ATOMIC);
217 GFP_ATOMIC | __GFP_ZERO);
218 if (!priv->tx_ring) 217 if (!priv->tx_ring)
219 return -ENOMEM; 218 return -ENOMEM;
220 219
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 02df0894690d..ee18e6f7b4fe 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1770,9 +1770,6 @@ static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1770 struct sis190_private *tp = netdev_priv(dev); 1770 struct sis190_private *tp = netdev_priv(dev);
1771 unsigned long flags; 1771 unsigned long flags;
1772 1772
1773 if (regs->len > SIS190_REGS_SIZE)
1774 regs->len = SIS190_REGS_SIZE;
1775
1776 spin_lock_irqsave(&tp->lock, flags); 1773 spin_lock_irqsave(&tp->lock, flags);
1777 memcpy_fromio(p, tp->mmio_addr, regs->len); 1774 memcpy_fromio(p, tp->mmio_addr, regs->len);
1778 spin_unlock_irqrestore(&tp->lock, flags); 1775 spin_unlock_irqrestore(&tp->lock, flags);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index f5d7ad75e479..b7a39305472b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1309,23 +1309,9 @@ static void sis900_timer(unsigned long data)
1309 struct sis900_private *sis_priv = netdev_priv(net_dev); 1309 struct sis900_private *sis_priv = netdev_priv(net_dev);
1310 struct mii_phy *mii_phy = sis_priv->mii; 1310 struct mii_phy *mii_phy = sis_priv->mii;
1311 static const int next_tick = 5*HZ; 1311 static const int next_tick = 5*HZ;
1312 int speed = 0, duplex = 0;
1312 u16 status; 1313 u16 status;
1313 1314
1314 if (!sis_priv->autong_complete){
1315 int uninitialized_var(speed), duplex = 0;
1316
1317 sis900_read_mode(net_dev, &speed, &duplex);
1318 if (duplex){
1319 sis900_set_mode(sis_priv, speed, duplex);
1320 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1321 netif_carrier_on(net_dev);
1322 }
1323
1324 sis_priv->timer.expires = jiffies + HZ;
1325 add_timer(&sis_priv->timer);
1326 return;
1327 }
1328
1329 status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); 1315 status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
1330 status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); 1316 status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
1331 1317
@@ -1336,8 +1322,16 @@ static void sis900_timer(unsigned long data)
1336 status = sis900_default_phy(net_dev); 1322 status = sis900_default_phy(net_dev);
1337 mii_phy = sis_priv->mii; 1323 mii_phy = sis_priv->mii;
1338 1324
1339 if (status & MII_STAT_LINK) 1325 if (status & MII_STAT_LINK) {
1340 sis900_check_mode(net_dev, mii_phy); 1326 WARN_ON(!(status & MII_STAT_AUTO_DONE));
1327
1328 sis900_read_mode(net_dev, &speed, &duplex);
1329 if (duplex) {
1330 sis900_set_mode(sis_priv, speed, duplex);
1331 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1332 netif_carrier_on(net_dev);
1333 }
1334 }
1341 } else { 1335 } else {
1342 /* Link ON -> OFF */ 1336 /* Link ON -> OFF */
1343 if (!(status & MII_STAT_LINK)){ 1337 if (!(status & MII_STAT_LINK)){
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 345558fe7367..afe01c4088a3 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2067,7 +2067,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
2067 lp->netdev = ndev; 2067 lp->netdev = ndev;
2068#ifdef SMC_DYNAMIC_BUS_CONFIG 2068#ifdef SMC_DYNAMIC_BUS_CONFIG
2069 { 2069 {
2070 struct smc911x_platdata *pd = pdev->dev.platform_data; 2070 struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
2071 if (!pd) { 2071 if (!pd) {
2072 ret = -EINVAL; 2072 ret = -EINVAL;
2073 goto release_both; 2073 goto release_both;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index cde13be7c7de..73be7f3982e6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2202,7 +2202,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2202 */ 2202 */
2203static int smc_drv_probe(struct platform_device *pdev) 2203static int smc_drv_probe(struct platform_device *pdev)
2204{ 2204{
2205 struct smc91x_platdata *pd = pdev->dev.platform_data; 2205 struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
2206 struct smc_local *lp; 2206 struct smc_local *lp;
2207 struct net_device *ndev; 2207 struct net_device *ndev;
2208 struct resource *res, *ires; 2208 struct resource *res, *ires;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a1419211585b..5fdbc2686eb3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2374,7 +2374,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2374 struct device_node *np = pdev->dev.of_node; 2374 struct device_node *np = pdev->dev.of_node;
2375 struct net_device *dev; 2375 struct net_device *dev;
2376 struct smsc911x_data *pdata; 2376 struct smsc911x_data *pdata;
2377 struct smsc911x_platform_config *config = pdev->dev.platform_data; 2377 struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
2378 struct resource *res, *irq_res; 2378 struct resource *res, *irq_res;
2379 unsigned int intcfg = 0; 2379 unsigned int intcfg = 0;
2380 int res_size, irq_flags; 2380 int res_size, irq_flags;
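The smc911x/smc91x/smsc911x hunks (and the niu, cpmac, davinci ones further down) are all the same mechanical conversion: use the dev_get_platdata() accessor instead of reaching into struct device directly. The accessor is a thin inline from <linux/device.h>:

	static inline void *dev_get_platdata(const struct device *dev)
	{
		return dev->platform_data;
	}

Nothing changes at runtime; the point is to stop open-coding access to driver-core internals.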
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c922fde929a1..f16a9bdf45bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -70,7 +70,6 @@ struct stmmac_priv {
70 struct net_device *dev; 70 struct net_device *dev;
71 struct device *device; 71 struct device *device;
72 struct mac_device_info *hw; 72 struct mac_device_info *hw;
73 int no_csum_insertion;
74 spinlock_t lock; 73 spinlock_t lock;
75 74
76 struct phy_device *phydev ____cacheline_aligned_in_smp; 75 struct phy_device *phydev ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index def7e75e1d57..76ad214b4036 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -45,8 +45,8 @@ static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
45 data = (1000000000ULL / 50000000); 45 data = (1000000000ULL / 50000000);
46 46
47 /* 0.465ns accuracy */ 47 /* 0.465ns accuracy */
48 if (value & PTP_TCR_TSCTRLSSR) 48 if (!(value & PTP_TCR_TSCTRLSSR))
49 data = (data * 100) / 465; 49 data = (data * 1000) / 465;
50 50
51 writel(data, ioaddr + PTP_SSIR); 51 writel(data, ioaddr + PTP_SSIR);
52} 52}
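The hwtstamp fix corrects both the condition and the scale factor. With digital rollover (PTP_TCR_TSCTRLSSR set) the sub-second register counts whole nanoseconds, so the period of the 50 MHz input clock is programmed directly; with binary rollover it wraps at 2^31, so each unit is roughly 10^9 / 2^31, about 0.465 ns. Worked through:

	/* period = 10^9 / 50e6 = 20 ns per input-clock tick.
	 * binary rollover: increment = 20 * 1000 / 465 ~= 43 units.
	 * The old code scaled by 100 (an increment ~10x too small) and
	 * applied the scaling in the wrong branch of the test.
	 */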
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0a9bb9d30c3f..8d4ccd35a016 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1224,8 +1224,9 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
1224 */ 1224 */
1225static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 1225static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1226{ 1226{
1227 if (likely(priv->plat->force_sf_dma_mode || 1227 if (priv->plat->force_thresh_dma_mode)
1228 ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) { 1228 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
1229 else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1229 /* 1230 /*
1230 * In case of GMAC, SF mode can be enabled 1231 * In case of GMAC, SF mode can be enabled
1231 * to perform the TX COE in HW. This depends on: 1232 * to perform the TX COE in HW. This depends on:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1c83a44c547b..7a0072003f34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -83,6 +83,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
83 dma_cfg->mixed_burst = 83 dma_cfg->mixed_burst =
84 of_property_read_bool(np, "snps,mixed-burst"); 84 of_property_read_bool(np, "snps,mixed-burst");
85 } 85 }
 86 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
 87 if (plat->force_thresh_dma_mode) {
 88 plat->force_sf_dma_mode = 0;
 89 pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
 90 }
 86 91
 87 return 0; 92 return 0;
 88} 93}
@@ -113,14 +117,11 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
113 const char *mac = NULL; 117 const char *mac = NULL;
114 118
115 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 119 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
116 if (!res)
117 return -ENODEV;
118
119 addr = devm_ioremap_resource(dev, res); 120 addr = devm_ioremap_resource(dev, res);
120 if (IS_ERR(addr)) 121 if (IS_ERR(addr))
121 return PTR_ERR(addr); 122 return PTR_ERR(addr);
122 123
123 plat_dat = pdev->dev.platform_data; 124 plat_dat = dev_get_platdata(&pdev->dev);
124 if (pdev->dev.of_node) { 125 if (pdev->dev.of_node) {
125 if (!plat_dat) 126 if (!plat_dat)
126 plat_dat = devm_kzalloc(&pdev->dev, 127 plat_dat = devm_kzalloc(&pdev->dev,
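Dropping the explicit !res check is safe because devm_ioremap_resource() validates the resource itself: a NULL resource yields ERR_PTR(-EINVAL) along with a dev_err(), so the IS_ERR() test already covers the missing-resource case. The resulting idiom:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(addr))
		return PTR_ERR(addr);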
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 52b2adf63cbf..f28460ce24a7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9360,7 +9360,7 @@ static ssize_t show_port_phy(struct device *dev,
9360 struct device_attribute *attr, char *buf) 9360 struct device_attribute *attr, char *buf)
9361{ 9361{
9362 struct platform_device *plat_dev = to_platform_device(dev); 9362 struct platform_device *plat_dev = to_platform_device(dev);
9363 struct niu_parent *p = plat_dev->dev.platform_data; 9363 struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9364 u32 port_phy = p->port_phy; 9364 u32 port_phy = p->port_phy;
9365 char *orig_buf = buf; 9365 char *orig_buf = buf;
9366 int i; 9366 int i;
@@ -9390,7 +9390,7 @@ static ssize_t show_plat_type(struct device *dev,
9390 struct device_attribute *attr, char *buf) 9390 struct device_attribute *attr, char *buf)
9391{ 9391{
9392 struct platform_device *plat_dev = to_platform_device(dev); 9392 struct platform_device *plat_dev = to_platform_device(dev);
9393 struct niu_parent *p = plat_dev->dev.platform_data; 9393 struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9394 const char *type_str; 9394 const char *type_str;
9395 9395
9396 switch (p->plat_type) { 9396 switch (p->plat_type) {
@@ -9419,7 +9419,7 @@ static ssize_t __show_chan_per_port(struct device *dev,
9419 int rx) 9419 int rx)
9420{ 9420{
9421 struct platform_device *plat_dev = to_platform_device(dev); 9421 struct platform_device *plat_dev = to_platform_device(dev);
9422 struct niu_parent *p = plat_dev->dev.platform_data; 9422 struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9423 char *orig_buf = buf; 9423 char *orig_buf = buf;
9424 u8 *arr; 9424 u8 *arr;
9425 int i; 9425 int i;
@@ -9452,7 +9452,7 @@ static ssize_t show_num_ports(struct device *dev,
9452 struct device_attribute *attr, char *buf) 9452 struct device_attribute *attr, char *buf)
9453{ 9453{
9454 struct platform_device *plat_dev = to_platform_device(dev); 9454 struct platform_device *plat_dev = to_platform_device(dev);
9455 struct niu_parent *p = plat_dev->dev.platform_data; 9455 struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9456 9456
9457 return sprintf(buf, "%d\n", p->num_ports); 9457 return sprintf(buf, "%d\n", p->num_ports);
9458} 9458}
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d43fa9ff980..7217ee5d6273 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1239,7 +1239,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
1239 1239
1240static int bigmac_sbus_remove(struct platform_device *op) 1240static int bigmac_sbus_remove(struct platform_device *op)
1241{ 1241{
1242 struct bigmac *bp = dev_get_drvdata(&op->dev); 1242 struct bigmac *bp = platform_get_drvdata(op);
1243 struct device *parent = op->dev.parent; 1243 struct device *parent = op->dev.parent;
1244 struct net_device *net_dev = bp->dev; 1244 struct net_device *net_dev = bp->dev;
1245 struct platform_device *qec_op; 1245 struct platform_device *qec_op;
@@ -1259,8 +1259,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
1259 1259
1260 free_netdev(net_dev); 1260 free_netdev(net_dev);
1261 1261
1262 dev_set_drvdata(&op->dev, NULL);
1263
1264 return 0; 1262 return 0;
1265} 1263}
1266 1264
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 171f5b0809c4..e37b587b3860 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2798,7 +2798,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2798 goto err_out_free_coherent; 2798 goto err_out_free_coherent;
2799 } 2799 }
2800 2800
2801 dev_set_drvdata(&op->dev, hp); 2801 platform_set_drvdata(op, hp);
2802 2802
2803 if (qfe_slot != -1) 2803 if (qfe_slot != -1)
2804 printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ", 2804 printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
@@ -3111,7 +3111,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
3111 goto err_out_iounmap; 3111 goto err_out_iounmap;
3112 } 3112 }
3113 3113
3114 dev_set_drvdata(&pdev->dev, hp); 3114 pci_set_drvdata(pdev, hp);
3115 3115
3116 if (!qfe_slot) { 3116 if (!qfe_slot) {
3117 struct pci_dev *qpdev = qp->quattro_dev; 3117 struct pci_dev *qpdev = qp->quattro_dev;
@@ -3159,7 +3159,7 @@ err_out:
3159 3159
3160static void happy_meal_pci_remove(struct pci_dev *pdev) 3160static void happy_meal_pci_remove(struct pci_dev *pdev)
3161{ 3161{
3162 struct happy_meal *hp = dev_get_drvdata(&pdev->dev); 3162 struct happy_meal *hp = pci_get_drvdata(pdev);
3163 struct net_device *net_dev = hp->dev; 3163 struct net_device *net_dev = hp->dev;
3164 3164
3165 unregister_netdev(net_dev); 3165 unregister_netdev(net_dev);
@@ -3171,7 +3171,7 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
3171 3171
3172 free_netdev(net_dev); 3172 free_netdev(net_dev);
3173 3173
3174 dev_set_drvdata(&pdev->dev, NULL); 3174 pci_set_drvdata(pdev, NULL);
3175} 3175}
3176 3176
3177static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = { 3177static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
@@ -3231,7 +3231,7 @@ static int hme_sbus_probe(struct platform_device *op)
3231 3231
3232static int hme_sbus_remove(struct platform_device *op) 3232static int hme_sbus_remove(struct platform_device *op)
3233{ 3233{
3234 struct happy_meal *hp = dev_get_drvdata(&op->dev); 3234 struct happy_meal *hp = platform_get_drvdata(op);
3235 struct net_device *net_dev = hp->dev; 3235 struct net_device *net_dev = hp->dev;
3236 3236
3237 unregister_netdev(net_dev); 3237 unregister_netdev(net_dev);
@@ -3250,8 +3250,6 @@ static int hme_sbus_remove(struct platform_device *op)
3250 3250
3251 free_netdev(net_dev); 3251 free_netdev(net_dev);
3252 3252
3253 dev_set_drvdata(&op->dev, NULL);
3254
3255 return 0; 3253 return 0;
3256} 3254}
3257 3255
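The sunbmac/sunhme hunks swap raw dev_{get,set}_drvdata(&pdev->dev, ...) calls for the bus-level wrappers, which are one-line inlines over the same storage:

	static inline void *platform_get_drvdata(const struct platform_device *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}

The dev_set_drvdata(..., NULL) and pci_set_drvdata(..., NULL) calls in the remove paths are dropped because the driver core now clears drvdata itself once a device is unbound, so drivers no longer need to.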
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 31bbbca341a7..2dc16b6efaf0 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -636,7 +636,7 @@ static void cpmac_hw_stop(struct net_device *dev)
636{ 636{
637 int i; 637 int i;
638 struct cpmac_priv *priv = netdev_priv(dev); 638 struct cpmac_priv *priv = netdev_priv(dev);
639 struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; 639 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
640 640
641 ar7_device_reset(pdata->reset_bit); 641 ar7_device_reset(pdata->reset_bit);
642 cpmac_write(priv->regs, CPMAC_RX_CONTROL, 642 cpmac_write(priv->regs, CPMAC_RX_CONTROL,
@@ -659,7 +659,7 @@ static void cpmac_hw_start(struct net_device *dev)
659{ 659{
660 int i; 660 int i;
661 struct cpmac_priv *priv = netdev_priv(dev); 661 struct cpmac_priv *priv = netdev_priv(dev);
662 struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; 662 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
663 663
664 ar7_device_reset(pdata->reset_bit); 664 ar7_device_reset(pdata->reset_bit);
665 for (i = 0; i < 8; i++) { 665 for (i = 0; i < 8; i++) {
@@ -1118,7 +1118,7 @@ static int cpmac_probe(struct platform_device *pdev)
1118 struct net_device *dev; 1118 struct net_device *dev;
1119 struct plat_cpmac_data *pdata; 1119 struct plat_cpmac_data *pdata;
1120 1120
1121 pdata = pdev->dev.platform_data; 1121 pdata = dev_get_platdata(&pdev->dev);
1122 1122
1123 if (external_switch || dumb_switch) { 1123 if (external_switch || dumb_switch) {
1124 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1124 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 22a7a4336211..79974e31187a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,9 +34,9 @@
34#include <linux/of_device.h> 34#include <linux/of_device.h>
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36 36
37#include <linux/platform_data/cpsw.h>
38#include <linux/pinctrl/consumer.h> 37#include <linux/pinctrl/consumer.h>
39 38
39#include "cpsw.h"
40#include "cpsw_ale.h" 40#include "cpsw_ale.h"
41#include "cpts.h" 41#include "cpts.h"
42#include "davinci_cpdma.h" 42#include "davinci_cpdma.h"
@@ -82,6 +82,8 @@ do { \
82 82
83#define CPSW_VERSION_1 0x19010a 83#define CPSW_VERSION_1 0x19010a
84#define CPSW_VERSION_2 0x19010c 84#define CPSW_VERSION_2 0x19010c
85#define CPSW_VERSION_3 0x19010f
86#define CPSW_VERSION_4 0x190112
85 87
86#define HOST_PORT_NUM 0 88#define HOST_PORT_NUM 0
87#define SLIVER_SIZE 0x40 89#define SLIVER_SIZE 0x40
@@ -91,6 +93,7 @@ do { \
91#define CPSW1_SLAVE_SIZE 0x040 93#define CPSW1_SLAVE_SIZE 0x040
92#define CPSW1_CPDMA_OFFSET 0x100 94#define CPSW1_CPDMA_OFFSET 0x100
93#define CPSW1_STATERAM_OFFSET 0x200 95#define CPSW1_STATERAM_OFFSET 0x200
96#define CPSW1_HW_STATS 0x400
94#define CPSW1_CPTS_OFFSET 0x500 97#define CPSW1_CPTS_OFFSET 0x500
95#define CPSW1_ALE_OFFSET 0x600 98#define CPSW1_ALE_OFFSET 0x600
96#define CPSW1_SLIVER_OFFSET 0x700 99#define CPSW1_SLIVER_OFFSET 0x700
@@ -99,6 +102,7 @@ do { \
99#define CPSW2_SLAVE_OFFSET 0x200 102#define CPSW2_SLAVE_OFFSET 0x200
100#define CPSW2_SLAVE_SIZE 0x100 103#define CPSW2_SLAVE_SIZE 0x100
101#define CPSW2_CPDMA_OFFSET 0x800 104#define CPSW2_CPDMA_OFFSET 0x800
105#define CPSW2_HW_STATS 0x900
102#define CPSW2_STATERAM_OFFSET 0xa00 106#define CPSW2_STATERAM_OFFSET 0xa00
103#define CPSW2_CPTS_OFFSET 0xc00 107#define CPSW2_CPTS_OFFSET 0xc00
104#define CPSW2_ALE_OFFSET 0xd00 108#define CPSW2_ALE_OFFSET 0xd00
@@ -299,6 +303,44 @@ struct cpsw_sliver_regs {
299 u32 rx_pri_map; 303 u32 rx_pri_map;
300}; 304};
301 305
306struct cpsw_hw_stats {
307 u32 rxgoodframes;
308 u32 rxbroadcastframes;
309 u32 rxmulticastframes;
310 u32 rxpauseframes;
311 u32 rxcrcerrors;
312 u32 rxaligncodeerrors;
313 u32 rxoversizedframes;
314 u32 rxjabberframes;
315 u32 rxundersizedframes;
316 u32 rxfragments;
317 u32 __pad_0[2];
318 u32 rxoctets;
319 u32 txgoodframes;
320 u32 txbroadcastframes;
321 u32 txmulticastframes;
322 u32 txpauseframes;
323 u32 txdeferredframes;
324 u32 txcollisionframes;
325 u32 txsinglecollframes;
326 u32 txmultcollframes;
327 u32 txexcessivecollisions;
328 u32 txlatecollisions;
329 u32 txunderrun;
330 u32 txcarriersenseerrors;
331 u32 txoctets;
332 u32 octetframes64;
333 u32 octetframes65t127;
334 u32 octetframes128t255;
335 u32 octetframes256t511;
336 u32 octetframes512t1023;
337 u32 octetframes1024tup;
338 u32 netoctets;
339 u32 rxsofoverruns;
340 u32 rxmofoverruns;
341 u32 rxdmaoverruns;
342};
343
302struct cpsw_slave { 344struct cpsw_slave {
303 void __iomem *regs; 345 void __iomem *regs;
304 struct cpsw_sliver_regs __iomem *sliver; 346 struct cpsw_sliver_regs __iomem *sliver;
@@ -332,6 +374,7 @@ struct cpsw_priv {
332 struct cpsw_platform_data data; 374 struct cpsw_platform_data data;
333 struct cpsw_ss_regs __iomem *regs; 375 struct cpsw_ss_regs __iomem *regs;
334 struct cpsw_wr_regs __iomem *wr_regs; 376 struct cpsw_wr_regs __iomem *wr_regs;
377 u8 __iomem *hw_stats;
335 struct cpsw_host_regs __iomem *host_port_regs; 378 struct cpsw_host_regs __iomem *host_port_regs;
336 u32 msg_enable; 379 u32 msg_enable;
337 u32 version; 380 u32 version;
@@ -354,6 +397,94 @@ struct cpsw_priv {
354 u32 emac_port; 397 u32 emac_port;
355}; 398};
356 399
400struct cpsw_stats {
401 char stat_string[ETH_GSTRING_LEN];
402 int type;
403 int sizeof_stat;
404 int stat_offset;
405};
406
407enum {
408 CPSW_STATS,
409 CPDMA_RX_STATS,
410 CPDMA_TX_STATS,
411};
412
413#define CPSW_STAT(m) CPSW_STATS, \
414 sizeof(((struct cpsw_hw_stats *)0)->m), \
415 offsetof(struct cpsw_hw_stats, m)
416#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
417 sizeof(((struct cpdma_chan_stats *)0)->m), \
418 offsetof(struct cpdma_chan_stats, m)
419#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
420 sizeof(((struct cpdma_chan_stats *)0)->m), \
421 offsetof(struct cpdma_chan_stats, m)
422
423static const struct cpsw_stats cpsw_gstrings_stats[] = {
424 { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
425 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
426 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
427 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
428 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
429 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
430 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
431 { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
432 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
433 { "Rx Fragments", CPSW_STAT(rxfragments) },
434 { "Rx Octets", CPSW_STAT(rxoctets) },
435 { "Good Tx Frames", CPSW_STAT(txgoodframes) },
436 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
437 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
438 { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
439 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
440 { "Collisions", CPSW_STAT(txcollisionframes) },
441 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
442 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
443 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
444 { "Late Collisions", CPSW_STAT(txlatecollisions) },
445 { "Tx Underrun", CPSW_STAT(txunderrun) },
446 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
447 { "Tx Octets", CPSW_STAT(txoctets) },
448 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
449 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
450 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
451 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
452 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
453 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
454 { "Net Octets", CPSW_STAT(netoctets) },
455 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
456 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
457 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
458 { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
459 { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
460 { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
461 { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
462 { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
463 { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
464 { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
465 { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
466 { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
467 { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
468 { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
469 { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
470 { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
471 { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
472 { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
473 { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
474 { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
475 { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
476 { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
477 { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
478 { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
479 { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
480 { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
481 { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
482 { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
483 { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
484};
485
486#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
487
357#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) 488#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
358#define for_each_slave(priv, func, arg...) \ 489#define for_each_slave(priv, func, arg...) \
359 do { \ 490 do { \
@@ -723,6 +854,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
723 return 0; 854 return 0;
724} 855}
725 856
857static int cpsw_get_sset_count(struct net_device *ndev, int sset)
858{
859 switch (sset) {
860 case ETH_SS_STATS:
861 return CPSW_STATS_LEN;
862 default:
863 return -EOPNOTSUPP;
864 }
865}
866
867static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
868{
869 u8 *p = data;
870 int i;
871
872 switch (stringset) {
873 case ETH_SS_STATS:
874 for (i = 0; i < CPSW_STATS_LEN; i++) {
875 memcpy(p, cpsw_gstrings_stats[i].stat_string,
876 ETH_GSTRING_LEN);
877 p += ETH_GSTRING_LEN;
878 }
879 break;
880 }
881}
882
883static void cpsw_get_ethtool_stats(struct net_device *ndev,
884 struct ethtool_stats *stats, u64 *data)
885{
886 struct cpsw_priv *priv = netdev_priv(ndev);
887 struct cpdma_chan_stats rx_stats;
888 struct cpdma_chan_stats tx_stats;
889 u32 val;
890 u8 *p;
891 int i;
892
893 /* Collect Davinci CPDMA stats for Rx and Tx Channel */
894 cpdma_chan_get_stats(priv->rxch, &rx_stats);
895 cpdma_chan_get_stats(priv->txch, &tx_stats);
896
897 for (i = 0; i < CPSW_STATS_LEN; i++) {
898 switch (cpsw_gstrings_stats[i].type) {
899 case CPSW_STATS:
900 val = readl(priv->hw_stats +
901 cpsw_gstrings_stats[i].stat_offset);
902 data[i] = val;
903 break;
904
905 case CPDMA_RX_STATS:
906 p = (u8 *)&rx_stats +
907 cpsw_gstrings_stats[i].stat_offset;
908 data[i] = *(u32 *)p;
909 break;
910
911 case CPDMA_TX_STATS:
912 p = (u8 *)&tx_stats +
913 cpsw_gstrings_stats[i].stat_offset;
914 data[i] = *(u32 *)p;
915 break;
916 }
917 }
918}
919
726static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) 920static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
727{ 921{
728 static char *leader = "........................................"; 922 static char *leader = "........................................";
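The new stats support follows the stock ethtool pattern: a table of name/size/offset triples, with ->get_sset_count() reporting the table length, ->get_strings() emitting the names, and ->get_ethtool_stats() filling a u64 array in the same order. The CPSW_STAT()/CPDMA_*_STAT() macros use the usual offsetof trick, reduced here to a hypothetical two-counter struct:

	struct hw_stats { u32 rx_good; u32 tx_good; };
	#define STAT(m) sizeof(((struct hw_stats *)0)->m), \
			offsetof(struct hw_stats, m)

From userspace the whole triple is driven by a single command:

	ethtool -S eth0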
@@ -799,6 +993,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
799 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); 993 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
800 break; 994 break;
801 case CPSW_VERSION_2: 995 case CPSW_VERSION_2:
996 case CPSW_VERSION_3:
997 case CPSW_VERSION_4:
802 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); 998 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
803 break; 999 break;
804 } 1000 }
@@ -1232,6 +1428,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1232 1428
1233} 1429}
1234 1430
1431static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1432{
1433 struct cpsw_priv *priv = netdev_priv(ndev);
1434 struct sockaddr *addr = (struct sockaddr *)p;
1435 int flags = 0;
1436 u16 vid = 0;
1437
1438 if (!is_valid_ether_addr(addr->sa_data))
1439 return -EADDRNOTAVAIL;
1440
1441 if (priv->data.dual_emac) {
1442 vid = priv->slaves[priv->emac_port].port_vlan;
1443 flags = ALE_VLAN;
1444 }
1445
1446 cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
1447 flags, vid);
1448 cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
1449 flags, vid);
1450
1451 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
1452 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1453 for_each_slave(priv, cpsw_set_slave_mac, priv);
1454
1455 return 0;
1456}
1457
1235static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) 1458static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
1236{ 1459{
1237 struct cpsw_priv *priv = netdev_priv(ndev); 1460 struct cpsw_priv *priv = netdev_priv(ndev);
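Beyond the generic validate-and-copy steps of a ->ndo_set_mac_address() handler, the CPSW version migrates the ALE unicast entry (delete the old address, add the new one, VLAN-qualified in dual-EMAC mode) before committing. For comparison, the generic skeleton (a sketch, not the CPSW code; needs <linux/etherdevice.h>):

	static int example_set_mac_address(struct net_device *ndev, void *p)
	{
		struct sockaddr *addr = p;

		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
		return 0;
	}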
@@ -1326,6 +1549,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
1326 .ndo_stop = cpsw_ndo_stop, 1549 .ndo_stop = cpsw_ndo_stop,
1327 .ndo_start_xmit = cpsw_ndo_start_xmit, 1550 .ndo_start_xmit = cpsw_ndo_start_xmit,
1328 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags, 1551 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
1552 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
1329 .ndo_do_ioctl = cpsw_ndo_ioctl, 1553 .ndo_do_ioctl = cpsw_ndo_ioctl,
1330 .ndo_validate_addr = eth_validate_addr, 1554 .ndo_validate_addr = eth_validate_addr,
1331 .ndo_change_mtu = eth_change_mtu, 1555 .ndo_change_mtu = eth_change_mtu,
@@ -1416,6 +1640,29 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1416 return -EOPNOTSUPP; 1640 return -EOPNOTSUPP;
1417} 1641}
1418 1642
1643static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1644{
1645 struct cpsw_priv *priv = netdev_priv(ndev);
1646 int slave_no = cpsw_slave_index(priv);
1647
1648 wol->supported = 0;
1649 wol->wolopts = 0;
1650
1651 if (priv->slaves[slave_no].phy)
1652 phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
1653}
1654
1655static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1656{
1657 struct cpsw_priv *priv = netdev_priv(ndev);
1658 int slave_no = cpsw_slave_index(priv);
1659
1660 if (priv->slaves[slave_no].phy)
1661 return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
1662 else
1663 return -EOPNOTSUPP;
1664}
1665
1419static const struct ethtool_ops cpsw_ethtool_ops = { 1666static const struct ethtool_ops cpsw_ethtool_ops = {
1420 .get_drvinfo = cpsw_get_drvinfo, 1667 .get_drvinfo = cpsw_get_drvinfo,
1421 .get_msglevel = cpsw_get_msglevel, 1668 .get_msglevel = cpsw_get_msglevel,
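The WoL hooks delegate entirely to phylib, so whatever the active slave's PHY reports through phy_ethtool_get_wol() is what userspace sees. Assuming a PHY with magic-packet support:

	ethtool -s eth0 wol g     # arm magic-packet wake
	ethtool eth0              # check 'Supports Wake-on' / 'Wake-on'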
@@ -1426,6 +1673,11 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
1426 .set_settings = cpsw_set_settings, 1673 .set_settings = cpsw_set_settings,
1427 .get_coalesce = cpsw_get_coalesce, 1674 .get_coalesce = cpsw_get_coalesce,
1428 .set_coalesce = cpsw_set_coalesce, 1675 .set_coalesce = cpsw_set_coalesce,
1676 .get_sset_count = cpsw_get_sset_count,
1677 .get_strings = cpsw_get_strings,
1678 .get_ethtool_stats = cpsw_get_ethtool_stats,
1679 .get_wol = cpsw_get_wol,
1680 .set_wol = cpsw_set_wol,
1429}; 1681};
1430 1682
1431static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, 1683static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1623,6 +1875,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1623 priv_sl2->host_port = priv->host_port; 1875 priv_sl2->host_port = priv->host_port;
1624 priv_sl2->host_port_regs = priv->host_port_regs; 1876 priv_sl2->host_port_regs = priv->host_port_regs;
1625 priv_sl2->wr_regs = priv->wr_regs; 1877 priv_sl2->wr_regs = priv->wr_regs;
1878 priv_sl2->hw_stats = priv->hw_stats;
1626 priv_sl2->dma = priv->dma; 1879 priv_sl2->dma = priv->dma;
1627 priv_sl2->txch = priv->txch; 1880 priv_sl2->txch = priv->txch;
1628 priv_sl2->rxch = priv->rxch; 1881 priv_sl2->rxch = priv->rxch;
@@ -1780,7 +2033,8 @@ static int cpsw_probe(struct platform_device *pdev)
1780 switch (priv->version) { 2033 switch (priv->version) {
1781 case CPSW_VERSION_1: 2034 case CPSW_VERSION_1:
1782 priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; 2035 priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
1783 priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; 2036 priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
2037 priv->hw_stats = ss_regs + CPSW1_HW_STATS;
1784 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; 2038 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
1785 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; 2039 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
1786 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; 2040 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1790,8 +2044,11 @@ static int cpsw_probe(struct platform_device *pdev)
1790 dma_params.desc_mem_phys = 0; 2044 dma_params.desc_mem_phys = 0;
1791 break; 2045 break;
1792 case CPSW_VERSION_2: 2046 case CPSW_VERSION_2:
2047 case CPSW_VERSION_3:
2048 case CPSW_VERSION_4:
1793 priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; 2049 priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
1794 priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; 2050 priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
2051 priv->hw_stats = ss_regs + CPSW2_HW_STATS;
1795 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; 2052 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
1796 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; 2053 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
1797 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; 2054 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
new file mode 100644
index 000000000000..eb3e101ec048
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -0,0 +1,42 @@
1/* Texas Instruments Ethernet Switch Driver
2 *
3 * Copyright (C) 2013 Texas Instruments
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
10 * kind, whether express or implied; without even the implied warranty
11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef __CPSW_H__
15#define __CPSW_H__
16
17#include <linux/if_ether.h>
18
19struct cpsw_slave_data {
20 char phy_id[MII_BUS_ID_SIZE];
21 int phy_if;
22 u8 mac_addr[ETH_ALEN];
23 u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
24};
25
26struct cpsw_platform_data {
27 struct cpsw_slave_data *slave_data;
28 u32 ss_reg_ofs; /* Subsystem control register offset */
29 u32 channels; /* number of cpdma channels (symmetric) */
30 u32 slaves; /* number of slave cpgmac ports */
31 u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
32 u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
33 u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
34 u32 ale_entries; /* ale table size */
 35 u32 bd_ram_size; /* buffer descriptor ram size */
 36 u32 rx_descs; /* Number of Rx Descriptors */
 37 u32 mac_control; /* Mac control register */
 38 u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */
39 bool dual_emac; /* Enable Dual EMAC mode */
40};
41
42#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 031ebc81b50c..90a79462c869 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -591,6 +591,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
591 spin_unlock_irqrestore(&chan->lock, flags); 591 spin_unlock_irqrestore(&chan->lock, flags);
592 return 0; 592 return 0;
593} 593}
594EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
594 595
595int cpdma_chan_dump(struct cpdma_chan *chan) 596int cpdma_chan_dump(struct cpdma_chan *chan)
596{ 597{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 1a222bce4bd7..67df09ea9d04 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1761,7 +1761,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1761 const u8 *mac_addr; 1761 const u8 *mac_addr;
1762 1762
1763 if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) 1763 if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
1764 return pdev->dev.platform_data; 1764 return dev_get_platdata(&pdev->dev);
1765 1765
1766 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1766 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1767 if (!pdata) 1767 if (!pdata)
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 16ddfc348062..4ec92659a100 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -314,7 +314,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
314 314
315static int davinci_mdio_probe(struct platform_device *pdev) 315static int davinci_mdio_probe(struct platform_device *pdev)
316{ 316{
317 struct mdio_platform_data *pdata = pdev->dev.platform_data; 317 struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
318 struct device *dev = &pdev->dev; 318 struct device *dev = &pdev->dev;
319 struct davinci_mdio_data *data; 319 struct davinci_mdio_data *data;
320 struct resource *res; 320 struct resource *res;
@@ -421,8 +421,7 @@ bail_out:
421 421
422static int davinci_mdio_remove(struct platform_device *pdev) 422static int davinci_mdio_remove(struct platform_device *pdev)
423{ 423{
424 struct device *dev = &pdev->dev; 424 struct davinci_mdio_data *data = platform_get_drvdata(pdev);
425 struct davinci_mdio_data *data = dev_get_drvdata(dev);
426 425
427 if (data->bus) { 426 if (data->bus) {
428 mdiobus_unregister(data->bus); 427 mdiobus_unregister(data->bus);
@@ -434,8 +433,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
434 pm_runtime_put_sync(&pdev->dev); 433 pm_runtime_put_sync(&pdev->dev);
435 pm_runtime_disable(&pdev->dev); 434 pm_runtime_disable(&pdev->dev);
436 435
437 dev_set_drvdata(dev, NULL);
438
439 kfree(data); 436 kfree(data);
440 437
441 return 0; 438 return 0;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 098b1c42b393..4083ba8839e1 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -15,3 +15,14 @@ config TILE_NET
15 15
16 To compile this driver as a module, choose M here: the module 16 To compile this driver as a module, choose M here: the module
17 will be called tile_net. 17 will be called tile_net.
18
19config PTP_1588_CLOCK_TILEGX
20 tristate "Tilera TILE-Gx mPIPE as PTP clock"
21 select PTP_1588_CLOCK
22 depends on TILE_NET
23 depends on TILEGX
24 ---help---
25 This driver adds support for using the mPIPE as a PTP
26 clock. This clock is only useful if your PTP programs are
27 getting hardware time stamps on the PTP Ethernet packets
28 using the SO_TIMESTAMPING API.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index f3c2d034b32c..949076f4e6ae 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -36,7 +36,10 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/ctype.h> 37#include <linux/ctype.h>
38#include <linux/ip.h> 38#include <linux/ip.h>
39#include <linux/ipv6.h>
39#include <linux/tcp.h> 40#include <linux/tcp.h>
41#include <linux/net_tstamp.h>
42#include <linux/ptp_clock_kernel.h>
40 43
41#include <asm/checksum.h> 44#include <asm/checksum.h>
42#include <asm/homecache.h> 45#include <asm/homecache.h>
@@ -76,6 +79,9 @@
76 79
77#define MAX_FRAGS (MAX_SKB_FRAGS + 1) 80#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
78 81
82/* The "kinds" of buffer stacks (small/large/jumbo). */
83#define MAX_KINDS 3
84
79/* Size of completions data to allocate. 85/* Size of completions data to allocate.
80 * ISSUE: Probably more than needed since we don't use all the channels. 86 * ISSUE: Probably more than needed since we don't use all the channels.
81 */ 87 */
@@ -130,29 +136,31 @@ struct tile_net_tx_wake {
130 136
131/* Info for a specific cpu. */ 137/* Info for a specific cpu. */
132struct tile_net_info { 138struct tile_net_info {
133 /* The NAPI struct. */
134 struct napi_struct napi;
135 /* Packet queue. */
136 gxio_mpipe_iqueue_t iqueue;
137 /* Our cpu. */ 139 /* Our cpu. */
138 int my_cpu; 140 int my_cpu;
139 /* True if iqueue is valid. */
140 bool has_iqueue;
141 /* NAPI flags. */
142 bool napi_added;
143 bool napi_enabled;
144 /* Number of small sk_buffs which must still be provided. */
145 unsigned int num_needed_small_buffers;
146 /* Number of large sk_buffs which must still be provided. */
147 unsigned int num_needed_large_buffers;
148 /* A timer for handling egress completions. */ 141 /* A timer for handling egress completions. */
149 struct hrtimer egress_timer; 142 struct hrtimer egress_timer;
150 /* True if "egress_timer" is scheduled. */ 143 /* True if "egress_timer" is scheduled. */
151 bool egress_timer_scheduled; 144 bool egress_timer_scheduled;
152 /* Comps for each egress channel. */ 145 struct info_mpipe {
153 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; 146 /* Packet queue. */
154 /* Transmit wake timer for each egress channel. */ 147 gxio_mpipe_iqueue_t iqueue;
155 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; 148 /* The NAPI struct. */
149 struct napi_struct napi;
150 /* Number of buffers (by kind) which must still be provided. */
151 unsigned int num_needed_buffers[MAX_KINDS];
152 /* instance id. */
153 int instance;
154 /* True if iqueue is valid. */
155 bool has_iqueue;
156 /* NAPI flags. */
157 bool napi_added;
158 bool napi_enabled;
159 /* Comps for each egress channel. */
160 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
161 /* Transmit wake timer for each egress channel. */
162 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
163 } mpipe[NR_MPIPE_MAX];
156}; 164};
157 165
158/* Info for egress on a particular egress channel. */ 166/* Info for egress on a particular egress channel. */
@@ -177,19 +185,67 @@ struct tile_net_priv {
177 int loopify_channel; 185 int loopify_channel;
178 /* The egress channel (channel or loopify_channel). */ 186 /* The egress channel (channel or loopify_channel). */
179 int echannel; 187 int echannel;
180 /* Total stats. */ 188 /* mPIPE instance, 0 or 1. */
181 struct net_device_stats stats; 189 int instance;
190#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
191 /* The timestamp config. */
192 struct hwtstamp_config stamp_cfg;
193#endif
182}; 194};
183 195
184/* Egress info, indexed by "priv->echannel" (lazily created as needed). */ 196static struct mpipe_data {
185static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; 197 /* The ingress irq. */
198 int ingress_irq;
186 199
187/* Devices currently associated with each channel. 200 /* The "context" for all devices. */
188 * NOTE: The array entry can become NULL after ifconfig down, but 201 gxio_mpipe_context_t context;
189 * we do not free the underlying net_device structures, so it is 202
190 * safe to use a pointer after reading it from this array. 203 /* Egress info, indexed by "priv->echannel"
191 */ 204 * (lazily created as needed).
192static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; 205 */
206 struct tile_net_egress
207 egress_for_echannel[TILE_NET_CHANNELS];
208
209 /* Devices currently associated with each channel.
210 * NOTE: The array entry can become NULL after ifconfig down, but
211 * we do not free the underlying net_device structures, so it is
212 * safe to use a pointer after reading it from this array.
213 */
214 struct net_device
215 *tile_net_devs_for_channel[TILE_NET_CHANNELS];
216
217 /* The actual memory allocated for the buffer stacks. */
218 void *buffer_stack_vas[MAX_KINDS];
219
220 /* The amount of memory allocated for each buffer stack. */
221 size_t buffer_stack_bytes[MAX_KINDS];
222
223 /* The first buffer stack index
224 * (small = +0, large = +1, jumbo = +2).
225 */
226 int first_buffer_stack;
227
228 /* The buckets. */
229 int first_bucket;
230 int num_buckets;
231
232#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
233 /* PTP-specific data. */
234 struct ptp_clock *ptp_clock;
235 struct ptp_clock_info caps;
236
237 /* Lock for ptp accessors. */
238 struct mutex ptp_lock;
239#endif
240
241} mpipe_data[NR_MPIPE_MAX] = {
242 [0 ... (NR_MPIPE_MAX - 1)] {
243 .ingress_irq = -1,
244 .first_buffer_stack = -1,
245 .first_bucket = -1,
246 .num_buckets = 1
247 }
248};
193 249
194/* A mutex for "tile_net_devs_for_channel". */ 250/* A mutex for "tile_net_devs_for_channel". */
195static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); 251static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -197,34 +253,17 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
197/* The per-cpu info. */ 253/* The per-cpu info. */
198static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); 254static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
199 255
200/* The "context" for all devices. */
201static gxio_mpipe_context_t context;
202 256
203/* Buffer sizes and mpipe enum codes for buffer stacks. 257/* The buffer size enums for each buffer stack.
204 * See arch/tile/include/gxio/mpipe.h for the set of possible values. 258 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
259 * We avoid the "10384" size because it can induce "false chaining"
260 * on "cut-through" jumbo packets.
205 */ 261 */
206#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 262static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
207#define BUFFER_SIZE_SMALL 128 263 GXIO_MPIPE_BUFFER_SIZE_128,
208#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 264 GXIO_MPIPE_BUFFER_SIZE_1664,
209#define BUFFER_SIZE_LARGE 1664 265 GXIO_MPIPE_BUFFER_SIZE_16384
210 266};
211/* The small/large "buffer stacks". */
212static int small_buffer_stack = -1;
213static int large_buffer_stack = -1;
214
215/* Amount of memory allocated for each buffer stack. */
216static size_t buffer_stack_size;
217
218/* The actual memory allocated for the buffer stacks. */
219static void *small_buffer_stack_va;
220static void *large_buffer_stack_va;
221
222/* The buckets. */
223static int first_bucket = -1;
224static int num_buckets = 1;
225
226/* The ingress irq. */
227static int ingress_irq = -1;
228 267
229/* Text value of tile_net.cpus if passed as a module parameter. */ 268/* Text value of tile_net.cpus if passed as a module parameter. */
230static char *network_cpus_string; 269static char *network_cpus_string;
@@ -232,11 +271,21 @@ static char *network_cpus_string;
232/* The actual cpus in "network_cpus". */ 271/* The actual cpus in "network_cpus". */
233static struct cpumask network_cpus_map; 272static struct cpumask network_cpus_map;
234 273
235/* If "loopify=LINK" was specified, this is "LINK". */ 274/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
236static char *loopify_link_name; 275static char *loopify_link_name;
237 276
238/* If "tile_net.custom" was specified, this is non-NULL. */ 277/* If "tile_net.custom" was specified, this is true. */
239static char *custom_str; 278static bool custom_flag;
279
280/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
281static uint jumbo_num;
282
283/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
284static inline int mpipe_instance(struct net_device *dev)
285{
286 struct tile_net_priv *priv = netdev_priv(dev);
287 return priv->instance;
288}
240 289
241/* The "tile_net.cpus" argument specifies the cpus that are dedicated 290/* The "tile_net.cpus" argument specifies the cpus that are dedicated
242 * to handle ingress packets. 291 * to handle ingress packets.
@@ -289,9 +338,15 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
289/* The "tile_net.custom" argument causes us to ignore the "conventional" 338/* The "tile_net.custom" argument causes us to ignore the "conventional"
290 * classifier metadata, in particular, the "l2_offset". 339 * classifier metadata, in particular, the "l2_offset".
291 */ 340 */
292module_param_named(custom, custom_str, charp, 0444); 341module_param_named(custom, custom_flag, bool, 0444);
293MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); 342MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
294 343
344/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
345 * and to allocate the given number of "jumbo" buffers.
346 */
347module_param_named(jumbo, jumbo_num, uint, 0444);
348MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
349
295/* Atomically update a statistics field. 350/* Atomically update a statistics field.
296 * Note that on TILE-Gx, this operation is fire-and-forget on the 351 * Note that on TILE-Gx, this operation is fire-and-forget on the
297 * issuing core (single-cycle dispatch) and takes only a few cycles 352 * issuing core (single-cycle dispatch) and takes only a few cycles
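Like the existing cpus/loopify/custom parameters, jumbo is read-only module state (perm 0444), set once at load time. To use it, assuming the driver is built as the tile_net module as the Kconfig above describes:

	modprobe tile_net jumbo=900
	# or, built-in, on the kernel command line: tile_net.jumbo=900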
@@ -305,15 +360,16 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
305} 360}
306 361
307/* Allocate and push a buffer. */ 362/* Allocate and push a buffer. */
308static bool tile_net_provide_buffer(bool small) 363static bool tile_net_provide_buffer(int instance, int kind)
309{ 364{
310 int stack = small ? small_buffer_stack : large_buffer_stack; 365 struct mpipe_data *md = &mpipe_data[instance];
366 gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
367 size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
311 const unsigned long buffer_alignment = 128; 368 const unsigned long buffer_alignment = 128;
312 struct sk_buff *skb; 369 struct sk_buff *skb;
313 int len; 370 int len;
314 371
315 len = sizeof(struct sk_buff **) + buffer_alignment; 372 len = sizeof(struct sk_buff **) + buffer_alignment + bs;
316 len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
317 skb = dev_alloc_skb(len); 373 skb = dev_alloc_skb(len);
318 if (skb == NULL) 374 if (skb == NULL)
319 return false; 375 return false;
@@ -328,7 +384,7 @@ static bool tile_net_provide_buffer(bool small)
328 /* Make sure "skb" and the back-pointer have been flushed. */ 384 /* Make sure "skb" and the back-pointer have been flushed. */
329 wmb(); 385 wmb();
330 386
331 gxio_mpipe_push_buffer(&context, stack, 387 gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
332 (void *)va_to_tile_io_addr(skb->data)); 388 (void *)va_to_tile_io_addr(skb->data));
333 389
334 return true; 390 return true;
@@ -354,11 +410,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
354 return skb; 410 return skb;
355} 411}
356 412
357static void tile_net_pop_all_buffers(int stack) 413static void tile_net_pop_all_buffers(int instance, int stack)
358{ 414{
415 struct mpipe_data *md = &mpipe_data[instance];
416
359 for (;;) { 417 for (;;) {
360 tile_io_addr_t addr = 418 tile_io_addr_t addr =
361 (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); 419 (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
420 stack);
362 if (addr == 0) 421 if (addr == 0)
363 break; 422 break;
364 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); 423 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -369,24 +428,111 @@ static void tile_net_pop_all_buffers(int stack)
369static void tile_net_provide_needed_buffers(void) 428static void tile_net_provide_needed_buffers(void)
370{ 429{
371 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 430 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
431 int instance, kind;
432 for (instance = 0; instance < NR_MPIPE_MAX &&
433 info->mpipe[instance].has_iqueue; instance++) {
434 for (kind = 0; kind < MAX_KINDS; kind++) {
435 while (info->mpipe[instance].num_needed_buffers[kind]
436 != 0) {
437 if (!tile_net_provide_buffer(instance, kind)) {
438 pr_notice("Tile %d still needs"
439 " some buffers\n",
440 info->my_cpu);
441 return;
442 }
443 info->mpipe[instance].
444 num_needed_buffers[kind]--;
445 }
446 }
447 }
448}
372 449
373 while (info->num_needed_small_buffers != 0) { 450/* Get RX timestamp, and store it in the skb. */
374 if (!tile_net_provide_buffer(true)) 451static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
375 goto oops; 452 gxio_mpipe_idesc_t *idesc)
376 info->num_needed_small_buffers--; 453{
454#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
455 if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
456 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
457 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
458 shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
459 idesc->time_stamp_ns);
377 } 460 }
461#endif
462}
378 463
379 while (info->num_needed_large_buffers != 0) { 464/* Get TX timestamp, and store it in the skb. */
380 if (!tile_net_provide_buffer(false)) 465static void tile_tx_timestamp(struct sk_buff *skb, int instance)
381 goto oops; 466{
382 info->num_needed_large_buffers--; 467#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
468 struct skb_shared_info *shtx = skb_shinfo(skb);
469 if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
470 struct mpipe_data *md = &mpipe_data[instance];
471 struct skb_shared_hwtstamps shhwtstamps;
472 struct timespec ts;
473
474 shtx->tx_flags |= SKBTX_IN_PROGRESS;
475 gxio_mpipe_get_timestamp(&md->context, &ts);
476 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
477 shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
478 skb_tstamp_tx(skb, &shhwtstamps);
383 } 479 }
480#endif
481}
384 482
385 return; 483/* Use ioctl() to enable or disable TX or RX timestamping. */
484static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
485 int cmd)
486{
487#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
488 struct hwtstamp_config config;
489 struct tile_net_priv *priv = netdev_priv(dev);
386 490
387oops: 491 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
388 /* Add a description to the page allocation failure dump. */ 492 return -EFAULT;
389 pr_notice("Tile %d still needs some buffers\n", info->my_cpu); 493
494 if (config.flags) /* reserved for future extensions */
495 return -EINVAL;
496
497 switch (config.tx_type) {
498 case HWTSTAMP_TX_OFF:
499 case HWTSTAMP_TX_ON:
500 break;
501 default:
502 return -ERANGE;
503 }
504
505 switch (config.rx_filter) {
506 case HWTSTAMP_FILTER_NONE:
507 break;
508 case HWTSTAMP_FILTER_ALL:
509 case HWTSTAMP_FILTER_SOME:
510 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
511 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
512 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
513 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
514 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
515 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
516 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
517 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
518 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
519 case HWTSTAMP_FILTER_PTP_V2_EVENT:
520 case HWTSTAMP_FILTER_PTP_V2_SYNC:
521 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
522 config.rx_filter = HWTSTAMP_FILTER_ALL;
523 break;
524 default:
525 return -ERANGE;
526 }
527
528 if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
529 return -EFAULT;
530
531 priv->stamp_cfg = config;
532 return 0;
533#else
534 return -EOPNOTSUPP;
535#endif
390} 536}
391 537
392static inline bool filter_packet(struct net_device *dev, void *buf) 538static inline bool filter_packet(struct net_device *dev, void *buf)
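The ioctl handler implements the standard SIOCSHWTSTAMP contract: reject unknown tx_type/rx_filter values with -ERANGE, coarsen filters the hardware cannot match exactly (here every supported PTP filter collapses to HWTSTAMP_FILTER_ALL), and copy the possibly-adjusted config back to userspace. The matching userspace side, sketched with an assumed open socket sock and interface eth0:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
				       .rx_filter = HWTSTAMP_FILTER_ALL };
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");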
@@ -398,7 +544,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf)
398 /* Filter out packets that aren't for us. */ 544 /* Filter out packets that aren't for us. */
399 if (!(dev->flags & IFF_PROMISC) && 545 if (!(dev->flags & IFF_PROMISC) &&
400 !is_multicast_ether_addr(buf) && 546 !is_multicast_ether_addr(buf) &&
401 compare_ether_addr(dev->dev_addr, buf) != 0) 547 !ether_addr_equal(dev->dev_addr, buf))
402 return true; 548 return true;
403 549
404 return false; 550 return false;
@@ -409,6 +555,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
409{ 555{
410 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 556 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
411 struct tile_net_priv *priv = netdev_priv(dev); 557 struct tile_net_priv *priv = netdev_priv(dev);
558 int instance = priv->instance;
412 559
413 /* Encode the actual packet length. */ 560 /* Encode the actual packet length. */
414 skb_put(skb, len); 561 skb_put(skb, len);
@@ -419,47 +566,52 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
419 if (idesc->cs && idesc->csum_seed_val == 0xFFFF) 566 if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
420 skb->ip_summed = CHECKSUM_UNNECESSARY; 567 skb->ip_summed = CHECKSUM_UNNECESSARY;
421 568
422 netif_receive_skb(skb); 569 /* Get RX timestamp from idesc. */
570 tile_rx_timestamp(priv, skb, idesc);
571
572 napi_gro_receive(&info->mpipe[instance].napi, skb);
423 573
424 /* Update stats. */ 574 /* Update stats. */
425 tile_net_stats_add(1, &priv->stats.rx_packets); 575 tile_net_stats_add(1, &dev->stats.rx_packets);
426 tile_net_stats_add(len, &priv->stats.rx_bytes); 576 tile_net_stats_add(len, &dev->stats.rx_bytes);
427 577
428 /* Need a new buffer. */ 578 /* Need a new buffer. */
429 if (idesc->size == BUFFER_SIZE_SMALL_ENUM) 579 if (idesc->size == buffer_size_enums[0])
430 info->num_needed_small_buffers++; 580 info->mpipe[instance].num_needed_buffers[0]++;
581 else if (idesc->size == buffer_size_enums[1])
582 info->mpipe[instance].num_needed_buffers[1]++;
431 else 583 else
432 info->num_needed_large_buffers++; 584 info->mpipe[instance].num_needed_buffers[2]++;
433} 585}
 
 /* Handle a packet.  Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
 	uint8_t l2_offset;
 	void *va;
 	void *buf;
 	unsigned long len;
 	bool filter;
 
-	/* Drop packets for which no buffer was available.
-	 * NOTE: This happens under heavy load.
+	/* Drop packets for which no buffer was available (which can
+	 * happen under heavy load), or for which the me/tr/ce flags
+	 * are set (which can happen for jumbo cut-through packets,
+	 * or with a customized classifier).
 	 */
-	if (idesc->be) {
-		struct tile_net_priv *priv = netdev_priv(dev);
-		tile_net_stats_add(1, &priv->stats.rx_dropped);
-		gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
-		if (net_ratelimit())
-			pr_info("Dropping packet (insufficient buffers).\n");
-		return false;
+	if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
+		if (dev)
+			tile_net_stats_add(1, &dev->stats.rx_errors);
+		goto drop;
 	}
 
 	/* Get the "l2_offset", if allowed. */
-	l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+	l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
 
-	/* Get the raw buffer VA (includes "headroom"). */
-	va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+	/* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
+	va = tile_io_addr_to_va((unsigned long)idesc->va);
 
 	/* Get the actual packet start/length. */
 	buf = va + l2_offset;
@@ -470,7 +622,10 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 
 	filter = filter_packet(dev, buf);
 	if (filter) {
-		gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+		if (dev)
+			tile_net_stats_add(1, &dev->stats.rx_dropped);
+drop:
+		gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
 	} else {
 		struct sk_buff *skb = mpipe_buf_to_skb(va);
 
@@ -480,7 +635,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 		tile_net_receive_skb(dev, skb, idesc, len);
 	}
 
-	gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+	gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
 	return !filter;
 }
 
@@ -501,14 +656,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	unsigned int work = 0;
 	gxio_mpipe_idesc_t *idesc;
-	int i, n;
-
-	/* Process packets. */
-	while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+	int instance, i, n;
+	struct mpipe_data *md;
+	struct info_mpipe *info_mpipe =
+		container_of(napi, struct info_mpipe, napi);
+
+	instance = info_mpipe->instance;
+	while ((n = gxio_mpipe_iqueue_try_peek(
+			&info_mpipe->iqueue,
+			&idesc)) > 0) {
 		for (i = 0; i < n; i++) {
 			if (i == TILE_NET_BATCH)
 				goto done;
-			if (tile_net_handle_packet(idesc + i)) {
+			if (tile_net_handle_packet(instance,
+						   idesc + i)) {
 				if (++work >= budget)
 					goto done;
 			}
@@ -516,14 +677,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	}
 
 	/* There are no packets left. */
-	napi_complete(&info->napi);
+	napi_complete(&info_mpipe->napi);
 
+	md = &mpipe_data[instance];
 	/* Re-enable hypervisor interrupts. */
-	gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+	gxio_mpipe_enable_notif_ring_interrupt(
+		&md->context, info->mpipe[instance].iqueue.ring);
 
 	/* HACK: Avoid the "rotting packet" problem. */
-	if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
-		napi_schedule(&info->napi);
+	if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
+		napi_schedule(&info_mpipe->napi);
 
 	/* ISSUE: Handle completions? */
 
@@ -533,11 +696,11 @@ done:
 	return work;
 }
 
-/* Handle an ingress interrupt on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+/* Handle an ingress interrupt from an instance on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	napi_schedule(&info->napi);
+	napi_schedule(&info->mpipe[(uint64_t)id].napi);
 	return IRQ_HANDLED;
 }
 
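Note the idiom above: with one shared IRQ handler serving multiple mPIPE shims, the instance number is smuggled through the request_irq() cookie as a cast integer rather than a pointer to state, and recovered with the reverse cast in the handler. A hedged sketch of the pattern in isolation (the demo_* names are illustrative):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *id)
{
	unsigned long instance = (unsigned long)id;  /* recover the integer */

	/* ... index per-instance state by 'instance' here ... */
	return IRQ_HANDLED;
}

static int demo_setup(unsigned int irq, unsigned long instance)
{
	/* Pass the instance number itself, not a pointer to it. */
	return request_irq(irq, demo_handler, 0, "demo",
			   (void *)instance);
}

This avoids a lookup table keyed by irq number, at the cost of a cast that only works because the cookie is never dereferenced.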
@@ -579,7 +742,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
 {
 	struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
 	struct tile_net_priv *priv = netdev_priv(dev);
-	struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+	int instance = priv->instance;
+	struct tile_net_tx_wake *tx_wake =
+		&info->mpipe[instance].tx_wake[priv->echannel];
 
 	hrtimer_start(&tx_wake->timer,
 		      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -617,7 +782,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	unsigned long irqflags;
 	bool pending = false;
-	int i;
+	int i, instance;
 
 	local_irq_save(irqflags);
 
@@ -625,13 +790,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	info->egress_timer_scheduled = false;
 
 	/* Free all possible comps for this tile. */
-	for (i = 0; i < TILE_NET_CHANNELS; i++) {
-		struct tile_net_egress *egress = &egress_for_echannel[i];
-		struct tile_net_comps *comps = info->comps_for_echannel[i];
-		if (comps->comp_last >= comps->comp_next)
-			continue;
-		tile_net_free_comps(egress->equeue, comps, -1, true);
-		pending = pending || (comps->comp_last < comps->comp_next);
+	for (instance = 0; instance < NR_MPIPE_MAX &&
+		     info->mpipe[instance].has_iqueue; instance++) {
+		for (i = 0; i < TILE_NET_CHANNELS; i++) {
+			struct tile_net_egress *egress =
+				&mpipe_data[instance].egress_for_echannel[i];
+			struct tile_net_comps *comps =
+				info->mpipe[instance].comps_for_echannel[i];
+			if (!egress || comps->comp_last >= comps->comp_next)
+				continue;
+			tile_net_free_comps(egress->equeue, comps, -1, true);
+			pending = pending ||
+				(comps->comp_last < comps->comp_next);
+		}
 	}
 
 	/* Reschedule timer if needed. */
@@ -643,37 +814,112 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	return HRTIMER_NORESTART;
 }
 
-/* Helper function for "tile_net_update()".
- * "dev" (i.e. arg) is the device being brought up or down,
- * or NULL if all devices are now down.
- */
-static void tile_net_update_cpu(void *arg)
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+
+/* PTP clock operations. */
+
+static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 {
-	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	struct net_device *dev = arg;
+	int ret = 0;
+	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+	mutex_lock(&md->ptp_lock);
+	if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
+		ret = -EINVAL;
+	mutex_unlock(&md->ptp_lock);
+	return ret;
+}
 
-	if (!info->has_iqueue)
-		return;
+static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	int ret = 0;
+	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+	mutex_lock(&md->ptp_lock);
+	if (gxio_mpipe_adjust_timestamp(&md->context, delta))
+		ret = -EBUSY;
+	mutex_unlock(&md->ptp_lock);
+	return ret;
+}
 
-	if (dev != NULL) {
-		if (!info->napi_added) {
-			netif_napi_add(dev, &info->napi,
-				       tile_net_poll, TILE_NET_WEIGHT);
-			info->napi_added = true;
-		}
-		if (!info->napi_enabled) {
-			napi_enable(&info->napi);
-			info->napi_enabled = true;
-		}
-		enable_percpu_irq(ingress_irq, 0);
-	} else {
-		disable_percpu_irq(ingress_irq);
-		if (info->napi_enabled) {
-			napi_disable(&info->napi);
-			info->napi_enabled = false;
-		}
-		/* FIXME: Drain the iqueue. */
-	}
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	int ret = 0;
+	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+	mutex_lock(&md->ptp_lock);
+	if (gxio_mpipe_get_timestamp(&md->context, ts))
+		ret = -EBUSY;
+	mutex_unlock(&md->ptp_lock);
+	return ret;
+}
+
+static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
+			     const struct timespec *ts)
+{
+	int ret = 0;
+	struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+	mutex_lock(&md->ptp_lock);
+	if (gxio_mpipe_set_timestamp(&md->context, ts))
+		ret = -EBUSY;
+	mutex_unlock(&md->ptp_lock);
+	return ret;
+}
+
+static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *request, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_mpipe_caps = {
+	.owner = THIS_MODULE,
+	.name = "mPIPE clock",
+	.max_adj = 999999999,
+	.n_ext_ts = 0,
+	.pps = 0,
+	.adjfreq = ptp_mpipe_adjfreq,
+	.adjtime = ptp_mpipe_adjtime,
+	.gettime = ptp_mpipe_gettime,
+	.settime = ptp_mpipe_settime,
+	.enable = ptp_mpipe_enable,
+};
+
+#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
+
+/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
+static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+	gxio_mpipe_set_timestamp(&md->context, &ts);
+
+	mutex_init(&md->ptp_lock);
+	md->caps = ptp_mpipe_caps;
+	md->ptp_clock = ptp_clock_register(&md->caps, NULL);
+	if (IS_ERR(md->ptp_clock))
+		netdev_err(dev, "ptp_clock_register failed %ld\n",
+			   PTR_ERR(md->ptp_clock));
+#endif
+}
+
+/* Initialize PTP fields in a new device. */
+static void init_ptp_dev(struct tile_net_priv *priv)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+	priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+	priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+#endif
+}
+
+/* Helper functions for "tile_net_update()". */
+static void enable_ingress_irq(void *irq)
+{
+	enable_percpu_irq((long)irq, 0);
+}
+
+static void disable_ingress_irq(void *irq)
+{
+	disable_percpu_irq((long)irq);
 }
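The PTP callbacks above recover their per-shim state with container_of(), which maps a pointer to an embedded member back to its enclosing structure by subtracting the member's offset. A minimal sketch of that recovery, with hypothetical demo_* names standing in for the driver's types:

#include <linux/kernel.h>
#include <linux/ptp_clock_kernel.h>

struct demo_data {
	int instance;
	struct ptp_clock_info caps;	/* embedded member */
};

static int demo_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	/* Equivalent to (char *)ptp - offsetof(struct demo_data, caps). */
	struct demo_data *md = container_of(ptp, struct demo_data, caps);

	return md->instance >= 0 ? 0 : -EINVAL;
}

Because the PTP core only hands back the ptp_clock_info pointer it was registered with, embedding it in the per-instance structure is what lets each callback find the right mPIPE context without globals.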
 
 /* Helper function for tile_net_open() and tile_net_stop().
@@ -683,19 +929,22 @@ static int tile_net_update(struct net_device *dev)
 {
 	static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
 	bool saw_channel = false;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	int channel;
 	int rc;
 	int cpu;
 
-	gxio_mpipe_rules_init(&rules, &context);
+	saw_channel = false;
+	gxio_mpipe_rules_init(&rules, &md->context);
 
 	for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
-		if (tile_net_devs_for_channel[channel] == NULL)
+		if (md->tile_net_devs_for_channel[channel] == NULL)
 			continue;
 		if (!saw_channel) {
 			saw_channel = true;
-			gxio_mpipe_rules_begin(&rules, first_bucket,
-					       num_buckets, NULL);
+			gxio_mpipe_rules_begin(&rules, md->first_bucket,
+					       md->num_buckets, NULL);
 			gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
 		}
 		gxio_mpipe_rules_add_channel(&rules, channel);
@@ -706,102 +955,150 @@ static int tile_net_update(struct net_device *dev)
 	 */
 	rc = gxio_mpipe_rules_commit(&rules);
 	if (rc != 0) {
-		netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+		netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
+			    instance, rc);
 		return -EIO;
 	}
 
-	/* Update all cpus, sequentially (to protect "netif_napi_add()"). */
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, tile_net_update_cpu,
-					 (saw_channel ? dev : NULL), 1);
+	/* Update all cpus, sequentially (to protect "netif_napi_add()").
+	 * We use on_each_cpu to handle the IPI mask or unmask.
+	 */
+	if (!saw_channel)
+		on_each_cpu(disable_ingress_irq,
+			    (void *)(long)(md->ingress_irq), 1);
+	for_each_online_cpu(cpu) {
+		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+
+		if (!info->mpipe[instance].has_iqueue)
+			continue;
+		if (saw_channel) {
+			if (!info->mpipe[instance].napi_added) {
+				netif_napi_add(dev, &info->mpipe[instance].napi,
+					       tile_net_poll, TILE_NET_WEIGHT);
+				info->mpipe[instance].napi_added = true;
+			}
+			if (!info->mpipe[instance].napi_enabled) {
+				napi_enable(&info->mpipe[instance].napi);
+				info->mpipe[instance].napi_enabled = true;
+			}
+		} else {
+			if (info->mpipe[instance].napi_enabled) {
+				napi_disable(&info->mpipe[instance].napi);
+				info->mpipe[instance].napi_enabled = false;
+			}
+			/* FIXME: Drain the iqueue. */
+		}
+	}
+	if (saw_channel)
+		on_each_cpu(enable_ingress_irq,
+			    (void *)(long)(md->ingress_irq), 1);
 
 	/* HACK: Allow packets to flow in the simulator. */
 	if (saw_channel)
-		sim_enable_mpipe_links(0, -1);
+		sim_enable_mpipe_links(instance, -1);
 
 	return 0;
 }
 
-/* Allocate and initialize mpipe buffer stacks, and register them in
- * the mPIPE TLBs, for both small and large packet sizes.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+/* Initialize a buffer stack. */
+static int create_buffer_stack(struct net_device *dev,
+			       int kind, size_t num_buffers)
 {
 	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
-	int rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+	size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
+	int stack_idx = md->first_buffer_stack + kind;
+	void *va;
+	int i, rc;
 
-	/* Compute stack bytes; we round up to 64KB and then use
-	 * alloc_pages() so we get the required 64KB alignment as well.
+	/* Round up to 64KB and then use alloc_pages() so we get the
+	 * required 64KB alignment.
 	 */
-	buffer_stack_size =
-		ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
-		      64 * 1024);
+	md->buffer_stack_bytes[kind] =
+		ALIGN(needed, 64 * 1024);
 
-	/* Allocate two buffer stack indices. */
-	rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
-	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
-			   rc);
-		return rc;
-	}
-	small_buffer_stack = rc;
-	large_buffer_stack = rc + 1;
-
-	/* Allocate the small memory stack. */
-	small_buffer_stack_va =
-		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
-	if (small_buffer_stack_va == NULL) {
+	va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
+	if (va == NULL) {
 		netdev_err(dev,
-			   "Could not alloc %zd bytes for buffer stacks\n",
-			   buffer_stack_size);
+			   "Could not alloc %zd bytes for buffer stack %d\n",
+			   md->buffer_stack_bytes[kind], kind);
 		return -ENOMEM;
 	}
-	rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
-					  BUFFER_SIZE_SMALL_ENUM,
-					  small_buffer_stack_va,
-					  buffer_stack_size, 0);
+
+	/* Initialize the buffer stack. */
+	rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
+					  buffer_size_enums[kind], va,
+					  md->buffer_stack_bytes[kind], 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
+			   instance, rc);
+		free_pages_exact(va, md->buffer_stack_bytes[kind]);
 		return rc;
 	}
-	rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+
+	md->buffer_stack_vas[kind] = va;
+
+	rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
 					       hash_pte, 0);
 	if (rc != 0) {
 		netdev_err(dev,
-			   "gxio_mpipe_register_buffer_memory failed: %d\n",
-			   rc);
+			   "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
 
-	/* Allocate the large buffer stack. */
-	large_buffer_stack_va =
-		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
-	if (large_buffer_stack_va == NULL) {
-		netdev_err(dev,
-			   "Could not alloc %zd bytes for buffer stacks\n",
-			   buffer_stack_size);
-		return -ENOMEM;
-	}
-	rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
-					  BUFFER_SIZE_LARGE_ENUM,
-					  large_buffer_stack_va,
-					  buffer_stack_size, 0);
-	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
-			   rc);
-		return rc;
+	/* Provide initial buffers. */
+	for (i = 0; i < num_buffers; i++) {
+		if (!tile_net_provide_buffer(instance, kind)) {
+			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+			return -ENOMEM;
+		}
 	}
-	rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
-					       hash_pte, 0);
-	if (rc != 0) {
+
+	return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev,
+			      int network_cpus_count)
+{
+	int num_kinds = MAX_KINDS - (jumbo_num == 0);
+	size_t num_buffers;
+	int rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+
+	/* Allocate the buffer stacks. */
+	rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
+	if (rc < 0) {
 		netdev_err(dev,
-			   "gxio_mpipe_register_buffer_memory failed: %d\n",
-			   rc);
+			   "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
+	md->first_buffer_stack = rc;
 
-	return 0;
+	/* Enough small/large buffers to (normally) avoid buffer errors. */
+	num_buffers =
+		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+
+	/* Allocate the small memory stack. */
+	if (rc >= 0)
+		rc = create_buffer_stack(dev, 0, num_buffers);
+
+	/* Allocate the large buffer stack. */
+	if (rc >= 0)
+		rc = create_buffer_stack(dev, 1, num_buffers);
+
+	/* Allocate the jumbo buffer stack if needed. */
+	if (rc >= 0 && jumbo_num != 0)
+		rc = create_buffer_stack(dev, 2, jumbo_num);
+
+	return rc;
 }
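One detail worth calling out in create_buffer_stack(): the hardware requires the stack's backing memory to be 64KB-aligned, so the computed size is rounded up to a 64KB multiple and allocated with alloc_pages_exact(), whose high-order allocation gives at least that alignment for free. A sketch of just the sizing step; the per-buffer overhead below is an illustrative stand-in for gxio_mpipe_calc_buffer_stack_bytes(), whose exact formula is hardware-defined:

#include <linux/gfp.h>
#include <linux/kernel.h>

static void *demo_alloc_stack(size_t num_buffers, size_t *bytes_out)
{
	/* Illustrative assumption: ~8 bytes of stack space per buffer. */
	size_t needed = num_buffers * 8;
	size_t bytes = ALIGN(needed, 64 * 1024);	/* 64KB multiple */
	void *va = alloc_pages_exact(bytes, GFP_KERNEL);

	if (va)
		*bytes_out = bytes;
	return va;	/* release later with free_pages_exact(va, bytes) */
}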
 
 /* Allocate per-cpu resources (memory for completions and idescs).
@@ -812,6 +1109,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 {
 	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 	int order, i, rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	struct page *page;
 	void *addr;
 
@@ -826,7 +1125,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 	addr = pfn_to_kaddr(page_to_pfn(page));
 	memset(addr, 0, COMPS_SIZE);
 	for (i = 0; i < TILE_NET_CHANNELS; i++)
-		info->comps_for_echannel[i] =
+		info->mpipe[instance].comps_for_echannel[i] =
 			addr + i * sizeof(struct tile_net_comps);
 
 	/* If this is a network cpu, create an iqueue. */
@@ -840,14 +1139,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 			return -ENOMEM;
 		}
 		addr = pfn_to_kaddr(page_to_pfn(page));
-		rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
-					    addr, NOTIF_RING_SIZE, 0);
+		rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
+					    &md->context, ring++, addr,
+					    NOTIF_RING_SIZE, 0);
 		if (rc < 0) {
 			netdev_err(dev,
 				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
 			return rc;
 		}
-		info->has_iqueue = true;
+		info->mpipe[instance].has_iqueue = true;
 	}
 
 	return ring;
@@ -860,40 +1160,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
 				   int ring, int network_cpus_count)
 {
 	int group, rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Allocate one NotifGroup. */
-	rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+	rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
-			   rc);
+		netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
 	group = rc;
 
 	/* Initialize global num_buckets value. */
 	if (network_cpus_count > 4)
-		num_buckets = 256;
+		md->num_buckets = 256;
 	else if (network_cpus_count > 1)
-		num_buckets = 16;
+		md->num_buckets = 16;
 
 	/* Allocate some buckets, and set global first_bucket value. */
-	rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+	rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
-	first_bucket = rc;
+	md->first_bucket = rc;
 
 	/* Init group and buckets. */
 	rc = gxio_mpipe_init_notif_group_and_buckets(
-		&context, group, ring, network_cpus_count,
-		first_bucket, num_buckets,
+		&md->context, group, ring, network_cpus_count,
+		md->first_bucket, md->num_buckets,
 		GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
 	if (rc != 0) {
-		netdev_err(
-			dev,
-			"gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
-			rc);
+		netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
+			   "mpipe[%d] %d\n", instance, rc);
 		return rc;
 	}
 
@@ -907,30 +1208,39 @@ static int init_notif_group_and_buckets(struct net_device *dev,
  */
 static int tile_net_setup_interrupts(struct net_device *dev)
 {
-	int cpu, rc;
+	int cpu, rc, irq;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+
+	irq = md->ingress_irq;
+	if (irq < 0) {
+		irq = create_irq();
+		if (irq < 0) {
+			netdev_err(dev,
+				   "create_irq failed: mpipe[%d] %d\n",
+				   instance, irq);
+			return irq;
+		}
+		tile_irq_activate(irq, TILE_IRQ_PERCPU);
 
-	rc = create_irq();
-	if (rc < 0) {
-		netdev_err(dev, "create_irq failed: %d\n", rc);
-		return rc;
-	}
-	ingress_irq = rc;
-	tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
-	rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
-			 0, "tile_net", NULL);
-	if (rc != 0) {
-		netdev_err(dev, "request_irq failed: %d\n", rc);
-		destroy_irq(ingress_irq);
-		ingress_irq = -1;
-		return rc;
+		rc = request_irq(irq, tile_net_handle_ingress_irq,
+				 0, "tile_net", (void *)((uint64_t)instance));
+
+		if (rc != 0) {
+			netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
+				   instance, rc);
+			destroy_irq(irq);
+			return rc;
+		}
+		md->ingress_irq = irq;
 	}
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		if (info->has_iqueue) {
-			gxio_mpipe_request_notif_ring_interrupt(
-				&context, cpu_x(cpu), cpu_y(cpu),
-				KERNEL_PL, ingress_irq, info->iqueue.ring);
+		if (info->mpipe[instance].has_iqueue) {
+			gxio_mpipe_request_notif_ring_interrupt(&md->context,
+				cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
+				info->mpipe[instance].iqueue.ring);
 		}
 	}
 
@@ -938,39 +1248,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
 }
 
 /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(void)
+static void tile_net_init_mpipe_fail(int instance)
 {
-	int cpu;
+	int kind, cpu;
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Do cleanups that require the mpipe context first. */
-	if (small_buffer_stack >= 0)
-		tile_net_pop_all_buffers(small_buffer_stack);
-	if (large_buffer_stack >= 0)
-		tile_net_pop_all_buffers(large_buffer_stack);
+	for (kind = 0; kind < MAX_KINDS; kind++) {
+		if (md->buffer_stack_vas[kind] != NULL) {
+			tile_net_pop_all_buffers(instance,
+						 md->first_buffer_stack +
+						 kind);
+		}
+	}
 
 	/* Destroy mpipe context so the hardware no longer owns any memory. */
-	gxio_mpipe_destroy(&context);
+	gxio_mpipe_destroy(&md->context);
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		free_pages((unsigned long)(info->comps_for_echannel[0]),
-			   get_order(COMPS_SIZE));
-		info->comps_for_echannel[0] = NULL;
-		free_pages((unsigned long)(info->iqueue.idescs),
+		free_pages(
+			(unsigned long)(
+				info->mpipe[instance].comps_for_echannel[0]),
+			get_order(COMPS_SIZE));
+		info->mpipe[instance].comps_for_echannel[0] = NULL;
+		free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
 			   get_order(NOTIF_RING_SIZE));
-		info->iqueue.idescs = NULL;
+		info->mpipe[instance].iqueue.idescs = NULL;
 	}
 
-	if (small_buffer_stack_va)
-		free_pages_exact(small_buffer_stack_va, buffer_stack_size);
-	if (large_buffer_stack_va)
-		free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+	for (kind = 0; kind < MAX_KINDS; kind++) {
+		if (md->buffer_stack_vas[kind] != NULL) {
+			free_pages_exact(md->buffer_stack_vas[kind],
+					 md->buffer_stack_bytes[kind]);
+			md->buffer_stack_vas[kind] = NULL;
+		}
+	}
 
-	small_buffer_stack_va = NULL;
-	large_buffer_stack_va = NULL;
-	large_buffer_stack = -1;
-	small_buffer_stack = -1;
-	first_bucket = -1;
+	md->first_buffer_stack = -1;
+	md->first_bucket = -1;
 }
 
 /* The first time any tilegx network device is opened, we initialize
@@ -984,9 +1300,11 @@ static void tile_net_init_mpipe_fail(void)
  */
 static int tile_net_init_mpipe(struct net_device *dev)
 {
-	int i, num_buffers, rc;
+	int rc;
 	int cpu;
 	int first_ring, ring;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	int network_cpus_count = cpus_weight(network_cpus_map);
 
 	if (!hash_default) {
@@ -994,36 +1312,21 @@ static int tile_net_init_mpipe(struct net_device *dev)
 		return -EIO;
 	}
 
-	rc = gxio_mpipe_init(&context, 0);
+	rc = gxio_mpipe_init(&md->context, instance);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
+			   instance, rc);
 		return -EIO;
 	}
 
 	/* Set up the buffer stacks. */
-	num_buffers =
-		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
-	rc = init_buffer_stacks(dev, num_buffers);
+	rc = init_buffer_stacks(dev, network_cpus_count);
 	if (rc != 0)
 		goto fail;
 
-	/* Provide initial buffers. */
-	rc = -ENOMEM;
-	for (i = 0; i < num_buffers; i++) {
-		if (!tile_net_provide_buffer(true)) {
-			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
-			goto fail;
-		}
-	}
-	for (i = 0; i < num_buffers; i++) {
-		if (!tile_net_provide_buffer(false)) {
-			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
-			goto fail;
-		}
-	}
-
 	/* Allocate one NotifRing for each network cpu. */
-	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+	rc = gxio_mpipe_alloc_notif_rings(&md->context,
+					  network_cpus_count, 0, 0);
 	if (rc < 0) {
 		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
 			   rc);
@@ -1050,10 +1353,13 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	if (rc != 0)
 		goto fail;
 
+	/* Register PTP clock and set mPIPE timestamp, if configured. */
+	register_ptp_clock(dev, md);
+
 	return 0;
 
 fail:
-	tile_net_init_mpipe_fail();
+	tile_net_init_mpipe_fail(instance);
 	return rc;
 }
 
@@ -1063,17 +1369,19 @@ fail:
  */
 static int tile_net_init_egress(struct net_device *dev, int echannel)
 {
+	static int ering = -1;
 	struct page *headers_page, *edescs_page, *equeue_page;
 	gxio_mpipe_edesc_t *edescs;
 	gxio_mpipe_equeue_t *equeue;
 	unsigned char *headers;
 	int headers_order, edescs_order, equeue_order;
 	size_t edescs_size;
-	int edma;
 	int rc = -ENOMEM;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Only initialize once. */
-	if (egress_for_echannel[echannel].equeue != NULL)
+	if (md->egress_for_echannel[echannel].equeue != NULL)
 		return 0;
 
 	/* Allocate memory for the "headers". */
@@ -1110,28 +1418,41 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 	}
 	equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
 
-	/* Allocate an edma ring.  Note that in practice this can't
-	 * fail, which is good, because we will leak an edma ring if so.
-	 */
-	rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
-	if (rc < 0) {
-		netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
-			    rc);
-		goto fail_equeue;
+	/* Allocate an edma ring (using a one entry "free list"). */
+	if (ering < 0) {
+		rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
+		if (rc < 0) {
+			netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
+				    "mpipe[%d] %d\n", instance, rc);
+			goto fail_equeue;
+		}
+		ering = rc;
 	}
-	edma = rc;
 
 	/* Initialize the equeue. */
-	rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+	rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
 				    edescs, edescs_size, 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
+			   instance, rc);
 		goto fail_equeue;
 	}
 
+	/* Don't reuse the ering later. */
+	ering = -1;
+
+	if (jumbo_num != 0) {
+		/* Make sure "jumbo" packets can be egressed safely. */
+		if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
+			/* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
+			netdev_warn(dev, "Jumbo packets may not be egressed"
+				    " properly on channel %d\n", echannel);
+		}
+	}
+
 	/* Done. */
-	egress_for_echannel[echannel].equeue = equeue;
-	egress_for_echannel[echannel].headers = headers;
+	md->egress_for_echannel[echannel].equeue = equeue;
+	md->egress_for_echannel[echannel].headers = headers;
 	return 0;
 
 fail_equeue:
@@ -1151,11 +1472,25 @@ fail:
 static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 			      const char *link_name)
 {
-	int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+	int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
 	if (rc < 0) {
-		netdev_err(dev, "Failed to open '%s'\n", link_name);
+		netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
+			   link_name, instance, rc);
 		return rc;
 	}
+	if (jumbo_num != 0) {
+		u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
+		rc = gxio_mpipe_link_set_attr(link, attr, 1);
+		if (rc != 0) {
+			netdev_err(dev,
+				   "Cannot receive jumbo packets on '%s'\n",
+				   link_name);
+			gxio_mpipe_link_close(link);
+			return rc;
+		}
+	}
 	rc = gxio_mpipe_link_channel(link);
 	if (rc < 0 || rc >= TILE_NET_CHANNELS) {
 		netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
@@ -1169,12 +1504,23 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 static int tile_net_open(struct net_device *dev)
 {
 	struct tile_net_priv *priv = netdev_priv(dev);
-	int cpu, rc;
+	int cpu, rc, instance;
 
 	mutex_lock(&tile_net_devs_for_channel_mutex);
 
-	/* Do one-time initialization the first time any device is opened. */
-	if (ingress_irq < 0) {
+	/* Get the instance info. */
+	rc = gxio_mpipe_link_instance(dev->name);
+	if (rc < 0 || rc >= NR_MPIPE_MAX) {
+		mutex_unlock(&tile_net_devs_for_channel_mutex);
+		return -EIO;
+	}
+
+	priv->instance = rc;
+	instance = rc;
+	if (!mpipe_data[rc].context.mmio_fast_base) {
+		/* Do one-time initialization per instance the first time
+		 * any device is opened.
+		 */
 		rc = tile_net_init_mpipe(dev);
 		if (rc != 0)
 			goto fail;
@@ -1205,7 +1551,7 @@ static int tile_net_open(struct net_device *dev)
 	if (rc != 0)
 		goto fail;
 
-	tile_net_devs_for_channel[priv->channel] = dev;
+	mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
 
 	rc = tile_net_update(dev);
 	if (rc != 0)
@@ -1217,7 +1563,7 @@ static int tile_net_open(struct net_device *dev)
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 		struct tile_net_tx_wake *tx_wake =
-			&info->tx_wake[priv->echannel];
+			&info->mpipe[instance].tx_wake[priv->echannel];
 
 		hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
 			     HRTIMER_MODE_REL);
@@ -1243,7 +1589,7 @@ fail:
 		priv->channel = -1;
 	}
 	priv->echannel = -1;
-	tile_net_devs_for_channel[priv->channel] = NULL;
+	mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
 	mutex_unlock(&tile_net_devs_for_channel_mutex);
 
 	/* Don't return raw gxio error codes to generic Linux. */
@@ -1255,18 +1601,20 @@ static int tile_net_stop(struct net_device *dev)
 {
 	struct tile_net_priv *priv = netdev_priv(dev);
 	int cpu;
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 		struct tile_net_tx_wake *tx_wake =
-			&info->tx_wake[priv->echannel];
+			&info->mpipe[instance].tx_wake[priv->echannel];
 
 		hrtimer_cancel(&tx_wake->timer);
 		netif_stop_subqueue(dev, cpu);
 	}
 
 	mutex_lock(&tile_net_devs_for_channel_mutex);
-	tile_net_devs_for_channel[priv->channel] = NULL;
+	md->tile_net_devs_for_channel[priv->channel] = NULL;
 	(void)tile_net_update(dev);
 	if (priv->loopify_channel >= 0) {
 		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1374,20 +1722,20 @@ static int tso_count_edescs(struct sk_buff *skb)
 	return num_edescs;
 }
 
-/* Prepare modified copies of the skbuff headers.
- * FIXME: add support for IPv6.
- */
+/* Prepare modified copies of the skbuff headers. */
 static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 				s64 slot)
 {
 	struct skb_shared_info *sh = skb_shinfo(skb);
 	struct iphdr *ih;
+	struct ipv6hdr *ih6;
 	struct tcphdr *th;
 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	unsigned int data_len = skb->len - sh_len;
 	unsigned char *data = skb->data;
 	unsigned int ih_off, th_off, p_len;
 	unsigned int isum_seed, tsum_seed, id, seq;
+	int is_ipv6;
 	long f_id = -1;    /* id of the current fragment */
 	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
 	long f_used = 0;   /* bytes used from the current fragment */
@@ -1395,18 +1743,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 	int segment;
 
 	/* Locate original headers and compute various lengths. */
-	ih = ip_hdr(skb);
+	is_ipv6 = skb_is_gso_v6(skb);
+	if (is_ipv6) {
+		ih6 = ipv6_hdr(skb);
+		ih_off = skb_network_offset(skb);
+	} else {
+		ih = ip_hdr(skb);
+		ih_off = skb_network_offset(skb);
+		isum_seed = ((0xFFFF - ih->check) +
+			     (0xFFFF - ih->tot_len) +
+			     (0xFFFF - ih->id));
+		id = ntohs(ih->id);
+	}
+
 	th = tcp_hdr(skb);
-	ih_off = skb_network_offset(skb);
 	th_off = skb_transport_offset(skb);
 	p_len = sh->gso_size;
 
-	/* Set up seed values for IP and TCP csum and initialize id and seq. */
-	isum_seed = ((0xFFFF - ih->check) +
-		     (0xFFFF - ih->tot_len) +
-		     (0xFFFF - ih->id));
 	tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
-	id = ntohs(ih->id);
 	seq = ntohl(th->seq);
 
 	/* Prepare all the headers. */
@@ -1420,11 +1774,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 		memcpy(buf, data, sh_len);
 
 		/* Update copied ip header. */
-		ih = (struct iphdr *)(buf + ih_off);
-		ih->tot_len = htons(sh_len + p_len - ih_off);
-		ih->id = htons(id);
-		ih->check = csum_long(isum_seed + ih->tot_len +
-				      ih->id) ^ 0xffff;
+		if (is_ipv6) {
+			ih6 = (struct ipv6hdr *)(buf + ih_off);
+			ih6->payload_len = htons(sh_len + p_len - ih_off -
+						 sizeof(*ih6));
+		} else {
+			ih = (struct iphdr *)(buf + ih_off);
+			ih->tot_len = htons(sh_len + p_len - ih_off);
+			ih->id = htons(id);
+			ih->check = csum_long(isum_seed + ih->tot_len +
+					      ih->id) ^ 0xffff;
+		}
 
 		/* Update copied tcp header. */
 		th = (struct tcphdr *)(buf + th_off);
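The IPv4 branch above leans on a precomputed seed so each segment's header checksum can be patched rather than fully recomputed: because the internet checksum is 16-bit ones-complement arithmetic, "subtracting" a field means adding its complement (0xFFFF - x == ~x), so isum_seed folds the original check, tot_len, and id out of the sum, and each segment adds its own new values back in before complementing. A hedged numeric sketch of the folding step; csum_fold32/patch_check are illustrative stand-ins for the driver's csum_long() helper:

#include <stdint.h>

/* Fold a 32-bit ones-complement accumulator down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)sum;
}

/* Patch an IPv4 checksum after tot_len/id change, without a recompute. */
static uint16_t patch_check(uint16_t old_check, uint16_t old_totlen,
			    uint16_t old_id, uint16_t new_totlen,
			    uint16_t new_id)
{
	/* Remove the old fields by adding their ones-complements. */
	uint32_t seed = (0xFFFFu - old_check) + (0xFFFFu - old_totlen) +
			(0xFFFFu - old_id);

	/* Add the per-segment values back, fold, then complement. */
	return csum_fold32(seed + new_totlen + new_id) ^ 0xFFFF;
}

The IPv6 branch needs none of this, since IPv6 headers carry no checksum; only payload_len is rewritten per segment.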
@@ -1475,8 +1835,9 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 		       struct sk_buff *skb, unsigned char *headers, s64 slot)
 {
-	struct tile_net_priv *priv = netdev_priv(dev);
 	struct skb_shared_info *sh = skb_shinfo(skb);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	unsigned int data_len = skb->len - sh_len;
 	unsigned int p_len = sh->gso_size;
@@ -1499,8 +1860,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 	edesc_head.xfer_size = sh_len;
 
 	/* This is only used to specify the TLB. */
-	edesc_head.stack_idx = large_buffer_stack;
-	edesc_body.stack_idx = large_buffer_stack;
+	edesc_head.stack_idx = md->first_buffer_stack;
+	edesc_body.stack_idx = md->first_buffer_stack;
 
 	/* Egress all the edescs. */
 	for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1553,8 +1914,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 	}
 
 	/* Update stats. */
-	tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
-	tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+	tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
+	tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
 }
 
 /* Do "TSO" handling for egress.
@@ -1575,8 +1936,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	struct tile_net_priv *priv = netdev_priv(dev);
 	int channel = priv->echannel;
-	struct tile_net_egress *egress = &egress_for_echannel[channel];
-	struct tile_net_comps *comps = info->comps_for_echannel[channel];
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct tile_net_egress *egress = &md->egress_for_echannel[channel];
+	struct tile_net_comps *comps =
+		info->mpipe[instance].comps_for_echannel[channel];
 	gxio_mpipe_equeue_t *equeue = egress->equeue;
 	unsigned long irqflags;
 	int num_edescs;
@@ -1640,10 +2004,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	struct tile_net_priv *priv = netdev_priv(dev);
-	struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct tile_net_egress *egress =
+		&md->egress_for_echannel[priv->echannel];
 	gxio_mpipe_equeue_t *equeue = egress->equeue;
 	struct tile_net_comps *comps =
-		info->comps_for_echannel[priv->echannel];
+		info->mpipe[instance].comps_for_echannel[priv->echannel];
 	unsigned int len = skb->len;
 	unsigned char *data = skb->data;
 	unsigned int num_edescs;
@@ -1660,7 +2027,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
 
 	/* This is only used to specify the TLB. */
-	edesc.stack_idx = large_buffer_stack;
+	edesc.stack_idx = md->first_buffer_stack;
 
 	/* Prepare the edescs. */
 	for (i = 0; i < num_edescs; i++) {
@@ -1693,13 +2060,16 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 	for (i = 0; i < num_edescs; i++)
 		gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
 
+	/* Store TX timestamp if needed. */
+	tile_tx_timestamp(skb, instance);
+
 	/* Add a completion record. */
 	add_comp(equeue, comps, slot - 1, skb);
 
 	/* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
-	tile_net_stats_add(1, &priv->stats.tx_packets);
+	tile_net_stats_add(1, &dev->stats.tx_packets);
 	tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
-			   &priv->stats.tx_bytes);
+			   &dev->stats.tx_bytes);
 
 	local_irq_restore(irqflags);
 
@@ -1727,20 +2097,18 @@ static void tile_net_tx_timeout(struct net_device *dev)
 /* Ioctl commands. */
 static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	return -EOPNOTSUPP;
-}
+	if (cmd == SIOCSHWTSTAMP)
+		return tile_hwtstamp_ioctl(dev, rq, cmd);
 
-/* Get system network statistics for device. */
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
-{
-	struct tile_net_priv *priv = netdev_priv(dev);
-	return &priv->stats;
+	return -EOPNOTSUPP;
 }
 
 /* Change the MTU. */
 static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if ((new_mtu < 68) || (new_mtu > 1500))
+	if (new_mtu < 68)
+		return -EINVAL;
+	if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
 		return -EINVAL;
 	dev->mtu = new_mtu;
 	return 0;
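With jumbo buffers configured (the jumbo_num boot parameter non-zero), the MTU ceiling rises from the classic Ethernet 1500 to 9000 bytes; 68 remains the floor, the minimum an IPv4 host must handle. A one-function sketch of the same range check, with an illustrative name:

/* Accept an MTU only inside the supported window; mirrors the check above. */
static int demo_valid_mtu(int new_mtu, int jumbo_enabled)
{
	int max_mtu = jumbo_enabled ? 9000 : 1500;

	return (new_mtu >= 68 && new_mtu <= max_mtu) ? 0 : -EINVAL;
}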
@@ -1772,9 +2140,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
  */
 static void tile_net_netpoll(struct net_device *dev)
 {
-	disable_percpu_irq(ingress_irq);
-	tile_net_handle_ingress_irq(ingress_irq, NULL);
-	enable_percpu_irq(ingress_irq, 0);
+	int instance = mpipe_instance(dev);
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct mpipe_data *md = &mpipe_data[instance];
+
+	disable_percpu_irq(md->ingress_irq);
+	napi_schedule(&info->mpipe[instance].napi);
+	enable_percpu_irq(md->ingress_irq, 0);
 }
 #endif
 
@@ -1784,7 +2156,6 @@ static const struct net_device_ops tile_net_ops = {
 	.ndo_start_xmit = tile_net_tx,
 	.ndo_select_queue = tile_net_select_queue,
 	.ndo_do_ioctl = tile_net_ioctl,
-	.ndo_get_stats = tile_net_get_stats,
 	.ndo_change_mtu = tile_net_change_mtu,
 	.ndo_tx_timeout = tile_net_tx_timeout,
 	.ndo_set_mac_address = tile_net_set_mac_address,
@@ -1800,14 +2171,21 @@ static const struct net_device_ops tile_net_ops = {
  */
 static void tile_net_setup(struct net_device *dev)
 {
+	netdev_features_t features = 0;
+
 	ether_setup(dev);
 	dev->netdev_ops = &tile_net_ops;
 	dev->watchdog_timeo = TILE_NET_TIMEOUT;
-	dev->features |= NETIF_F_LLTX;
-	dev->features |= NETIF_F_HW_CSUM;
-	dev->features |= NETIF_F_SG;
-	dev->features |= NETIF_F_TSO;
 	dev->mtu = 1500;
+
+	features |= NETIF_F_HW_CSUM;
+	features |= NETIF_F_SG;
+	features |= NETIF_F_TSO;
+	features |= NETIF_F_TSO6;
+
+	dev->hw_features |= features;
+	dev->vlan_features |= features;
+	dev->features |= features;
 }
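The rework above also stops writing dev->features alone and instead applies one mask to hw_features (offloads the user may toggle via ethtool), vlan_features (offloads inherited by VLAN devices stacked on top), and features (what is enabled by default). A hedged sketch of the pattern for a generic driver:

#include <linux/netdevice.h>

static void demo_set_features(struct net_device *dev)
{
	netdev_features_t features = NETIF_F_HW_CSUM | NETIF_F_SG |
				     NETIF_F_TSO | NETIF_F_TSO6;

	dev->hw_features |= features;	/* togglable via ethtool -K */
	dev->vlan_features |= features;	/* VLANs on top keep offloads */
	dev->features |= features;	/* enabled by default */
}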
 
 /* Allocate the device structure, register the device, and obtain the
@@ -1842,6 +2220,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 	priv->channel = -1;
 	priv->loopify_channel = -1;
 	priv->echannel = -1;
+	init_ptp_dev(priv);
 
 	/* Get the MAC address and set it in the device struct; this must
 	 * be done before the device is opened.  If the MAC is all zeroes,
@@ -1871,9 +2250,12 @@ static void tile_net_init_module_percpu(void *unused)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	int my_cpu = smp_processor_id();
+	int instance;
 
-	info->has_iqueue = false;
-
+	for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
+		info->mpipe[instance].has_iqueue = false;
+		info->mpipe[instance].instance = instance;
+	}
 	info->my_cpu = my_cpu;
 
 	/* Initialize the egress timer. */
@@ -1890,6 +2272,8 @@ static int __init tile_net_init_module(void)
 
 	pr_info("Tilera Network Driver\n");
 
+	BUILD_BUG_ON(NR_MPIPE_MAX != 2);
+
 	mutex_init(&tile_net_devs_for_channel_mutex);
 
 	/* Initialize each CPU. */
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 36435499814b..106be47716e7 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -31,6 +31,7 @@
 #include <linux/in6.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/u64_stats_sync.h>
 #include <asm/checksum.h>
 #include <asm/homecache.h>
 
@@ -88,13 +89,6 @@
 /* ISSUE: This has not been thoroughly tested (except at 1500). */
 #define TILE_NET_MTU 1500
 
-/* HACK: Define to support GSO. */
-/* ISSUE: This may actually hurt performance of the TCP blaster. */
-/* #define TILE_NET_GSO */
-
-/* Define this to collapse "duplicate" acks. */
-/* #define IGNORE_DUP_ACKS */
-
 /* HACK: Define this to verify incoming packets. */
 /* #define TILE_NET_VERIFY_INGRESS */
 
@@ -156,10 +150,13 @@ struct tile_netio_queue {
  * Statistics counters for a specific cpu and device.
  */
 struct tile_net_stats_t {
-	u32 rx_packets;
-	u32 rx_bytes;
-	u32 tx_packets;
-	u32 tx_bytes;
+	struct u64_stats_sync syncp;
+	u64 rx_packets;		/* total packets received	*/
+	u64 tx_packets;		/* total packets transmitted	*/
+	u64 rx_bytes;		/* total bytes received		*/
+	u64 tx_bytes;		/* total bytes transmitted	*/
+	u64 rx_errors;		/* packets truncated or marked bad by hw */
+	u64 rx_dropped;		/* packets not for us or intf not up */
 };
164 161
165 162
@@ -218,8 +215,6 @@ struct tile_net_priv {
218 int network_cpus_count; 215 int network_cpus_count;
219 /* Credits per network cpu. */ 216 /* Credits per network cpu. */
220 int network_cpus_credits; 217 int network_cpus_credits;
221 /* Network stats. */
222 struct net_device_stats stats;
223 /* For NetIO bringup retries. */ 218 /* For NetIO bringup retries. */
224 struct delayed_work retry_work; 219 struct delayed_work retry_work;
225 /* Quick access to per cpu data. */ 220 /* Quick access to per cpu data. */
@@ -627,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg)
627} 622}
628 623
629 624
630#ifdef IGNORE_DUP_ACKS
631
632/*
633 * Help detect "duplicate" ACKs. These are sequential packets (for a
634 * given flow) which are exactly 66 bytes long, sharing everything but
635 * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
636 * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
637 * +N, and the Tstamps are usually identical.
638 *
639 * NOTE: Apparently truly duplicate acks (with identical "ack" values),
640 * should not be collapsed, as they are used for some kind of flow control.
641 */
642static bool is_dup_ack(char *s1, char *s2, unsigned int len)
643{
644 int i;
645
646 unsigned long long ignorable = 0;
647
648 /* Identification. */
649 ignorable |= (1ULL << 0x12);
650 ignorable |= (1ULL << 0x13);
651
652 /* Header checksum. */
653 ignorable |= (1ULL << 0x18);
654 ignorable |= (1ULL << 0x19);
655
656 /* ACK. */
657 ignorable |= (1ULL << 0x2a);
658 ignorable |= (1ULL << 0x2b);
659 ignorable |= (1ULL << 0x2c);
660 ignorable |= (1ULL << 0x2d);
661
662 /* WinSize. */
663 ignorable |= (1ULL << 0x30);
664 ignorable |= (1ULL << 0x31);
665
666 /* Checksum. */
667 ignorable |= (1ULL << 0x32);
668 ignorable |= (1ULL << 0x33);
669
670 for (i = 0; i < len; i++, ignorable >>= 1) {
671
672 if ((ignorable & 1) || (s1[i] == s2[i]))
673 continue;
674
675#ifdef TILE_NET_DEBUG
676 /* HACK: Mention non-timestamp diffs. */
677 if (i < 0x38 && i != 0x2f &&
678 net_ratelimit())
679 pr_info("Diff at 0x%x\n", i);
680#endif
681
682 return false;
683 }
684
685#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
686 /* HACK: Do not suppress truly duplicate ACKs. */
687 /* ISSUE: Is this actually necessary or helpful? */
688 if (s1[0x2a] == s2[0x2a] &&
689 s1[0x2b] == s2[0x2b] &&
690 s1[0x2c] == s2[0x2c] &&
691 s1[0x2d] == s2[0x2d]) {
692 return false;
693 }
694#endif
695
696 return true;
697}
698
699#endif
700
701
702
703static void tile_net_discard_aux(struct tile_net_cpu *info, int index) 625static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
704{ 626{
705 struct tile_netio_queue *queue = &info->queue; 627 struct tile_netio_queue *queue = &info->queue;
@@ -774,6 +696,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
774 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); 696 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
775 697
776 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); 698 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
699 netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
777 700
778 /* Extract the packet size. FIXME: Shouldn't the second line */ 701 /* Extract the packet size. FIXME: Shouldn't the second line */
779 /* get subtracted? Mostly moot, since it should be "zero". */ 702 /* get subtracted? Mostly moot, since it should be "zero". */
@@ -806,40 +729,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
806#endif /* TILE_NET_DUMP_PACKETS */ 729#endif /* TILE_NET_DUMP_PACKETS */
807 730
808#ifdef TILE_NET_VERIFY_INGRESS 731#ifdef TILE_NET_VERIFY_INGRESS
809 if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && 732 if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
810 NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
811 /* Bug 6624: Includes UDP packets with a "zero" checksum. */
812 pr_warning("Bad L4 checksum on %d byte packet.\n", len);
813 }
814 if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
815 NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
816 dump_packet(buf, len, "rx"); 733 dump_packet(buf, len, "rx");
817 panic("Bad L3 checksum."); 734 panic("Unexpected OVERSIZE.");
818 }
819 switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
820 case NETIO_PKT_STATUS_OVERSIZE:
821 if (len >= 64) {
822 dump_packet(buf, len, "rx");
823 panic("Unexpected OVERSIZE.");
824 }
825 break;
826 case NETIO_PKT_STATUS_BAD:
827 pr_warning("Unexpected BAD %ld byte packet.\n", len);
828 } 735 }
829#endif 736#endif
830 737
831 filter = 0; 738 filter = 0;
832 739
833 /* ISSUE: Filter TCP packets with "bad" checksums? */ 740 if (pkt_status == NETIO_PKT_STATUS_BAD) {
834 741 /* Handle CRC error and hardware truncation. */
835 if (!(dev->flags & IFF_UP)) { 742 filter = 2;
743 } else if (!(dev->flags & IFF_UP)) {
836 /* Filter packets received before we're up. */ 744 /* Filter packets received before we're up. */
837 filter = 1; 745 filter = 1;
838 } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) { 746 } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
747 pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
839 /* Filter "truncated" packets. */ 748 /* Filter "truncated" packets. */
840 filter = 1; 749 filter = 2;
841 } else if (!(dev->flags & IFF_PROMISC)) { 750 } else if (!(dev->flags & IFF_PROMISC)) {
842 /* FIXME: Implement HW multicast filter. */
843 if (!is_multicast_ether_addr(buf)) { 751 if (!is_multicast_ether_addr(buf)) {
844 /* Filter packets not for our address. */ 752 /* Filter packets not for our address. */
845 const u8 *mine = dev->dev_addr; 753 const u8 *mine = dev->dev_addr;
@@ -847,9 +755,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
847 } 755 }
848 } 756 }
849 757
850 if (filter) { 758 u64_stats_update_begin(&stats->syncp);
851 759
852 /* ISSUE: Update "drop" statistics? */ 760 if (filter != 0) {
761
762 if (filter == 1)
763 stats->rx_dropped++;
764 else
765 stats->rx_errors++;
853 766
854 tile_net_provide_linux_buffer(info, va, small); 767 tile_net_provide_linux_buffer(info, va, small);
855 768
@@ -881,6 +794,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
881 stats->rx_bytes += len; 794 stats->rx_bytes += len;
882 } 795 }
883 796
797 u64_stats_update_end(&stats->syncp);
798
884 /* ISSUE: It would be nice to defer this until the packet has */ 799 /* ISSUE: It would be nice to defer this until the packet has */
885 /* actually been processed. */ 800 /* actually been processed. */
886 tile_net_return_credit(info); 801 tile_net_return_credit(info);
@@ -1907,8 +1822,10 @@ busy:
1907 kfree_skb(olds[i]); 1822 kfree_skb(olds[i]);
1908 1823
1909 /* Update stats. */ 1824 /* Update stats. */
1825 u64_stats_update_begin(&stats->syncp);
1910 stats->tx_packets += num_segs; 1826 stats->tx_packets += num_segs;
1911 stats->tx_bytes += (num_segs * sh_len) + d_len; 1827 stats->tx_bytes += (num_segs * sh_len) + d_len;
1828 u64_stats_update_end(&stats->syncp);
1912 1829
1913 /* Make sure the egress timer is scheduled. */ 1830 /* Make sure the egress timer is scheduled. */
1914 tile_net_schedule_egress_timer(info); 1831 tile_net_schedule_egress_timer(info);
@@ -1936,7 +1853,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1936 1853
1937 unsigned int csum_start = skb_checksum_start_offset(skb); 1854 unsigned int csum_start = skb_checksum_start_offset(skb);
1938 1855
1939 lepp_frag_t frags[LEPP_MAX_FRAGS]; 1856 lepp_frag_t frags[1 + MAX_SKB_FRAGS];
1940 1857
1941 unsigned int num_frags; 1858 unsigned int num_frags;
1942 1859
@@ -1951,7 +1868,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1951 unsigned int cmd_head, cmd_tail, cmd_next; 1868 unsigned int cmd_head, cmd_tail, cmd_next;
1952 unsigned int comp_tail; 1869 unsigned int comp_tail;
1953 1870
1954 lepp_cmd_t cmds[LEPP_MAX_FRAGS]; 1871 lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
1955 1872
1956 1873
1957 /* 1874 /*
@@ -2089,8 +2006,10 @@ busy:
2089 kfree_skb(olds[i]); 2006 kfree_skb(olds[i]);
2090 2007
2091 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ 2008 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
2009 u64_stats_update_begin(&stats->syncp);
2092 stats->tx_packets++; 2010 stats->tx_packets++;
2093 stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); 2011 stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
2012 u64_stats_update_end(&stats->syncp);
2094 2013
2095 /* Make sure the egress timer is scheduled. */ 2014 /* Make sure the egress timer is scheduled. */
2096 tile_net_schedule_egress_timer(info); 2015 tile_net_schedule_egress_timer(info);
@@ -2127,30 +2046,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2127 * 2046 *
2128 * Returns the address of the device statistics structure. 2047 * Returns the address of the device statistics structure.
2129 */ 2048 */
2130static struct net_device_stats *tile_net_get_stats(struct net_device *dev) 2049static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
2050 struct rtnl_link_stats64 *stats)
2131{ 2051{
2132 struct tile_net_priv *priv = netdev_priv(dev); 2052 struct tile_net_priv *priv = netdev_priv(dev);
2133 u32 rx_packets = 0; 2053 u64 rx_packets = 0, tx_packets = 0;
2134 u32 tx_packets = 0; 2054 u64 rx_bytes = 0, tx_bytes = 0;
2135 u32 rx_bytes = 0; 2055 u64 rx_errors = 0, rx_dropped = 0;
2136 u32 tx_bytes = 0;
2137 int i; 2056 int i;
2138 2057
2139 for_each_online_cpu(i) { 2058 for_each_online_cpu(i) {
2140 if (priv->cpu[i]) { 2059 struct tile_net_stats_t *cpu_stats;
2141 rx_packets += priv->cpu[i]->stats.rx_packets; 2060 u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
2142 rx_bytes += priv->cpu[i]->stats.rx_bytes; 2061 u64 trx_errors, trx_dropped;
2143 tx_packets += priv->cpu[i]->stats.tx_packets; 2062 unsigned int start;
2144 tx_bytes += priv->cpu[i]->stats.tx_bytes; 2063
2145 } 2064 if (priv->cpu[i] == NULL)
2065 continue;
2066 cpu_stats = &priv->cpu[i]->stats;
2067
2068 do {
2069 start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
2070 trx_packets = cpu_stats->rx_packets;
2071 ttx_packets = cpu_stats->tx_packets;
2072 trx_bytes = cpu_stats->rx_bytes;
2073 ttx_bytes = cpu_stats->tx_bytes;
2074 trx_errors = cpu_stats->rx_errors;
2075 trx_dropped = cpu_stats->rx_dropped;
2076 } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
2077
2078 rx_packets += trx_packets;
2079 tx_packets += ttx_packets;
2080 rx_bytes += trx_bytes;
2081 tx_bytes += ttx_bytes;
2082 rx_errors += trx_errors;
2083 rx_dropped += trx_dropped;
2146 } 2084 }
2147 2085
2148 priv->stats.rx_packets = rx_packets; 2086 stats->rx_packets = rx_packets;
2149 priv->stats.rx_bytes = rx_bytes; 2087 stats->tx_packets = tx_packets;
2150 priv->stats.tx_packets = tx_packets; 2088 stats->rx_bytes = rx_bytes;
2151 priv->stats.tx_bytes = tx_bytes; 2089 stats->tx_bytes = tx_bytes;
2090 stats->rx_errors = rx_errors;
2091 stats->rx_dropped = rx_dropped;
2152 2092
2153 return &priv->stats; 2093 return stats;
2154} 2094}
2155 2095
2156 2096
@@ -2287,7 +2227,7 @@ static const struct net_device_ops tile_net_ops = {
2287 .ndo_stop = tile_net_stop, 2227 .ndo_stop = tile_net_stop,
2288 .ndo_start_xmit = tile_net_tx, 2228 .ndo_start_xmit = tile_net_tx,
2289 .ndo_do_ioctl = tile_net_ioctl, 2229 .ndo_do_ioctl = tile_net_ioctl,
2290 .ndo_get_stats = tile_net_get_stats, 2230 .ndo_get_stats64 = tile_net_get_stats64,
2291 .ndo_change_mtu = tile_net_change_mtu, 2231 .ndo_change_mtu = tile_net_change_mtu,
2292 .ndo_tx_timeout = tile_net_tx_timeout, 2232 .ndo_tx_timeout = tile_net_tx_timeout,
2293 .ndo_set_mac_address = tile_net_set_mac_address, 2233 .ndo_set_mac_address = tile_net_set_mac_address,
@@ -2305,39 +2245,30 @@ static const struct net_device_ops tile_net_ops = {
2305 */ 2245 */
2306static void tile_net_setup(struct net_device *dev) 2246static void tile_net_setup(struct net_device *dev)
2307{ 2247{
2308 PDEBUG("tile_net_setup()\n"); 2248 netdev_features_t features = 0;
2309 2249
2310 ether_setup(dev); 2250 ether_setup(dev);
2311
2312 dev->netdev_ops = &tile_net_ops; 2251 dev->netdev_ops = &tile_net_ops;
2313
2314 dev->watchdog_timeo = TILE_NET_TIMEOUT; 2252 dev->watchdog_timeo = TILE_NET_TIMEOUT;
2253 dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
2254 dev->mtu = TILE_NET_MTU;
2315 2255
2316 /* We want lockless xmit. */ 2256 features |= NETIF_F_HW_CSUM;
2317 dev->features |= NETIF_F_LLTX; 2257 features |= NETIF_F_SG;
2318
2319 /* We support hardware tx checksums. */
2320 dev->features |= NETIF_F_HW_CSUM;
2321
2322 /* We support scatter/gather. */
2323 dev->features |= NETIF_F_SG;
2324
2325 /* We support TSO. */
2326 dev->features |= NETIF_F_TSO;
2327 2258
2328#ifdef TILE_NET_GSO 2259 /* We support TSO iff the HV supports sufficient frags. */
2329 /* We support GSO. */ 2260 if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
2330 dev->features |= NETIF_F_GSO; 2261 features |= NETIF_F_TSO;
2331#endif
2332 2262
2263 /* We can't support HIGHDMA without hash_default, since we need
2264 * to be able to finv() with a VA if we don't have hash_default.
2265 */
2333 if (hash_default) 2266 if (hash_default)
2334 dev->features |= NETIF_F_HIGHDMA; 2267 features |= NETIF_F_HIGHDMA;
2335
2336 /* ISSUE: We should support NETIF_F_UFO. */
2337 2268
2338 dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; 2269 dev->hw_features |= features;
2339 2270 dev->vlan_features |= features;
2340 dev->mtu = TILE_NET_MTU; 2271 dev->features |= features;
2341} 2272}
2342 2273
2343 2274
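[Note on the tilepro conversion above] This is the standard per-cpu u64_stats_sync recipe: the counters become u64, each per-cpu block carries a struct u64_stats_sync, hot-path writers bracket updates with u64_stats_update_begin()/u64_stats_update_end(), and the ->ndo_get_stats64() reader loops with u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() until it sees a consistent snapshot. On 64-bit the sync object compiles away; on 32-bit it is a seqcount. The same file also stops advertising TSO unconditionally: tile_net_setup() now enables it only when LEPP_MAX_FRAGS can hold a worst-case skb of 1 + MAX_SKB_FRAGS fragments. A minimal sketch of the stats recipe, with illustrative "foo" names:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct foo_pcpu_stats {                 /* illustrative per-cpu block */
        u64 rx_packets;
        u64 rx_bytes;
        struct u64_stats_sync syncp;
};

/* Writer: per-cpu hot path, no atomics needed. */
static void foo_count_rx(struct foo_pcpu_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        s->rx_bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* Reader: retry until the writer was quiescent across the snapshot. */
static void foo_read_rx(struct foo_pcpu_stats *s, u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&s->syncp);
                *pkts = s->rx_packets;
                *bytes = s->rx_bytes;
        } while (u64_stats_fetch_retry_bh(&s->syncp, start));
}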
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01bdc6ca0755..c4dbf981804b 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,13 +1308,13 @@ static int tsi108_open(struct net_device *dev)
1308 data->id, dev->irq, dev->name); 1308 data->id, dev->irq, dev->name);
1309 } 1309 }
1310 1310
1311 data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma, 1311 data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma,
1312 GFP_KERNEL | __GFP_ZERO); 1312 GFP_KERNEL);
1313 if (!data->rxring) 1313 if (!data->rxring)
1314 return -ENOMEM; 1314 return -ENOMEM;
1315 1315
1316 data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma, 1316 data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
1317 GFP_KERNEL | __GFP_ZERO); 1317 GFP_KERNEL);
1318 if (!data->txring) { 1318 if (!data->txring) {
1319 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); 1319 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
1320 return -ENOMEM; 1320 return -ENOMEM;
@@ -1558,7 +1558,7 @@ tsi108_init_one(struct platform_device *pdev)
1558 hw_info *einfo; 1558 hw_info *einfo;
1559 int err = 0; 1559 int err = 0;
1560 1560
1561 einfo = pdev->dev.platform_data; 1561 einfo = dev_get_platdata(&pdev->dev);
1562 1562
1563 if (NULL == einfo) { 1563 if (NULL == einfo) {
1564 printk(KERN_ERR "tsi-eth %d: Missing additional data!\n", 1564 printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
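[Note] This is the first of several conversions in this pull (tsi108, the Xilinx MACs, defxx, the IrDA drivers) from dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) to dma_zalloc_coherent(), which makes the zeroing explicit rather than depending on each architecture's dma_alloc_coherent() honoring __GFP_ZERO. A minimal sketch, where "RING_BYTES" is an illustrative size constant:

#include <linux/dma-mapping.h>

static void *foo_alloc_ring(struct device *dev, dma_addr_t *handle)
{
        /* Zeroed, cache-coherent DMA memory, or NULL on failure;
         * released later with dma_free_coherent(). */
        return dma_zalloc_coherent(dev, RING_BYTES, handle, GFP_KERNEL);
}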
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index b75eb9e0e867..c8f088ab5fdf 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2407,7 +2407,7 @@ static struct pci_driver rhine_driver = {
2407 .driver.pm = RHINE_PM_OPS, 2407 .driver.pm = RHINE_PM_OPS,
2408}; 2408};
2409 2409
2410static struct dmi_system_id __initdata rhine_dmi_table[] = { 2410static struct dmi_system_id rhine_dmi_table[] __initdata = {
2411 { 2411 {
2412 .ident = "EPIA-M", 2412 .ident = "EPIA-M",
2413 .matches = { 2413 .matches = {
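[Note] The rhine change is pure style: for array definitions the kernel convention places section attributes such as __initdata after the declared name rather than between the type and the identifier, which is the form checkpatch accepts. Illustrative shape:

#include <linux/dmi.h>

/* Preferred attribute placement on an array definition: */
static struct dmi_system_id example_dmi_table[] __initdata = {
        { .ident = "EXAMPLE-BOARD" },   /* illustrative entry */
        { }                             /* zeroed terminator */
};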
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d01cacf8a7c2..d022bf936572 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2376,6 +2376,23 @@ out_0:
2376 return ret; 2376 return ret;
2377} 2377}
2378 2378
2379#ifdef CONFIG_NET_POLL_CONTROLLER
2380/**
2381 * velocity_poll_controller - Velocity Poll controller function
2382 * @dev: network device
2383 *
2384 *
2385 * Used by NETCONSOLE and other diagnostic tools to allow network I/O
2386 * with interrupts disabled.
2387 */
2388static void velocity_poll_controller(struct net_device *dev)
2389{
2390 disable_irq(dev->irq);
2391 velocity_intr(dev->irq, dev);
2392 enable_irq(dev->irq);
2393}
2394#endif
2395
2379/** 2396/**
2380 * velocity_mii_ioctl - MII ioctl handler 2397 * velocity_mii_ioctl - MII ioctl handler
2381 * @dev: network device 2398 * @dev: network device
@@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = {
2641 .ndo_do_ioctl = velocity_ioctl, 2658 .ndo_do_ioctl = velocity_ioctl,
2642 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, 2659 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
2643 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, 2660 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
2661#ifdef CONFIG_NET_POLL_CONTROLLER
2662 .ndo_poll_controller = velocity_poll_controller,
2663#endif
2644}; 2664};
2645 2665
2646/** 2666/**
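[Note] velocity_poll_controller() lets netpoll clients such as netconsole drive the NIC when normal interrupt delivery is unavailable: it masks the line with disable_irq(), invokes the regular handler velocity_intr() directly, then unmasks. Roughly, the netpoll core reaches the hook like this (a hedged paraphrase of the consumer side, not the exact 3.12 net/core/netpoll.c source):

/* Hedged sketch of the consumer side: */
static void example_poll_dev(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        if (ops->ndo_poll_controller)
                ops->ndo_poll_controller(dev);  /* e.g. velocity_poll_controller */
}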
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 30fed08d1674..0df36c6ec7f4 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -622,7 +622,7 @@ static const struct net_device_ops w5100_netdev_ops = {
622 622
623static int w5100_hw_probe(struct platform_device *pdev) 623static int w5100_hw_probe(struct platform_device *pdev)
624{ 624{
625 struct wiznet_platform_data *data = pdev->dev.platform_data; 625 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
626 struct net_device *ndev = platform_get_drvdata(pdev); 626 struct net_device *ndev = platform_get_drvdata(pdev);
627 struct w5100_priv *priv = netdev_priv(ndev); 627 struct w5100_priv *priv = netdev_priv(ndev);
628 const char *name = netdev_name(ndev); 628 const char *name = netdev_name(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index e92884564e1e..71c27b3292f1 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -542,7 +542,7 @@ static const struct net_device_ops w5300_netdev_ops = {
542 542
543static int w5300_hw_probe(struct platform_device *pdev) 543static int w5300_hw_probe(struct platform_device *pdev)
544{ 544{
545 struct wiznet_platform_data *data = pdev->dev.platform_data; 545 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
546 struct net_device *ndev = platform_get_drvdata(pdev); 546 struct net_device *ndev = platform_get_drvdata(pdev);
547 struct w5300_priv *priv = netdev_priv(ndev); 547 struct w5300_priv *priv = netdev_priv(ndev);
548 const char *name = netdev_name(ndev); 548 const char *name = netdev_name(ndev);
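[Note] Several drivers in this pull (tsi108, w5100, w5300, ixp4xx, mdio-gpio, mdio-mux-gpio) switch from dereferencing pdev->dev.platform_data directly to the dev_get_platdata() accessor. Behavior is identical; the helper just keeps drivers independent of struct device layout. Minimal sketch, with "foo_platform_data" as an illustrative type:

#include <linux/platform_device.h>

struct foo_platform_data {      /* illustrative */
        int bus_id;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_platform_data *pdata = dev_get_platdata(&pdev->dev);

        if (!pdata)             /* board code registered no platform data */
                return -ENODEV;
        return pdata->bus_id >= 0 ? 0 : -EINVAL;
}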
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 58eb4488beff..b88121f240ca 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
243 243
244 /* allocate the tx and rx ring buffer descriptors. */ 244 /* allocate the tx and rx ring buffer descriptors. */
245 /* returns a virtual address and a physical address. */ 245 /* returns a virtual address and a physical address. */
246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 246 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
247 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 247 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
248 &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO); 248 &lp->tx_bd_p, GFP_KERNEL);
249 if (!lp->tx_bd_v) 249 if (!lp->tx_bd_v)
250 goto out; 250 goto out;
251 251
252 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 252 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
253 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 253 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
254 &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO); 254 &lp->rx_bd_p, GFP_KERNEL);
255 if (!lp->rx_bd_v) 255 if (!lp->rx_bd_v)
256 goto out; 256 goto out;
257 257
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fb7d1c28a2ea..b2ff038d6d20 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -201,17 +201,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
201 /* 201 /*
202 * Allocate the Tx and Rx buffer descriptors. 202 * Allocate the Tx and Rx buffer descriptors.
203 */ 203 */
204 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 204 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
205 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 205 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
206 &lp->tx_bd_p, 206 &lp->tx_bd_p, GFP_KERNEL);
207 GFP_KERNEL | __GFP_ZERO);
208 if (!lp->tx_bd_v) 207 if (!lp->tx_bd_v)
209 goto out; 208 goto out;
210 209
211 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 210 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
212 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 211 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
213 &lp->rx_bd_p, 212 &lp->rx_bd_p, GFP_KERNEL);
214 GFP_KERNEL | __GFP_ZERO);
215 if (!lp->rx_bd_v) 213 if (!lp->rx_bd_v)
216 goto out; 214 goto out;
217 215
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fd4dbdae5331..4c619ea5189f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1230,8 +1230,7 @@ error:
1230 */ 1230 */
1231static int xemaclite_of_remove(struct platform_device *of_dev) 1231static int xemaclite_of_remove(struct platform_device *of_dev)
1232{ 1232{
1233 struct device *dev = &of_dev->dev; 1233 struct net_device *ndev = platform_get_drvdata(of_dev);
1234 struct net_device *ndev = dev_get_drvdata(dev);
1235 1234
1236 struct net_local *lp = netdev_priv(ndev); 1235 struct net_local *lp = netdev_priv(ndev);
1237 1236
@@ -1250,7 +1249,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
1250 lp->phy_node = NULL; 1249 lp->phy_node = NULL;
1251 1250
1252 xemaclite_remove_ndev(ndev, of_dev); 1251 xemaclite_remove_ndev(ndev, of_dev);
1253 dev_set_drvdata(dev, NULL);
1254 1252
1255 return 0; 1253 return 0;
1256} 1254}
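[Note] Two cleanups in the emaclite remove path: the net_device is fetched with platform_get_drvdata() instead of open-coding dev_get_drvdata(&of_dev->dev), and the trailing dev_set_drvdata(dev, NULL) is dropped because the driver core of this era clears drvdata itself when a device is unbound. Sketch of the trimmed shape, with illustrative "foo" names:

#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int foo_of_remove(struct platform_device *of_dev)
{
        struct net_device *ndev = platform_get_drvdata(of_dev);

        unregister_netdev(ndev);
        free_netdev(ndev);
        /* No dev_set_drvdata(..., NULL): the core clears it on unbind. */
        return 0;
}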
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3d689fcb7917..e78802e75ea6 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1384,7 +1384,7 @@ static int eth_init_one(struct platform_device *pdev)
1384{ 1384{
1385 struct port *port; 1385 struct port *port;
1386 struct net_device *dev; 1386 struct net_device *dev;
1387 struct eth_plat_info *plat = pdev->dev.platform_data; 1387 struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
1388 u32 regs_phys; 1388 u32 regs_phys;
1389 char phy_id[MII_BUS_ID_SIZE + 3]; 1389 char phy_id[MII_BUS_ID_SIZE + 3];
1390 int err; 1390 int err;
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4c8ddc944d51..0b40e1c46f07 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1068,9 +1068,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
1068#endif 1068#endif
1069 sizeof(PI_CONSUMER_BLOCK) + 1069 sizeof(PI_CONSUMER_BLOCK) +
1070 (PI_ALIGN_K_DESC_BLK - 1); 1070 (PI_ALIGN_K_DESC_BLK - 1);
1071 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, 1071 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
1072 &bp->kmalloced_dma, 1072 &bp->kmalloced_dma,
1073 GFP_ATOMIC | __GFP_ZERO); 1073 GFP_ATOMIC);
1074 if (top_v == NULL) 1074 if (top_v == NULL)
1075 return DFX_K_FAILURE; 1075 return DFX_K_FAILURE;
1076 1076
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 3adb43ce138f..7bbd318bc93e 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -351,16 +351,16 @@ static int ali_ircc_open(int i, chipio_t *info)
351 351
352 /* Allocate memory if needed */ 352 /* Allocate memory if needed */
353 self->rx_buff.head = 353 self->rx_buff.head =
354 dma_alloc_coherent(NULL, self->rx_buff.truesize, 354 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
355 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); 355 &self->rx_buff_dma, GFP_KERNEL);
356 if (self->rx_buff.head == NULL) { 356 if (self->rx_buff.head == NULL) {
357 err = -ENOMEM; 357 err = -ENOMEM;
358 goto err_out2; 358 goto err_out2;
359 } 359 }
360 360
361 self->tx_buff.head = 361 self->tx_buff.head =
362 dma_alloc_coherent(NULL, self->tx_buff.truesize, 362 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
363 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); 363 &self->tx_buff_dma, GFP_KERNEL);
364 if (self->tx_buff.head == NULL) { 364 if (self->tx_buff.head == NULL) {
365 err = -ENOMEM; 365 err = -ENOMEM;
366 goto err_out3; 366 goto err_out3;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 9cf836b57c49..ceeb53737f86 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -430,8 +430,8 @@ static int __init nsc_ircc_open(chipio_t *info)
430 430
431 /* Allocate memory if needed */ 431 /* Allocate memory if needed */
432 self->rx_buff.head = 432 self->rx_buff.head =
433 dma_alloc_coherent(NULL, self->rx_buff.truesize, 433 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
434 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); 434 &self->rx_buff_dma, GFP_KERNEL);
435 if (self->rx_buff.head == NULL) { 435 if (self->rx_buff.head == NULL) {
436 err = -ENOMEM; 436 err = -ENOMEM;
437 goto out2; 437 goto out2;
@@ -439,8 +439,8 @@ static int __init nsc_ircc_open(chipio_t *info)
439 } 439 }
440 440
441 self->tx_buff.head = 441 self->tx_buff.head =
442 dma_alloc_coherent(NULL, self->tx_buff.truesize, 442 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
443 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); 443 &self->tx_buff_dma, GFP_KERNEL);
444 if (self->tx_buff.head == NULL) { 444 if (self->tx_buff.head == NULL) {
445 err = -ENOMEM; 445 err = -ENOMEM;
446 goto out3; 446 goto out3;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 964b116a0ab7..3eeaaf800494 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -915,7 +915,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
915 err = register_netdev(dev); 915 err = register_netdev(dev);
916 916
917 if (err == 0) 917 if (err == 0)
918 dev_set_drvdata(&pdev->dev, dev); 918 platform_set_drvdata(pdev, dev);
919 919
920 if (err) { 920 if (err) {
921 if (si->pdata->shutdown) 921 if (si->pdata->shutdown)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index aa05dad75335..0dcdf1592f6b 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -562,14 +562,14 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
562 self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE; 562 self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
563 563
564 self->rx_buff.head = 564 self->rx_buff.head =
565 dma_alloc_coherent(NULL, self->rx_buff.truesize, 565 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
566 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); 566 &self->rx_buff_dma, GFP_KERNEL);
567 if (self->rx_buff.head == NULL) 567 if (self->rx_buff.head == NULL)
568 goto err_out2; 568 goto err_out2;
569 569
570 self->tx_buff.head = 570 self->tx_buff.head =
571 dma_alloc_coherent(NULL, self->tx_buff.truesize, 571 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
572 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); 572 &self->tx_buff_dma, GFP_KERNEL);
573 if (self->tx_buff.head == NULL) 573 if (self->tx_buff.head == NULL)
574 goto err_out3; 574 goto err_out3;
575 575
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2dcc60fb37f1..9abaec27f962 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -361,16 +361,16 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
361 361
362 /* Allocate memory if needed */ 362 /* Allocate memory if needed */
363 self->rx_buff.head = 363 self->rx_buff.head =
364 dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize, 364 dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
365 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); 365 &self->rx_buff_dma, GFP_KERNEL);
366 if (self->rx_buff.head == NULL) { 366 if (self->rx_buff.head == NULL) {
367 err = -ENOMEM; 367 err = -ENOMEM;
368 goto err_out2; 368 goto err_out2;
369 } 369 }
370 370
371 self->tx_buff.head = 371 self->tx_buff.head =
372 dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize, 372 dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
373 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); 373 &self->tx_buff_dma, GFP_KERNEL);
374 if (self->tx_buff.head == NULL) { 374 if (self->tx_buff.head == NULL) {
375 err = -ENOMEM; 375 err = -ENOMEM;
376 goto err_out3; 376 goto err_out3;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index bb8857a158a6..e641bb240362 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -215,16 +215,16 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
215 215
216 /* Allocate memory if needed */ 216 /* Allocate memory if needed */
217 self->rx_buff.head = 217 self->rx_buff.head =
218 dma_alloc_coherent(NULL, self->rx_buff.truesize, 218 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
219 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); 219 &self->rx_buff_dma, GFP_KERNEL);
220 if (self->rx_buff.head == NULL) { 220 if (self->rx_buff.head == NULL) {
221 err = -ENOMEM; 221 err = -ENOMEM;
222 goto err_out1; 222 goto err_out1;
223 } 223 }
224 224
225 self->tx_buff.head = 225 self->tx_buff.head =
226 dma_alloc_coherent(NULL, self->tx_buff.truesize, 226 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
227 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); 227 &self->tx_buff_dma, GFP_KERNEL);
228 if (self->tx_buff.head == NULL) { 228 if (self->tx_buff.head == NULL) {
229 err = -ENOMEM; 229 err = -ENOMEM;
230 goto err_out2; 230 goto err_out2;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 16b43bf544b7..64dfaa303dcc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -600,6 +600,9 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
600 if (!vlan->port->passthru) 600 if (!vlan->port->passthru)
601 return -EOPNOTSUPP; 601 return -EOPNOTSUPP;
602 602
603 if (flags & NLM_F_REPLACE)
604 return -EOPNOTSUPP;
605
603 if (is_unicast_ether_addr(addr)) 606 if (is_unicast_ether_addr(addr))
604 err = dev_uc_add_excl(dev, addr); 607 err = dev_uc_add_excl(dev, addr);
605 else if (is_multicast_ether_addr(addr)) 608 else if (is_multicast_ether_addr(addr))
@@ -683,7 +686,7 @@ void macvlan_common_setup(struct net_device *dev)
683 dev->priv_flags |= IFF_UNICAST_FLT; 686 dev->priv_flags |= IFF_UNICAST_FLT;
684 dev->netdev_ops = &macvlan_netdev_ops; 687 dev->netdev_ops = &macvlan_netdev_ops;
685 dev->destructor = free_netdev; 688 dev->destructor = free_netdev;
686 dev->header_ops = &macvlan_hard_header_ops, 689 dev->header_ops = &macvlan_hard_header_ops;
687 dev->ethtool_ops = &macvlan_ethtool_ops; 690 dev->ethtool_ops = &macvlan_ethtool_ops;
688} 691}
689EXPORT_SYMBOL_GPL(macvlan_common_setup); 692EXPORT_SYMBOL_GPL(macvlan_common_setup);
@@ -820,7 +823,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
820 if (port->count) 823 if (port->count)
821 return -EINVAL; 824 return -EINVAL;
822 port->passthru = true; 825 port->passthru = true;
823 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); 826 eth_hw_addr_inherit(dev, lowerdev);
824 } 827 }
825 828
826 err = netdev_upper_dev_link(lowerdev, dev); 829 err = netdev_upper_dev_link(lowerdev, dev);
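[Note] Besides rejecting NLM_F_REPLACE for FDB entries and switching to the eth_hw_addr_inherit() helper, the subtle macvlan fix is the one-character header_ops change: a trailing comma instead of a semicolon had fused two assignments into a single comma-operator expression. It still assigned both pointers, which is exactly why the typo survived; a small illustration (not macvlan code):

/* The comma operator makes this one legal expression statement, so both
 * stores still happen and the bug hides: */
int a, b;
a = 1,          /* comma where a semicolon was meant */
b = 2;          /* still executes; afterwards a == 1 and b == 2 */

The hazard is that a comma-joined statement scopes differently under a later-added `if` than the two semicolon-terminated statements the author meant, so it is worth fixing even when the current behavior is correct.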
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ea53abb20988..9dccb1edfd2a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -529,7 +529,7 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
529 linear = len; 529 linear = len;
530 530
531 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 531 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
532 err); 532 err, 0);
533 if (!skb) 533 if (!skb)
534 return NULL; 534 return NULL;
535 535
@@ -541,86 +541,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
541 return skb; 541 return skb;
542} 542}
543 543
544/* set skb frags from iovec, this can move to core network code for reuse */
545static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
546 int offset, size_t count)
547{
548 int len = iov_length(from, count) - offset;
549 int copy = skb_headlen(skb);
550 int size, offset1 = 0;
551 int i = 0;
552
553 /* Skip over from offset */
554 while (count && (offset >= from->iov_len)) {
555 offset -= from->iov_len;
556 ++from;
557 --count;
558 }
559
560 /* copy up to skb headlen */
561 while (count && (copy > 0)) {
562 size = min_t(unsigned int, copy, from->iov_len - offset);
563 if (copy_from_user(skb->data + offset1, from->iov_base + offset,
564 size))
565 return -EFAULT;
566 if (copy > size) {
567 ++from;
568 --count;
569 offset = 0;
570 } else
571 offset += size;
572 copy -= size;
573 offset1 += size;
574 }
575
576 if (len == offset1)
577 return 0;
578
579 while (count--) {
580 struct page *page[MAX_SKB_FRAGS];
581 int num_pages;
582 unsigned long base;
583 unsigned long truesize;
584
585 len = from->iov_len - offset;
586 if (!len) {
587 offset = 0;
588 ++from;
589 continue;
590 }
591 base = (unsigned long)from->iov_base + offset;
592 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
593 if (i + size > MAX_SKB_FRAGS)
594 return -EMSGSIZE;
595 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
596 if (num_pages != size) {
597 int j;
598
599 for (j = 0; j < num_pages; j++)
600 put_page(page[i + j]);
601 return -EFAULT;
602 }
603 truesize = size * PAGE_SIZE;
604 skb->data_len += len;
605 skb->len += len;
606 skb->truesize += truesize;
607 atomic_add(truesize, &skb->sk->sk_wmem_alloc);
608 while (len) {
609 int off = base & ~PAGE_MASK;
610 int size = min_t(int, len, PAGE_SIZE - off);
611 __skb_fill_page_desc(skb, i, page[i], off, size);
612 skb_shinfo(skb)->nr_frags++;
613 /* increase sk_wmem_alloc */
614 base += size;
615 len -= size;
616 i++;
617 }
618 offset = 0;
619 ++from;
620 }
621 return 0;
622}
623
624/* 544/*
625 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should 545 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
626 * be shared with the tun/tap driver. 546 * be shared with the tun/tap driver.
@@ -703,29 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
703 return 0; 623 return 0;
704} 624}
705 625
706static unsigned long iov_pages(const struct iovec *iv, int offset,
707 unsigned long nr_segs)
708{
709 unsigned long seg, base;
710 int pages = 0, len, size;
711
712 while (nr_segs && (offset >= iv->iov_len)) {
713 offset -= iv->iov_len;
714 ++iv;
715 --nr_segs;
716 }
717
718 for (seg = 0; seg < nr_segs; seg++) {
719 base = (unsigned long)iv[seg].iov_base + offset;
720 len = iv[seg].iov_len - offset;
721 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
722 pages += size;
723 offset = 0;
724 }
725
726 return pages;
727}
728
729/* Get packet from user space buffer */ 626/* Get packet from user space buffer */
730static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, 627static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
731 const struct iovec *iv, unsigned long total_len, 628 const struct iovec *iv, unsigned long total_len,
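[Note] Two separate things happen in the macvtap hunks. First, sock_alloc_send_pskb() grows a final argument, passed as 0 here; from context this appears to be a new maximum page order for the paged part of the skb, with 0 preserving the old order-0 behavior (an assumption; the prototype lives in include/net/sock.h). Second, the driver's private zerocopy_sg_from_iovec() and iov_pages() copies are deleted; the removed "this can move to core network code for reuse" comment says why, since tun carries duplicates of the same helpers. A hedged sketch of a caller, assuming a consolidated helper kept the local copy's signature:

static int foo_fill_zerocopy(struct sk_buff *skb, const struct iovec *iv,
                             int offset, size_t count)
{
        /* Assumption: shared helper with the same (skb, iovec, byte
         * offset, segment count) signature as the deleted local copy. */
        return zerocopy_sg_from_iovec(skb, iv, offset, count);
}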
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4822aafe638b..dcb21347c670 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -102,6 +102,7 @@ struct netconsole_target {
102 struct config_item item; 102 struct config_item item;
103#endif 103#endif
104 int enabled; 104 int enabled;
105 struct mutex mutex;
105 struct netpoll np; 106 struct netpoll np;
106}; 107};
107 108
@@ -181,6 +182,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
181 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); 182 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
182 nt->np.local_port = 6665; 183 nt->np.local_port = 6665;
183 nt->np.remote_port = 6666; 184 nt->np.remote_port = 6666;
185 mutex_init(&nt->mutex);
184 memset(nt->np.remote_mac, 0xff, ETH_ALEN); 186 memset(nt->np.remote_mac, 0xff, ETH_ALEN);
185 187
186 /* Parse parameters and setup netpoll */ 188 /* Parse parameters and setup netpoll */
@@ -322,6 +324,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
322 return -EINVAL; 324 return -EINVAL;
323 } 325 }
324 326
327 mutex_lock(&nt->mutex);
325 if (enabled) { /* 1 */ 328 if (enabled) { /* 1 */
326 329
327 /* 330 /*
@@ -331,8 +334,10 @@ static ssize_t store_enabled(struct netconsole_target *nt,
331 netpoll_print_options(&nt->np); 334 netpoll_print_options(&nt->np);
332 335
333 err = netpoll_setup(&nt->np); 336 err = netpoll_setup(&nt->np);
334 if (err) 337 if (err) {
338 mutex_unlock(&nt->mutex);
335 return err; 339 return err;
340 }
336 341
337 printk(KERN_INFO "netconsole: network logging started\n"); 342 printk(KERN_INFO "netconsole: network logging started\n");
338 343
@@ -341,6 +346,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
341 } 346 }
342 347
343 nt->enabled = enabled; 348 nt->enabled = enabled;
349 mutex_unlock(&nt->mutex);
344 350
345 return strnlen(buf, count); 351 return strnlen(buf, count);
346} 352}
@@ -597,6 +603,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
597 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); 603 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
598 nt->np.local_port = 6665; 604 nt->np.local_port = 6665;
599 nt->np.remote_port = 6666; 605 nt->np.remote_port = 6666;
606 mutex_init(&nt->mutex);
600 memset(nt->np.remote_mac, 0xff, ETH_ALEN); 607 memset(nt->np.remote_mac, 0xff, ETH_ALEN);
601 608
602 /* Initialize the config_item member */ 609 /* Initialize the config_item member */
@@ -682,7 +689,11 @@ restart:
682 * we might sleep in __netpoll_cleanup() 689 * we might sleep in __netpoll_cleanup()
683 */ 690 */
684 spin_unlock_irqrestore(&target_list_lock, flags); 691 spin_unlock_irqrestore(&target_list_lock, flags);
692
693 mutex_lock(&nt->mutex);
685 __netpoll_cleanup(&nt->np); 694 __netpoll_cleanup(&nt->np);
695 mutex_unlock(&nt->mutex);
696
686 spin_lock_irqsave(&target_list_lock, flags); 697 spin_lock_irqsave(&target_list_lock, flags);
687 dev_put(nt->np.dev); 698 dev_put(nt->np.dev);
688 nt->np.dev = NULL; 699 nt->np.dev = NULL;
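[Note] The new per-target mutex exists because netpoll_setup() and __netpoll_cleanup() can sleep, so they must not run under target_list_lock (a spinlock). The cleanup path therefore drops the spinlock, serializes the sleeping call against store_enabled() with the mutex, and reacquires the spinlock to restart the scan. Fragment restating that shape:

/* Drop the spinlock before anything that can sleep; serialize the
 * sleeper with the per-target mutex instead. */
spin_unlock_irqrestore(&target_list_lock, flags);

mutex_lock(&nt->mutex);         /* vs. store_enabled() on the same target */
__netpoll_cleanup(&nt->np);     /* may sleep */
mutex_unlock(&nt->mutex);

spin_lock_irqsave(&target_list_lock, flags);    /* then restart the scan */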
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index a47f9236d966..8004acbef2c9 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -191,7 +191,7 @@ static int mdio_gpio_probe(struct platform_device *pdev)
191 pdata = mdio_gpio_of_get_data(pdev); 191 pdata = mdio_gpio_of_get_data(pdev);
192 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); 192 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
193 } else { 193 } else {
194 pdata = pdev->dev.platform_data; 194 pdata = dev_get_platdata(&pdev->dev);
195 bus_id = pdev->id; 195 bus_id = pdev->id;
196 } 196 }
197 197
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e91d7d736ae2..d2dd9e473e2c 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -106,7 +106,7 @@ err:
106 106
107static int mdio_mux_gpio_remove(struct platform_device *pdev) 107static int mdio_mux_gpio_remove(struct platform_device *pdev)
108{ 108{
109 struct mdio_mux_gpio_state *s = pdev->dev.platform_data; 109 struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
110 mdio_mux_uninit(s->mux_handle); 110 mdio_mux_uninit(s->mux_handle);
111 return 0; 111 return 0;
112} 112}
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 9733bd239a86..f8e305d8da76 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -48,7 +48,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
48 struct mdio_mux_mmioreg_state *s = data; 48 struct mdio_mux_mmioreg_state *s = data;
49 49
50 if (current_child ^ desired_child) { 50 if (current_child ^ desired_child) {
51 void *p = ioremap(s->phys, 1); 51 void __iomem *p = ioremap(s->phys, 1);
52 uint8_t x, y; 52 uint8_t x, y;
53 53
54 if (!p) 54 if (!p)
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index b51fa1f469b0..6aee02ed97ac 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -222,7 +222,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
222 bus->mii_bus->read = octeon_mdiobus_read; 222 bus->mii_bus->read = octeon_mdiobus_read;
223 bus->mii_bus->write = octeon_mdiobus_write; 223 bus->mii_bus->write = octeon_mdiobus_write;
224 224
225 dev_set_drvdata(&pdev->dev, bus); 225 platform_set_drvdata(pdev, bus);
226 226
227 err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node); 227 err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node);
228 if (err) 228 if (err)
@@ -244,7 +244,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
244 struct octeon_mdiobus *bus; 244 struct octeon_mdiobus *bus;
245 union cvmx_smix_en smi_en; 245 union cvmx_smix_en smi_en;
246 246
247 bus = dev_get_drvdata(&pdev->dev); 247 bus = platform_get_drvdata(pdev);
248 248
249 mdiobus_unregister(bus->mii_bus); 249 mdiobus_unregister(bus->mii_bus);
250 mdiobus_free(bus->mii_bus); 250 mdiobus_free(bus->mii_bus);
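[Note] The octeon change (like the pxaficp one earlier) settles on the matched platform accessor pair: platform_set_drvdata() in probe, platform_get_drvdata() in remove, both thin wrappers over the generic dev_*_drvdata() calls. Sketch, where struct foo_bus and foo_alloc()/foo_free() are illustrative:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct foo_bus *bus = foo_alloc(pdev);  /* illustrative */

        if (!bus)
                return -ENOMEM;
        platform_set_drvdata(pdev, bus);        /* stash for remove() */
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_bus *bus = platform_get_drvdata(pdev);

        foo_free(bus);                          /* illustrative */
        return 0;
}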
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 7f25e49ae37f..18969b3ad8bb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -101,6 +101,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
101 struct device_node *np = pdev->dev.of_node; 101 struct device_node *np = pdev->dev.of_node;
102 struct mii_bus *bus; 102 struct mii_bus *bus;
103 struct sun4i_mdio_data *data; 103 struct sun4i_mdio_data *data;
104 struct resource *res;
104 int ret, i; 105 int ret, i;
105 106
106 bus = mdiobus_alloc_size(sizeof(*data)); 107 bus = mdiobus_alloc_size(sizeof(*data));
@@ -114,7 +115,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
114 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); 115 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
115 bus->parent = &pdev->dev; 116 bus->parent = &pdev->dev;
116 117
117 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 118 bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
119 GFP_KERNEL);
118 if (!bus->irq) { 120 if (!bus->irq) {
119 ret = -ENOMEM; 121 ret = -ENOMEM;
120 goto err_out_free_mdiobus; 122 goto err_out_free_mdiobus;
@@ -124,10 +126,11 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
124 bus->irq[i] = PHY_POLL; 126 bus->irq[i] = PHY_POLL;
125 127
126 data = bus->priv; 128 data = bus->priv;
127 data->membase = of_iomap(np, 0); 129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
128 if (!data->membase) { 130 data->membase = devm_ioremap_resource(&pdev->dev, res);
129 ret = -ENOMEM; 131 if (IS_ERR(data->membase)) {
130 goto err_out_free_mdio_irq; 132 ret = PTR_ERR(data->membase);
133 goto err_out_free_mdiobus;
131 } 134 }
132 135
133 data->regulator = devm_regulator_get(&pdev->dev, "phy"); 136 data->regulator = devm_regulator_get(&pdev->dev, "phy");
@@ -139,7 +142,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
139 } else { 142 } else {
140 ret = regulator_enable(data->regulator); 143 ret = regulator_enable(data->regulator);
141 if (ret) 144 if (ret)
142 goto err_out_free_mdio_irq; 145 goto err_out_free_mdiobus;
143 } 146 }
144 147
145 ret = of_mdiobus_register(bus, np); 148 ret = of_mdiobus_register(bus, np);
@@ -152,8 +155,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
152 155
153err_out_disable_regulator: 156err_out_disable_regulator:
154 regulator_disable(data->regulator); 157 regulator_disable(data->regulator);
155err_out_free_mdio_irq:
156 kfree(bus->irq);
157err_out_free_mdiobus: 158err_out_free_mdiobus:
158 mdiobus_free(bus); 159 mdiobus_free(bus);
159 return ret; 160 return ret;
@@ -164,7 +165,6 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
164 struct mii_bus *bus = platform_get_drvdata(pdev); 165 struct mii_bus *bus = platform_get_drvdata(pdev);
165 166
166 mdiobus_unregister(bus); 167 mdiobus_unregister(bus);
167 kfree(bus->irq);
168 mdiobus_free(bus); 168 mdiobus_free(bus);
169 169
170 return 0; 170 return 0;
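[Note] The sun4i probe converts bare kmalloc()/of_iomap() to managed devm_kzalloc()/devm_ioremap_resource(), which the driver core frees automatically on detach; that is what lets the err_out_free_mdio_irq label and both kfree(bus->irq) calls disappear. Note the return convention change as well: devm_ioremap_resource() validates the resource and returns an ERR_PTR, hence IS_ERR()/PTR_ERR() instead of a NULL check. Minimal sketch:

#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);  /* checks res too */
        if (IS_ERR(base))
                return PTR_ERR(base);   /* no unwind label needed */
        return 0;
}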
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2510435f34ed..c31aad0004cb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/phy.h> 26#include <linux/phy.h>
27#include <linux/micrel_phy.h> 27#include <linux/micrel_phy.h>
28#include <linux/of.h>
28 29
29/* Operation Mode Strap Override */ 30/* Operation Mode Strap Override */
30#define MII_KSZPHY_OMSO 0x16 31#define MII_KSZPHY_OMSO 0x16
@@ -53,6 +54,20 @@
53#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) 54#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
54#define KSZ8051_RMII_50MHZ_CLK (1 << 7) 55#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
55 56
57/* Write/read to/from extended registers */
58#define MII_KSZPHY_EXTREG 0x0b
59#define KSZPHY_EXTREG_WRITE 0x8000
60
61#define MII_KSZPHY_EXTREG_WRITE 0x0c
62#define MII_KSZPHY_EXTREG_READ 0x0d
63
64/* Extended registers */
65#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
66#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
67#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
68
69#define PS_TO_REG 200
70
56static int ksz_config_flags(struct phy_device *phydev) 71static int ksz_config_flags(struct phy_device *phydev)
57{ 72{
58 int regval; 73 int regval;
@@ -65,6 +80,20 @@ static int ksz_config_flags(struct phy_device *phydev)
65 return 0; 80 return 0;
66} 81}
67 82
83static int kszphy_extended_write(struct phy_device *phydev,
84 u32 regnum, u16 val)
85{
86 phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
87 return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
88}
89
90static int kszphy_extended_read(struct phy_device *phydev,
91 u32 regnum)
92{
93 phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
94 return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
95}
96
68static int kszphy_ack_interrupt(struct phy_device *phydev) 97static int kszphy_ack_interrupt(struct phy_device *phydev)
69{ 98{
70 /* bit[7..0] int status, which is a read and clear register. */ 99 /* bit[7..0] int status, which is a read and clear register. */
@@ -141,10 +170,82 @@ static int ks8051_config_init(struct phy_device *phydev)
141 return rc < 0 ? rc : 0; 170 return rc < 0 ? rc : 0;
142} 171}
143 172
173static int ksz9021_load_values_from_of(struct phy_device *phydev,
174 struct device_node *of_node, u16 reg,
175 char *field1, char *field2,
176 char *field3, char *field4)
177{
178 int val1 = -1;
179 int val2 = -2;
180 int val3 = -3;
181 int val4 = -4;
182 int newval;
183 int matches = 0;
184
185 if (!of_property_read_u32(of_node, field1, &val1))
186 matches++;
187
188 if (!of_property_read_u32(of_node, field2, &val2))
189 matches++;
190
191 if (!of_property_read_u32(of_node, field3, &val3))
192 matches++;
193
194 if (!of_property_read_u32(of_node, field4, &val4))
195 matches++;
196
197 if (!matches)
198 return 0;
199
200 if (matches < 4)
201 newval = kszphy_extended_read(phydev, reg);
202 else
203 newval = 0;
204
205 if (val1 != -1)
206 newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
207
208 if (val2 != -1)
209 newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
210
211 if (val3 != -1)
212 newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
213
214 if (val4 != -1)
215 newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
216
217 return kszphy_extended_write(phydev, reg, newval);
218}
219
220static int ksz9021_config_init(struct phy_device *phydev)
221{
222 struct device *dev = &phydev->dev;
223 struct device_node *of_node = dev->of_node;
224
225 if (!of_node && dev->parent->of_node)
226 of_node = dev->parent->of_node;
227
228 if (of_node) {
229 ksz9021_load_values_from_of(phydev, of_node,
230 MII_KSZPHY_CLK_CONTROL_PAD_SKEW,
231 "txen-skew-ps", "txc-skew-ps",
232 "rxdv-skew-ps", "rxc-skew-ps");
233 ksz9021_load_values_from_of(phydev, of_node,
234 MII_KSZPHY_RX_DATA_PAD_SKEW,
235 "rxd0-skew-ps", "rxd1-skew-ps",
236 "rxd2-skew-ps", "rxd3-skew-ps");
237 ksz9021_load_values_from_of(phydev, of_node,
238 MII_KSZPHY_TX_DATA_PAD_SKEW,
239 "txd0-skew-ps", "txd1-skew-ps",
240 "txd2-skew-ps", "txd3-skew-ps");
241 }
242 return 0;
243}
244
144#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 245#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
145#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6) 246#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
146#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4) 247#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
147int ksz8873mll_read_status(struct phy_device *phydev) 248static int ksz8873mll_read_status(struct phy_device *phydev)
148{ 249{
149 int regval; 250 int regval;
150 251
@@ -281,7 +382,7 @@ static struct phy_driver ksphy_driver[] = {
281 .name = "Micrel KSZ9021 Gigabit PHY", 382 .name = "Micrel KSZ9021 Gigabit PHY",
282 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), 383 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
283 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 384 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
284 .config_init = kszphy_config_init, 385 .config_init = ksz9021_config_init,
285 .config_aneg = genphy_config_aneg, 386 .config_aneg = genphy_config_aneg,
286 .read_status = genphy_read_status, 387 .read_status = genphy_read_status,
287 .ack_interrupt = kszphy_ack_interrupt, 388 .ack_interrupt = kszphy_ack_interrupt,
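[Note] ksz9021_config_init() reads pad-skew values in picoseconds from the device tree and packs them four per 16-bit extended register, one nibble per signal, at PS_TO_REG = 200 ps per step; the -1..-4 sentinels mark "property absent, keep the current field", and the register is only pre-read when some fields must be preserved. Worked example of the packing with illustrative skews of txen = 600 ps and txc = 3000 ps:

/* 600 / 200 = 3 and 3000 / 200 = 15 (0xf), each masked into its nibble: */
u16 reg = 0;

reg = (reg & 0xfff0) | (((600 / 200) & 0xf) << 0);      /* reg == 0x0003 */
reg = (reg & 0xff0f) | (((3000 / 200) & 0xf) << 4);     /* reg == 0x00f3 */
/* then: kszphy_extended_write(phydev, MII_KSZPHY_CLK_CONTROL_PAD_SKEW, reg) */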
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 162464fe86bf..6fa5ae00039f 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -47,7 +47,7 @@
47#define MAX_CALLID 65535 47#define MAX_CALLID 65535
48 48
49static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1); 49static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
50static struct pppox_sock **callid_sock; 50static struct pppox_sock __rcu **callid_sock;
51 51
52static DEFINE_SPINLOCK(chan_lock); 52static DEFINE_SPINLOCK(chan_lock);
53 53
@@ -83,11 +83,11 @@ static const struct proto_ops pptp_ops;
83struct pptp_gre_header { 83struct pptp_gre_header {
84 u8 flags; 84 u8 flags;
85 u8 ver; 85 u8 ver;
86 u16 protocol; 86 __be16 protocol;
87 u16 payload_len; 87 __be16 payload_len;
88 u16 call_id; 88 __be16 call_id;
89 u32 seq; 89 __be32 seq;
90 u32 ack; 90 __be32 ack;
91} __packed; 91} __packed;
92 92
93static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr) 93static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
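[Note] The pptp hunks change no generated code: __be16/__be32 on the GRE header fields and __rcu on callid_sock are sparse annotations (`make C=1`) documenting on-the-wire byte order and RCU-protected access, so any access that forgets ntohs()/be32_to_cpu() or rcu_dereference() now gets flagged. Minimal sketch of why the annotation helps:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hdr {
        __be16 call_id;         /* network (big-endian) order on the wire */
};

static u16 example_call_id(const struct example_hdr *hdr)
{
        /* Without this swap, sparse warns about mixing __be16 and u16. */
        return be16_to_cpu(hdr->call_id);
}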
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bff7e0b0b4e7..50e43e64d51d 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
622} 622}
623 623
624 624
625/*********************
626 * Peers notification
627 *********************/
628
629static void team_notify_peers_work(struct work_struct *work)
630{
631 struct team *team;
632
633 team = container_of(work, struct team, notify_peers.dw.work);
634
635 if (!rtnl_trylock()) {
636 schedule_delayed_work(&team->notify_peers.dw, 0);
637 return;
638 }
639 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
640 rtnl_unlock();
641 if (!atomic_dec_and_test(&team->notify_peers.count_pending))
642 schedule_delayed_work(&team->notify_peers.dw,
643 msecs_to_jiffies(team->notify_peers.interval));
644}
645
646static void team_notify_peers(struct team *team)
647{
648 if (!team->notify_peers.count || !netif_running(team->dev))
649 return;
650 atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
651 schedule_delayed_work(&team->notify_peers.dw, 0);
652}
653
654static void team_notify_peers_init(struct team *team)
655{
656 INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
657}
658
659static void team_notify_peers_fini(struct team *team)
660{
661 cancel_delayed_work_sync(&team->notify_peers.dw);
662}
663
664
665/*******************************
666 * Send multicast group rejoins
667 *******************************/
668
669static void team_mcast_rejoin_work(struct work_struct *work)
670{
671 struct team *team;
672
673 team = container_of(work, struct team, mcast_rejoin.dw.work);
674
675 if (!rtnl_trylock()) {
676 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
677 return;
678 }
679 call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
680 rtnl_unlock();
681 if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
682 schedule_delayed_work(&team->mcast_rejoin.dw,
683 msecs_to_jiffies(team->mcast_rejoin.interval));
684}
685
686static void team_mcast_rejoin(struct team *team)
687{
688 if (!team->mcast_rejoin.count || !netif_running(team->dev))
689 return;
690 atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
691 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
692}
693
694static void team_mcast_rejoin_init(struct team *team)
695{
696 INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
697}
698
699static void team_mcast_rejoin_fini(struct team *team)
700{
701 cancel_delayed_work_sync(&team->mcast_rejoin.dw);
702}
703
704
625/************************ 705/************************
626 * Rx path frame handler 706 * Rx path frame handler
627 ************************/ 707 ************************/
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
846 team_queue_override_port_add(team, port); 926 team_queue_override_port_add(team, port);
847 if (team->ops.port_enabled) 927 if (team->ops.port_enabled)
848 team->ops.port_enabled(team, port); 928 team->ops.port_enabled(team, port);
929 team_notify_peers(team);
930 team_mcast_rejoin(team);
849} 931}
850 932
851static void __reconstruct_port_hlist(struct team *team, int rm_index) 933static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
875 team->en_port_count--; 957 team->en_port_count--;
876 team_queue_override_port_del(team, port); 958 team_queue_override_port_del(team, port);
877 team_adjust_ops(team); 959 team_adjust_ops(team);
960 team_notify_peers(team);
961 team_mcast_rejoin(team);
878} 962}
879 963
880#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 964#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -953,6 +1037,9 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
953 struct netpoll *np; 1037 struct netpoll *np;
954 int err; 1038 int err;
955 1039
1040 if (!team->dev->npinfo)
1041 return 0;
1042
956 np = kzalloc(sizeof(*np), gfp); 1043 np = kzalloc(sizeof(*np), gfp);
957 if (!np) 1044 if (!np)
958 return -ENOMEM; 1045 return -ENOMEM;
@@ -979,12 +1066,6 @@ static void team_port_disable_netpoll(struct team_port *port)
979 __netpoll_cleanup(np); 1066 __netpoll_cleanup(np);
980 kfree(np); 1067 kfree(np);
981} 1068}
982
983static struct netpoll_info *team_netpoll_info(struct team *team)
984{
985 return team->dev->npinfo;
986}
987
988#else 1069#else
989static int team_port_enable_netpoll(struct team *team, struct team_port *port, 1070static int team_port_enable_netpoll(struct team *team, struct team_port *port,
990 gfp_t gfp) 1071 gfp_t gfp)
@@ -994,10 +1075,6 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
994static void team_port_disable_netpoll(struct team_port *port) 1075static void team_port_disable_netpoll(struct team_port *port)
995{ 1076{
996} 1077}
997static struct netpoll_info *team_netpoll_info(struct team *team)
998{
999 return NULL;
1000}
1001#endif 1078#endif
1002 1079
1003static void __team_port_change_port_added(struct team_port *port, bool linkup); 1080static void __team_port_change_port_added(struct team_port *port, bool linkup);
@@ -1079,13 +1156,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1079 goto err_vids_add; 1156 goto err_vids_add;
1080 } 1157 }
1081 1158
1082 if (team_netpoll_info(team)) { 1159 err = team_port_enable_netpoll(team, port, GFP_KERNEL);
1083 err = team_port_enable_netpoll(team, port, GFP_KERNEL); 1160 if (err) {
1084 if (err) { 1161 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1085 netdev_err(dev, "Failed to enable netpoll on device %s\n", 1162 portname);
1086 portname); 1163 goto err_enable_netpoll;
1087 goto err_enable_netpoll;
1088 }
1089 } 1164 }
1090 1165
1091 err = netdev_master_upper_dev_link(port_dev, dev); 1166 err = netdev_master_upper_dev_link(port_dev, dev);
@@ -1205,6 +1280,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
1205 return team_change_mode(team, ctx->data.str_val); 1280 return team_change_mode(team, ctx->data.str_val);
1206} 1281}
1207 1282
1283static int team_notify_peers_count_get(struct team *team,
1284 struct team_gsetter_ctx *ctx)
1285{
1286 ctx->data.u32_val = team->notify_peers.count;
1287 return 0;
1288}
1289
1290static int team_notify_peers_count_set(struct team *team,
1291 struct team_gsetter_ctx *ctx)
1292{
1293 team->notify_peers.count = ctx->data.u32_val;
1294 return 0;
1295}
1296
1297static int team_notify_peers_interval_get(struct team *team,
1298 struct team_gsetter_ctx *ctx)
1299{
1300 ctx->data.u32_val = team->notify_peers.interval;
1301 return 0;
1302}
1303
1304static int team_notify_peers_interval_set(struct team *team,
1305 struct team_gsetter_ctx *ctx)
1306{
1307 team->notify_peers.interval = ctx->data.u32_val;
1308 return 0;
1309}
1310
1311static int team_mcast_rejoin_count_get(struct team *team,
1312 struct team_gsetter_ctx *ctx)
1313{
1314 ctx->data.u32_val = team->mcast_rejoin.count;
1315 return 0;
1316}
1317
1318static int team_mcast_rejoin_count_set(struct team *team,
1319 struct team_gsetter_ctx *ctx)
1320{
1321 team->mcast_rejoin.count = ctx->data.u32_val;
1322 return 0;
1323}
1324
1325static int team_mcast_rejoin_interval_get(struct team *team,
1326 struct team_gsetter_ctx *ctx)
1327{
1328 ctx->data.u32_val = team->mcast_rejoin.interval;
1329 return 0;
1330}
1331
1332static int team_mcast_rejoin_interval_set(struct team *team,
1333 struct team_gsetter_ctx *ctx)
1334{
1335 team->mcast_rejoin.interval = ctx->data.u32_val;
1336 return 0;
1337}
1338
1208static int team_port_en_option_get(struct team *team, 1339static int team_port_en_option_get(struct team *team,
1209 struct team_gsetter_ctx *ctx) 1340 struct team_gsetter_ctx *ctx)
1210{ 1341{
@@ -1317,6 +1448,30 @@ static const struct team_option team_options[] = {
1317 .setter = team_mode_option_set, 1448 .setter = team_mode_option_set,
1318 }, 1449 },
1319 { 1450 {
1451 .name = "notify_peers_count",
1452 .type = TEAM_OPTION_TYPE_U32,
1453 .getter = team_notify_peers_count_get,
1454 .setter = team_notify_peers_count_set,
1455 },
1456 {
1457 .name = "notify_peers_interval",
1458 .type = TEAM_OPTION_TYPE_U32,
1459 .getter = team_notify_peers_interval_get,
1460 .setter = team_notify_peers_interval_set,
1461 },
1462 {
1463 .name = "mcast_rejoin_count",
1464 .type = TEAM_OPTION_TYPE_U32,
1465 .getter = team_mcast_rejoin_count_get,
1466 .setter = team_mcast_rejoin_count_set,
1467 },
1468 {
1469 .name = "mcast_rejoin_interval",
1470 .type = TEAM_OPTION_TYPE_U32,
1471 .getter = team_mcast_rejoin_interval_get,
1472 .setter = team_mcast_rejoin_interval_set,
1473 },
1474 {
1320 .name = "enabled", 1475 .name = "enabled",
1321 .type = TEAM_OPTION_TYPE_BOOL, 1476 .type = TEAM_OPTION_TYPE_BOOL,
1322 .per_port = true, 1477 .per_port = true,
@@ -1396,6 +1551,10 @@ static int team_init(struct net_device *dev)
1396 1551
1397 INIT_LIST_HEAD(&team->option_list); 1552 INIT_LIST_HEAD(&team->option_list);
1398 INIT_LIST_HEAD(&team->option_inst_list); 1553 INIT_LIST_HEAD(&team->option_inst_list);
1554
1555 team_notify_peers_init(team);
1556 team_mcast_rejoin_init(team);
1557
1399 err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); 1558 err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1400 if (err) 1559 if (err)
1401 goto err_options_register; 1560 goto err_options_register;
@@ -1406,6 +1565,8 @@ static int team_init(struct net_device *dev)
1406 return 0; 1565 return 0;
1407 1566
1408err_options_register: 1567err_options_register:
1568 team_mcast_rejoin_fini(team);
1569 team_notify_peers_fini(team);
1409 team_queue_override_fini(team); 1570 team_queue_override_fini(team);
1410err_team_queue_override_init: 1571err_team_queue_override_init:
1411 free_percpu(team->pcpu_stats); 1572 free_percpu(team->pcpu_stats);
@@ -1425,6 +1586,8 @@ static void team_uninit(struct net_device *dev)
1425 1586
1426 __team_change_mode(team, NULL); /* cleanup */ 1587 __team_change_mode(team, NULL); /* cleanup */
1427 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); 1588 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1589 team_mcast_rejoin_fini(team);
1590 team_notify_peers_fini(team);
1428 team_queue_override_fini(team); 1591 team_queue_override_fini(team);
1429 mutex_unlock(&team->lock); 1592 mutex_unlock(&team->lock);
1430} 1593}
@@ -1811,7 +1974,7 @@ static void team_setup_by_port(struct net_device *dev,
1811 dev->addr_len = port_dev->addr_len; 1974 dev->addr_len = port_dev->addr_len;
1812 dev->mtu = port_dev->mtu; 1975 dev->mtu = port_dev->mtu;
1813 memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); 1976 memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
1814 memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len); 1977 eth_hw_addr_inherit(dev, port_dev);
1815} 1978}
1816 1979
1817static int team_dev_type_check_change(struct net_device *dev, 1980static int team_dev_type_check_change(struct net_device *dev,
@@ -2698,6 +2861,10 @@ static int team_device_event(struct notifier_block *unused,
2698 case NETDEV_PRE_TYPE_CHANGE: 2861 case NETDEV_PRE_TYPE_CHANGE:
2699 /* Forbid changing the type of the underlying device */ 2862 /* Forbid changing the type of the underlying device */
2700 return NOTIFY_BAD; 2863 return NOTIFY_BAD;
2864 case NETDEV_RESEND_IGMP:
2865 /* Propagate to master device */
2866 call_netdevice_notifiers(event, port->team->dev);
2867 break;
2701 } 2868 }
2702 return NOTIFY_DONE; 2869 return NOTIFY_DONE;
2703} 2870}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 71af122edf2d..a639de8401f8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -60,6 +60,7 @@
60#include <linux/if_arp.h> 60#include <linux/if_arp.h>
61#include <linux/if_ether.h> 61#include <linux/if_ether.h>
62#include <linux/if_tun.h> 62#include <linux/if_tun.h>
63#include <linux/if_vlan.h>
63#include <linux/crc32.h> 64#include <linux/crc32.h>
64#include <linux/nsproxy.h> 65#include <linux/nsproxy.h>
65#include <linux/virtio_net.h> 66#include <linux/virtio_net.h>
@@ -137,7 +138,10 @@ struct tun_file {
137 struct fasync_struct *fasync; 138 struct fasync_struct *fasync;
138 /* only used for fasync */ 139 /* only used for fasync */
139 unsigned int flags; 140 unsigned int flags;
140 u16 queue_index; 141 union {
142 u16 queue_index;
143 unsigned int ifindex;
144 };
141 struct list_head next; 145 struct list_head next;
142 struct tun_struct *detached; 146 struct tun_struct *detached;
143}; 147};
@@ -405,6 +409,12 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
405 return tun; 409 return tun;
406} 410}
407 411
412static void tun_queue_purge(struct tun_file *tfile)
413{
414 skb_queue_purge(&tfile->sk.sk_receive_queue);
415 skb_queue_purge(&tfile->sk.sk_error_queue);
416}
417
408static void __tun_detach(struct tun_file *tfile, bool clean) 418static void __tun_detach(struct tun_file *tfile, bool clean)
409{ 419{
410 struct tun_file *ntfile; 420 struct tun_file *ntfile;
@@ -431,7 +441,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
431 synchronize_net(); 441 synchronize_net();
432 tun_flow_delete_by_queue(tun, tun->numqueues + 1); 442 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
433 /* Drop read queue */ 443 /* Drop read queue */
434 skb_queue_purge(&tfile->sk.sk_receive_queue); 444 tun_queue_purge(tfile);
435 tun_set_real_num_queues(tun); 445 tun_set_real_num_queues(tun);
436 } else if (tfile->detached && clean) { 446 } else if (tfile->detached && clean) {
437 tun = tun_enable_queue(tfile); 447 tun = tun_enable_queue(tfile);
@@ -483,12 +493,12 @@ static void tun_detach_all(struct net_device *dev)
483 for (i = 0; i < n; i++) { 493 for (i = 0; i < n; i++) {
484 tfile = rtnl_dereference(tun->tfiles[i]); 494 tfile = rtnl_dereference(tun->tfiles[i]);
485 /* Drop read queue */ 495 /* Drop read queue */
486 skb_queue_purge(&tfile->sk.sk_receive_queue); 496 tun_queue_purge(tfile);
487 sock_put(&tfile->sk); 497 sock_put(&tfile->sk);
488 } 498 }
489 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 499 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
490 tun_enable_queue(tfile); 500 tun_enable_queue(tfile);
491 skb_queue_purge(&tfile->sk.sk_receive_queue); 501 tun_queue_purge(tfile);
492 sock_put(&tfile->sk); 502 sock_put(&tfile->sk);
493 } 503 }
494 BUG_ON(tun->numdisabled != 0); 504 BUG_ON(tun->numdisabled != 0);
@@ -497,7 +507,7 @@ static void tun_detach_all(struct net_device *dev)
497 module_put(THIS_MODULE); 507 module_put(THIS_MODULE);
498} 508}
499 509
500static int tun_attach(struct tun_struct *tun, struct file *file) 510static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
501{ 511{
502 struct tun_file *tfile = file->private_data; 512 struct tun_file *tfile = file->private_data;
503 int err; 513 int err;
@@ -522,7 +532,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
522 err = 0; 532 err = 0;
523 533
524 /* Re-attach the filter to the persistent device */ 534 /* Re-attach the filter to the persistent device */
525 if (tun->filter_attached == true) { 535 if (!skip_filter && (tun->filter_attached == true)) {
526 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 536 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
527 if (!err) 537 if (!err)
528 goto out; 538 goto out;
@@ -739,10 +749,17 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
739 >= dev->tx_queue_len / tun->numqueues) 749 >= dev->tx_queue_len / tun->numqueues)
740 goto drop; 750 goto drop;
741 751
742 /* Orphan the skb - required as we might hang on to it
743 * for an indefinite time. */
744 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 752 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
745 goto drop; 753 goto drop;
754
755 if (skb->sk) {
756 sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
757 sw_tx_timestamp(skb);
758 }
759
760 /* Orphan the skb - required as we might hang on to it
761 * for an indefinite time.
762 */
746 skb_orphan(skb); 763 skb_orphan(skb);
747 764
748 nf_reset(skb); 765 nf_reset(skb);
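The new hunk stamps the skb while skb->sk is still valid, i.e. before skb_orphan() severs the socket reference. On the sending side this only fires for sockets that asked for software TX timestamps; a sketch using the standard SO_TIMESTAMPING interface (the socket setup itself is assumed):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Request software TX timestamps on an already-open socket fd. */
static int enable_tx_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
		    SOF_TIMESTAMPING_SOFTWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

/* Completed timestamps are read back from the error queue. */
static ssize_t read_tx_timestamp(int fd, struct msghdr *msg)
{
	return recvmsg(fd, msg, MSG_ERRQUEUE | MSG_DONTWAIT);
}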
@@ -943,7 +960,7 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
943 linear = len; 960 linear = len;
944 961
945 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 962 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
946 &err); 963 &err, 0);
947 if (!skb) 964 if (!skb)
948 return ERR_PTR(err); 965 return ERR_PTR(err);
949 966
@@ -955,109 +972,6 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
955 return skb; 972 return skb;
956} 973}
957 974
958/* set skb frags from iovec, this can move to core network code for reuse */
959static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
960 int offset, size_t count)
961{
962 int len = iov_length(from, count) - offset;
963 int copy = skb_headlen(skb);
964 int size, offset1 = 0;
965 int i = 0;
966
967 /* Skip over from offset */
968 while (count && (offset >= from->iov_len)) {
969 offset -= from->iov_len;
970 ++from;
971 --count;
972 }
973
974 /* copy up to skb headlen */
975 while (count && (copy > 0)) {
976 size = min_t(unsigned int, copy, from->iov_len - offset);
977 if (copy_from_user(skb->data + offset1, from->iov_base + offset,
978 size))
979 return -EFAULT;
980 if (copy > size) {
981 ++from;
982 --count;
983 offset = 0;
984 } else
985 offset += size;
986 copy -= size;
987 offset1 += size;
988 }
989
990 if (len == offset1)
991 return 0;
992
993 while (count--) {
994 struct page *page[MAX_SKB_FRAGS];
995 int num_pages;
996 unsigned long base;
997 unsigned long truesize;
998
999 len = from->iov_len - offset;
1000 if (!len) {
1001 offset = 0;
1002 ++from;
1003 continue;
1004 }
1005 base = (unsigned long)from->iov_base + offset;
1006 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1007 if (i + size > MAX_SKB_FRAGS)
1008 return -EMSGSIZE;
1009 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
1010 if (num_pages != size) {
1011 int j;
1012
1013 for (j = 0; j < num_pages; j++)
1014 put_page(page[i + j]);
1015 return -EFAULT;
1016 }
1017 truesize = size * PAGE_SIZE;
1018 skb->data_len += len;
1019 skb->len += len;
1020 skb->truesize += truesize;
1021 atomic_add(truesize, &skb->sk->sk_wmem_alloc);
1022 while (len) {
1023 int off = base & ~PAGE_MASK;
1024 int size = min_t(int, len, PAGE_SIZE - off);
1025 __skb_fill_page_desc(skb, i, page[i], off, size);
1026 skb_shinfo(skb)->nr_frags++;
1027 /* increase sk_wmem_alloc */
1028 base += size;
1029 len -= size;
1030 i++;
1031 }
1032 offset = 0;
1033 ++from;
1034 }
1035 return 0;
1036}
1037
1038static unsigned long iov_pages(const struct iovec *iv, int offset,
1039 unsigned long nr_segs)
1040{
1041 unsigned long seg, base;
1042 int pages = 0, len, size;
1043
1044 while (nr_segs && (offset >= iv->iov_len)) {
1045 offset -= iv->iov_len;
1046 ++iv;
1047 --nr_segs;
1048 }
1049
1050 for (seg = 0; seg < nr_segs; seg++) {
1051 base = (unsigned long)iv[seg].iov_base + offset;
1052 len = iv[seg].iov_len - offset;
1053 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1054 pages += size;
1055 offset = 0;
1056 }
1057
1058 return pages;
1059}
1060
1061/* Get packet from user space buffer */ 975/* Get packet from user space buffer */
1062static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 976static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1063 void *msg_control, const struct iovec *iv, 977 void *msg_control, const struct iovec *iv,
@@ -1262,6 +1176,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1262{ 1176{
1263 struct tun_pi pi = { 0, skb->protocol }; 1177 struct tun_pi pi = { 0, skb->protocol };
1264 ssize_t total = 0; 1178 ssize_t total = 0;
1179 int vlan_offset = 0;
1265 1180
1266 if (!(tun->flags & TUN_NO_PI)) { 1181 if (!(tun->flags & TUN_NO_PI)) {
1267 if ((len -= sizeof(pi)) < 0) 1182 if ((len -= sizeof(pi)) < 0)
@@ -1325,11 +1240,40 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1325 total += tun->vnet_hdr_sz; 1240 total += tun->vnet_hdr_sz;
1326 } 1241 }
1327 1242
1328 len = min_t(int, skb->len, len); 1243 if (!vlan_tx_tag_present(skb)) {
1244 len = min_t(int, skb->len, len);
1245 } else {
1246 int copy, ret;
1247 struct {
1248 __be16 h_vlan_proto;
1249 __be16 h_vlan_TCI;
1250 } veth;
1251
1252 veth.h_vlan_proto = skb->vlan_proto;
1253 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
1254
1255 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1256 len = min_t(int, skb->len + VLAN_HLEN, len);
1257
1258 copy = min_t(int, vlan_offset, len);
1259 ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
1260 len -= copy;
1261 total += copy;
1262 if (ret || !len)
1263 goto done;
1264
1265 copy = min_t(int, sizeof(veth), len);
1266 ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
1267 len -= copy;
1268 total += copy;
1269 if (ret || !len)
1270 goto done;
1271 }
1329 1272
1330 skb_copy_datagram_const_iovec(skb, 0, iv, total, len); 1273 skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
1331 total += skb->len; 1274 total += len;
1332 1275
1276done:
1333 tun->dev->stats.tx_packets++; 1277 tun->dev->stats.tx_packets++;
1334 tun->dev->stats.tx_bytes += len; 1278 tun->dev->stats.tx_bytes += len;
1335 1279
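The split copy above re-materializes the 802.1Q tag that the stack kept out-of-band: bytes 0-11 (the two MAC addresses) are copied first, then the 4-byte tag, then the rest of the frame starting at the original EtherType. The equivalent reconstruction into a flat buffer, as a standalone sketch (the caller must provide len + 4 bytes of output space):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define VLAN_TAG_OFFSET 12 /* dst MAC (6) + src MAC (6) */

/* Rebuild an untagged frame plus out-of-band tag into a tagged frame. */
static size_t insert_vlan_tag(uint8_t *out, const uint8_t *frame,
			      size_t len, uint16_t proto, uint16_t tci)
{
	uint16_t be_proto = htons(proto);	/* 0x8100 or 0x88a8 */
	uint16_t be_tci = htons(tci);		/* PCP/DEI/VID */

	memcpy(out, frame, VLAN_TAG_OFFSET);	/* MAC addresses */
	memcpy(out + 12, &be_proto, 2);
	memcpy(out + 14, &be_tci, 2);
	memcpy(out + 16, frame + VLAN_TAG_OFFSET,
	       len - VLAN_TAG_OFFSET);		/* rest of the frame */
	return len + 4;
}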
@@ -1478,7 +1422,6 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1478 return ret; 1422 return ret;
1479} 1423}
1480 1424
1481
1482static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, 1425static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1483 struct msghdr *m, size_t total_len, 1426 struct msghdr *m, size_t total_len,
1484 int flags) 1427 int flags)
@@ -1490,10 +1433,15 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1490 if (!tun) 1433 if (!tun)
1491 return -EBADFD; 1434 return -EBADFD;
1492 1435
1493 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { 1436 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
1494 ret = -EINVAL; 1437 ret = -EINVAL;
1495 goto out; 1438 goto out;
1496 } 1439 }
1440 if (flags & MSG_ERRQUEUE) {
1441 ret = sock_recv_errqueue(sock->sk, m, total_len,
1442 SOL_PACKET, TUN_TX_TIMESTAMP);
1443 goto out;
1444 }
1497 ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len, 1445 ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
1498 flags & MSG_DONTWAIT); 1446 flags & MSG_DONTWAIT);
1499 if (ret > total_len) { 1447 if (ret > total_len) {
@@ -1617,7 +1565,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1617 if (err < 0) 1565 if (err < 0)
1618 return err; 1566 return err;
1619 1567
1620 err = tun_attach(tun, file); 1568 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1621 if (err < 0) 1569 if (err < 0)
1622 return err; 1570 return err;
1623 1571
@@ -1664,6 +1612,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1664 1612
1665 dev_net_set(dev, net); 1613 dev_net_set(dev, net);
1666 dev->rtnl_link_ops = &tun_link_ops; 1614 dev->rtnl_link_ops = &tun_link_ops;
1615 dev->ifindex = tfile->ifindex;
1667 1616
1668 tun = netdev_priv(dev); 1617 tun = netdev_priv(dev);
1669 tun->dev = dev; 1618 tun->dev = dev;
@@ -1684,12 +1633,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1684 tun_flow_init(tun); 1633 tun_flow_init(tun);
1685 1634
1686 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 1635 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1687 TUN_USER_FEATURES; 1636 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
1637 NETIF_F_HW_VLAN_STAG_TX;
1688 dev->features = dev->hw_features; 1638 dev->features = dev->hw_features;
1689 dev->vlan_features = dev->features; 1639 dev->vlan_features = dev->features;
1690 1640
1691 INIT_LIST_HEAD(&tun->disabled); 1641 INIT_LIST_HEAD(&tun->disabled);
1692 err = tun_attach(tun, file); 1642 err = tun_attach(tun, file, false);
1693 if (err < 0) 1643 if (err < 0)
1694 goto err_free_dev; 1644 goto err_free_dev;
1695 1645
@@ -1853,7 +1803,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
1853 ret = security_tun_dev_attach_queue(tun->security); 1803 ret = security_tun_dev_attach_queue(tun->security);
1854 if (ret < 0) 1804 if (ret < 0)
1855 goto unlock; 1805 goto unlock;
1856 ret = tun_attach(tun, file); 1806 ret = tun_attach(tun, file, false);
1857 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 1807 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1858 tun = rtnl_dereference(tfile->tun); 1808 tun = rtnl_dereference(tfile->tun);
1859 if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached) 1809 if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
@@ -1879,6 +1829,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1879 kgid_t group; 1829 kgid_t group;
1880 int sndbuf; 1830 int sndbuf;
1881 int vnet_hdr_sz; 1831 int vnet_hdr_sz;
1832 unsigned int ifindex;
1882 int ret; 1833 int ret;
1883 1834
1884 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) { 1835 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
@@ -1913,6 +1864,19 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1913 ret = -EFAULT; 1864 ret = -EFAULT;
1914 goto unlock; 1865 goto unlock;
1915 } 1866 }
1867 if (cmd == TUNSETIFINDEX) {
1868 ret = -EPERM;
1869 if (tun)
1870 goto unlock;
1871
1872 ret = -EFAULT;
1873 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
1874 goto unlock;
1875
1876 ret = 0;
1877 tfile->ifindex = ifindex;
1878 goto unlock;
1879 }
1916 1880
1917 ret = -EBADFD; 1881 ret = -EBADFD;
1918 if (!tun) 1882 if (!tun)
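TUNSETIFINDEX may only be issued on a file that is not yet attached to a device (hence the -EPERM when tun is already set); the stored ifindex is then used when TUNSETIFF creates the device, which lets tools such as checkpoint/restore recreate a tun/tap interface with its old index. A sketch, assuming a uapi header that defines TUNSETIFINDEX:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tun_open_with_ifindex(unsigned int ifindex, const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	/* Must precede TUNSETIFF; fails with EPERM once attached. */
	if (ioctl(fd, TUNSETIFINDEX, &ifindex) < 0)
		goto err;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}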
@@ -1925,6 +1889,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1925 case TUNGETIFF: 1889 case TUNGETIFF:
1926 tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 1890 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1927 1891
1892 if (tfile->detached)
1893 ifr.ifr_flags |= IFF_DETACH_QUEUE;
1894 if (!tfile->socket.sk->sk_filter)
1895 ifr.ifr_flags |= IFF_NOFILTER;
1896
1928 if (copy_to_user(argp, &ifr, ifreq_len)) 1897 if (copy_to_user(argp, &ifr, ifreq_len))
1929 ret = -EFAULT; 1898 ret = -EFAULT;
1930 break; 1899 break;
@@ -2081,6 +2050,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2081 tun_detach_filter(tun, tun->numqueues); 2050 tun_detach_filter(tun, tun->numqueues);
2082 break; 2051 break;
2083 2052
2053 case TUNGETFILTER:
2054 ret = -EINVAL;
2055 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2056 break;
2057 ret = -EFAULT;
2058 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
2059 break;
2060 ret = 0;
2061 break;
2062
2084 default: 2063 default:
2085 ret = -EINVAL; 2064 ret = -EINVAL;
2086 break; 2065 break;
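TUNGETFILTER is the read-side counterpart of TUNSETFILTER: it hands back the sock_fprog that was attached earlier (TAP devices only), so the classic-BPF program can be saved and re-attached later. A minimal sketch:

#include <sys/ioctl.h>
#include <linux/filter.h>
#include <linux/if_tun.h>

/* Fetch the classic-BPF filter previously set with TUNSETFILTER.
 * Note: prog->filter comes back as the pointer originally passed in. */
static int tap_get_filter(int fd, struct sock_fprog *prog)
{
	return ioctl(fd, TUNGETFILTER, prog);
}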
@@ -2161,6 +2140,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2161 rcu_assign_pointer(tfile->tun, NULL); 2140 rcu_assign_pointer(tfile->tun, NULL);
2162 tfile->net = get_net(current->nsproxy->net_ns); 2141 tfile->net = get_net(current->nsproxy->net_ns);
2163 tfile->flags = 0; 2142 tfile->flags = 0;
2143 tfile->ifindex = 0;
2164 2144
2165 rcu_assign_pointer(tfile->socket.wq, &tfile->wq); 2145 rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2166 init_waitqueue_head(&tfile->wq.wait); 2146 init_waitqueue_head(&tfile->wq.wait);
@@ -2276,6 +2256,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
2276 .get_msglevel = tun_get_msglevel, 2256 .get_msglevel = tun_get_msglevel,
2277 .set_msglevel = tun_set_msglevel, 2257 .set_msglevel = tun_set_msglevel,
2278 .get_link = ethtool_op_get_link, 2258 .get_link = ethtool_op_get_link,
2259 .get_ts_info = ethtool_op_get_ts_info,
2279}; 2260};
2280 2261
2281 2262
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index d84bfd4109a4..40db31233313 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -268,6 +268,14 @@ config USB_NET_DM9601
268 This option adds support for Davicom DM9601 based USB 1.1 268 This option adds support for Davicom DM9601 based USB 1.1
269 10/100 Ethernet adapters. 269 10/100 Ethernet adapters.
270 270
271config USB_NET_SR9700
272 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
273 depends on USB_USBNET
274 select CRC32
275 help
276 This option adds support for CoreChip-sz SR9700 based USB 1.1
277 10/100 Ethernet adapters.
278
271config USB_NET_SMSC75XX 279config USB_NET_SMSC75XX
272 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices" 280 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
273 depends on USB_USBNET 281 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e8171784529d..8b342cf992fd 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
14obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o 14obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
17obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
17obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o 18obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
18obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o 19obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
19obj-$(CONFIG_USB_NET_GL620A) += gl620a.o 20obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 346c032aa795..bdaa12d07a12 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -178,6 +178,8 @@ struct asix_common_private {
178 struct asix_rx_fixup_info rx_fixup_info; 178 struct asix_rx_fixup_info rx_fixup_info;
179}; 179};
180 180
181extern const struct driver_info ax88172a_info;
182
181/* ASIX specific flags */ 183/* ASIX specific flags */
182#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */ 184#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
183 185
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index ad5d1e4384db..386a3df53678 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -778,6 +778,9 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
778 dev->hard_mtu = net->mtu + net->hard_header_len; 778 dev->hard_mtu = net->mtu + net->hard_header_len;
779 ax88178_set_mfb(dev); 779 ax88178_set_mfb(dev);
780 780
781 /* max qlen depends on hard_mtu and rx_urb_size */
782 usbnet_update_max_qlen(dev);
783
781 return 0; 784 return 0;
782} 785}
783 786
@@ -943,8 +946,6 @@ static const struct driver_info hg20f9_info = {
943 .data = FLAG_EEPROM_MAC, 946 .data = FLAG_EEPROM_MAC,
944}; 947};
945 948
946extern const struct driver_info ax88172a_info;
947
948static const struct usb_device_id products [] = { 949static const struct usb_device_id products [] = {
949{ 950{
950 // Linksys USB200M 951 // Linksys USB200M
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index d012203b0f29..723b3879ecc2 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -161,7 +161,8 @@ static const struct net_device_ops ax88172a_netdev_ops = {
161 .ndo_set_rx_mode = asix_set_multicast, 161 .ndo_set_rx_mode = asix_set_multicast,
162}; 162};
163 163
164int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd) 164static int ax88172a_get_settings(struct net_device *net,
165 struct ethtool_cmd *cmd)
165{ 166{
166 if (!net->phydev) 167 if (!net->phydev)
167 return -ENODEV; 168 return -ENODEV;
@@ -169,7 +170,8 @@ int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
169 return phy_ethtool_gset(net->phydev, cmd); 170 return phy_ethtool_gset(net->phydev, cmd);
170} 171}
171 172
172int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd) 173static int ax88172a_set_settings(struct net_device *net,
174 struct ethtool_cmd *cmd)
173{ 175{
174 if (!net->phydev) 176 if (!net->phydev)
175 return -ENODEV; 177 return -ENODEV;
@@ -177,7 +179,7 @@ int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
177 return phy_ethtool_sset(net->phydev, cmd); 179 return phy_ethtool_sset(net->phydev, cmd);
178} 180}
179 181
180int ax88172a_nway_reset(struct net_device *net) 182static int ax88172a_nway_reset(struct net_device *net)
181{ 183{
182 if (!net->phydev) 184 if (!net->phydev)
183 return -ENODEV; 185 return -ENODEV;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 4233c05a3e32..3569293df872 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -688,6 +688,9 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu)
688 2, 2, &tmp16); 688 2, 2, &tmp16);
689 } 689 }
690 690
691 /* max qlen depends on hard_mtu and rx_urb_size */
692 usbnet_update_max_qlen(dev);
693
691 return 0; 694 return 0;
692} 695}
693 696
@@ -1174,31 +1177,18 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
1174 int frame_size = dev->maxpacket; 1177 int frame_size = dev->maxpacket;
1175 int mss = skb_shinfo(skb)->gso_size; 1178 int mss = skb_shinfo(skb)->gso_size;
1176 int headroom; 1179 int headroom;
1177 int tailroom;
1178 1180
1179 tx_hdr1 = skb->len; 1181 tx_hdr1 = skb->len;
1180 tx_hdr2 = mss; 1182 tx_hdr2 = mss;
1181 if (((skb->len + 8) % frame_size) == 0) 1183 if (((skb->len + 8) % frame_size) == 0)
1182 tx_hdr2 |= 0x80008000; /* Enable padding */ 1184 tx_hdr2 |= 0x80008000; /* Enable padding */
1183 1185
1184 headroom = skb_headroom(skb); 1186 headroom = skb_headroom(skb) - 8;
1185 tailroom = skb_tailroom(skb);
1186
1187 if (!skb_header_cloned(skb) &&
1188 !skb_cloned(skb) &&
1189 (headroom + tailroom) >= 8) {
1190 if (headroom < 8) {
1191 skb->data = memmove(skb->head + 8, skb->data, skb->len);
1192 skb_set_tail_pointer(skb, skb->len);
1193 }
1194 } else {
1195 struct sk_buff *skb2;
1196 1187
1197 skb2 = skb_copy_expand(skb, 8, 0, flags); 1188 if ((skb_header_cloned(skb) || headroom < 0) &&
1189 pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) {
1198 dev_kfree_skb_any(skb); 1190 dev_kfree_skb_any(skb);
1199 skb = skb2; 1191 return NULL;
1200 if (!skb)
1201 return NULL;
1202 } 1192 }
1203 1193
1204 skb_push(skb, 4); 1194 skb_push(skb, 4);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 606eba2872bd..3a8131582e75 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -323,7 +323,7 @@ next_desc:
323 /* Never use the same address on both ends of the link, even 323 /* Never use the same address on both ends of the link, even
324 * if the buggy firmware told us to. 324 * if the buggy firmware told us to.
325 */ 325 */
326 if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr)) 326 if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
327 eth_hw_addr_random(dev->net); 327 eth_hw_addr_random(dev->net);
328 328
329 /* make MAC addr easily distinguishable from an IP header */ 329 /* make MAC addr easily distinguishable from an IP header */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 11c51f275366..f3fce412c0c1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -19,9 +19,12 @@
19#include <linux/crc32.h> 19#include <linux/crc32.h>
20#include <linux/if_vlan.h> 20#include <linux/if_vlan.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/list.h>
23#include <linux/ip.h>
24#include <linux/ipv6.h>
22 25
23/* Version Information */ 26/* Version Information */
24#define DRIVER_VERSION "v1.0.0 (2013/05/03)" 27#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
25#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 28#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
26#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" 29#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
27#define MODULENAME "r8152" 30#define MODULENAME "r8152"
@@ -267,6 +270,12 @@ enum rtl_register_content {
267 FULL_DUP = 0x01, 270 FULL_DUP = 0x01,
268}; 271};
269 272
273#define RTL8152_MAX_TX 10
274#define RTL8152_MAX_RX 10
275#define INTBUFSIZE 2
276
277#define INTR_LINK 0x0004
278
270#define RTL8152_REQT_READ 0xc0 279#define RTL8152_REQT_READ 0xc0
271#define RTL8152_REQT_WRITE 0x40 280#define RTL8152_REQT_WRITE 0x40
272#define RTL8152_REQ_GET_REGS 0x05 281#define RTL8152_REQ_GET_REGS 0x05
@@ -285,9 +294,9 @@ enum rtl_register_content {
285/* rtl8152 flags */ 294/* rtl8152 flags */
286enum rtl8152_flags { 295enum rtl8152_flags {
287 RTL8152_UNPLUG = 0, 296 RTL8152_UNPLUG = 0,
288 RX_URB_FAIL,
289 RTL8152_SET_RX_MODE, 297 RTL8152_SET_RX_MODE,
290 WORK_ENABLE 298 WORK_ENABLE,
299 RTL8152_LINK_CHG,
291}; 300};
292 301
293/* Define these values to match your device */ 302/* Define these values to match your device */
@@ -311,21 +320,53 @@ struct tx_desc {
311 u32 opts1; 320 u32 opts1;
312#define TX_FS (1 << 31) /* First segment of a packet */ 321#define TX_FS (1 << 31) /* First segment of a packet */
313#define TX_LS (1 << 30) /* Final segment of a packet */ 322#define TX_LS (1 << 30) /* Final segment of a packet */
314#define TX_LEN_MASK 0xffff 323#define TX_LEN_MASK 0x3ffff
324
315 u32 opts2; 325 u32 opts2;
326#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
327#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
328#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
329#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
330};
331
332struct r8152;
333
334struct rx_agg {
335 struct list_head list;
336 struct urb *urb;
337 struct r8152 *context;
338 void *buffer;
339 void *head;
340};
341
342struct tx_agg {
343 struct list_head list;
344 struct urb *urb;
345 struct r8152 *context;
346 void *buffer;
347 void *head;
348 u32 skb_num;
349 u32 skb_len;
316}; 350};
317 351
318struct r8152 { 352struct r8152 {
319 unsigned long flags; 353 unsigned long flags;
320 struct usb_device *udev; 354 struct usb_device *udev;
321 struct tasklet_struct tl; 355 struct tasklet_struct tl;
356 struct usb_interface *intf;
322 struct net_device *netdev; 357 struct net_device *netdev;
323 struct urb *rx_urb, *tx_urb; 358 struct urb *intr_urb;
324 struct sk_buff *tx_skb, *rx_skb; 359 struct tx_agg tx_info[RTL8152_MAX_TX];
360 struct rx_agg rx_info[RTL8152_MAX_RX];
361 struct list_head rx_done, tx_free;
362 struct sk_buff_head tx_queue;
363 spinlock_t rx_lock, tx_lock;
325 struct delayed_work schedule; 364 struct delayed_work schedule;
326 struct mii_if_info mii; 365 struct mii_if_info mii;
366 int intr_interval;
327 u32 msg_enable; 367 u32 msg_enable;
328 u16 ocp_base; 368 u16 ocp_base;
369 u8 *intr_buff;
329 u8 version; 370 u8 version;
330 u8 speed; 371 u8 speed;
331}; 372};
@@ -340,6 +381,7 @@ enum rtl_version {
340 * The RTL chips use a 64 element hash table based on the Ethernet CRC. 381 * The RTL chips use a 64 element hash table based on the Ethernet CRC.
341 */ 382 */
342static const int multicast_filter_limit = 32; 383static const int multicast_filter_limit = 32;
384static unsigned int rx_buf_sz = 16384;
343 385
344static 386static
345int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) 387int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -686,6 +728,9 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
686 ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data); 728 ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
687} 729}
688 730
731static
732int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
733
689static inline void set_ethernet_addr(struct r8152 *tp) 734static inline void set_ethernet_addr(struct r8152 *tp)
690{ 735{
691 struct net_device *dev = tp->netdev; 736 struct net_device *dev = tp->netdev;
@@ -716,26 +761,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
716 return 0; 761 return 0;
717} 762}
718 763
719static int alloc_all_urbs(struct r8152 *tp)
720{
721 tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
722 if (!tp->rx_urb)
723 return 0;
724 tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
725 if (!tp->tx_urb) {
726 usb_free_urb(tp->rx_urb);
727 return 0;
728 }
729
730 return 1;
731}
732
733static void free_all_urbs(struct r8152 *tp)
734{
735 usb_free_urb(tp->rx_urb);
736 usb_free_urb(tp->tx_urb);
737}
738
739static struct net_device_stats *rtl8152_get_stats(struct net_device *dev) 764static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
740{ 765{
741 return &dev->stats; 766 return &dev->stats;
@@ -743,137 +768,574 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
743 768
744static void read_bulk_callback(struct urb *urb) 769static void read_bulk_callback(struct urb *urb)
745{ 770{
746 struct r8152 *tp;
747 unsigned pkt_len;
748 struct sk_buff *skb;
749 struct net_device *netdev; 771 struct net_device *netdev;
750 struct net_device_stats *stats; 772 unsigned long flags;
751 int status = urb->status; 773 int status = urb->status;
774 struct rx_agg *agg;
775 struct r8152 *tp;
752 int result; 776 int result;
753 struct rx_desc *rx_desc;
754 777
755 tp = urb->context; 778 agg = urb->context;
779 if (!agg)
780 return;
781
782 tp = agg->context;
756 if (!tp) 783 if (!tp)
757 return; 784 return;
785
758 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 786 if (test_bit(RTL8152_UNPLUG, &tp->flags))
759 return; 787 return;
788
789 if (!test_bit(WORK_ENABLE, &tp->flags))
790 return;
791
760 netdev = tp->netdev; 792 netdev = tp->netdev;
761 if (!netif_device_present(netdev)) 793
794 /* When the link is down, the driver cancels all bulk URBs. */
795 /* This avoids re-submitting them. */
796 if (!netif_carrier_ok(netdev))
762 return; 797 return;
763 798
764 stats = rtl8152_get_stats(netdev);
765 switch (status) { 799 switch (status) {
766 case 0: 800 case 0:
767 break; 801 if (urb->actual_length < ETH_ZLEN)
802 break;
803
804 spin_lock_irqsave(&tp->rx_lock, flags);
805 list_add_tail(&agg->list, &tp->rx_done);
806 spin_unlock_irqrestore(&tp->rx_lock, flags);
807 tasklet_schedule(&tp->tl);
808 return;
768 case -ESHUTDOWN: 809 case -ESHUTDOWN:
769 set_bit(RTL8152_UNPLUG, &tp->flags); 810 set_bit(RTL8152_UNPLUG, &tp->flags);
770 netif_device_detach(tp->netdev); 811 netif_device_detach(tp->netdev);
812 return;
771 case -ENOENT: 813 case -ENOENT:
772 return; /* the urb is in unlink state */ 814 return; /* the urb is in unlink state */
773 case -ETIME: 815 case -ETIME:
774 pr_warn_ratelimited("maybe a reset is needed?\n"); 816 pr_warn_ratelimited("maybe a reset is needed?\n");
775 goto goon; 817 break;
776 default: 818 default:
777 pr_warn_ratelimited("Rx status %d\n", status); 819 pr_warn_ratelimited("Rx status %d\n", status);
778 goto goon; 820 break;
779 } 821 }
780 822
781 /* protect against short packets (tell me why we got some?!?) */ 823 result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
782 if (urb->actual_length < sizeof(*rx_desc))
783 goto goon;
784
785
786 rx_desc = (struct rx_desc *)urb->transfer_buffer;
787 pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
788 if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
789 goto goon;
790
791 skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
792 if (!skb)
793 goto goon;
794
795 memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
796 skb_put(skb, pkt_len);
797 skb->protocol = eth_type_trans(skb, netdev);
798 netif_rx(skb);
799 stats->rx_packets++;
800 stats->rx_bytes += pkt_len;
801goon:
802 usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
803 tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
804 (usb_complete_t)read_bulk_callback, tp);
805 result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
806 if (result == -ENODEV) { 824 if (result == -ENODEV) {
807 netif_device_detach(tp->netdev); 825 netif_device_detach(tp->netdev);
808 } else if (result) { 826 } else if (result) {
809 set_bit(RX_URB_FAIL, &tp->flags); 827 spin_lock_irqsave(&tp->rx_lock, flags);
810 goto resched; 828 list_add_tail(&agg->list, &tp->rx_done);
829 spin_unlock_irqrestore(&tp->rx_lock, flags);
830 tasklet_schedule(&tp->tl);
831 }
832}
833
834static void write_bulk_callback(struct urb *urb)
835{
836 struct net_device_stats *stats;
837 unsigned long flags;
838 struct tx_agg *agg;
839 struct r8152 *tp;
840 int status = urb->status;
841
842 agg = urb->context;
843 if (!agg)
844 return;
845
846 tp = agg->context;
847 if (!tp)
848 return;
849
850 stats = rtl8152_get_stats(tp->netdev);
851 if (status) {
852 pr_warn_ratelimited("Tx status %d\n", status);
853 stats->tx_errors += agg->skb_num;
811 } else { 854 } else {
812 clear_bit(RX_URB_FAIL, &tp->flags); 855 stats->tx_packets += agg->skb_num;
856 stats->tx_bytes += agg->skb_len;
813 } 857 }
814 858
815 return; 859 spin_lock_irqsave(&tp->tx_lock, flags);
816resched: 860 list_add_tail(&agg->list, &tp->tx_free);
817 tasklet_schedule(&tp->tl); 861 spin_unlock_irqrestore(&tp->tx_lock, flags);
862
863 if (!netif_carrier_ok(tp->netdev))
864 return;
865
866 if (!test_bit(WORK_ENABLE, &tp->flags))
867 return;
868
869 if (test_bit(RTL8152_UNPLUG, &tp->flags))
870 return;
871
872 if (!skb_queue_empty(&tp->tx_queue))
873 tasklet_schedule(&tp->tl);
818} 874}
819 875
820static void rx_fixup(unsigned long data) 876static void intr_callback(struct urb *urb)
821{ 877{
822 struct r8152 *tp; 878 struct r8152 *tp;
823 int status; 879 __u16 *d;
880 int status = urb->status;
881 int res;
882
883 tp = urb->context;
884 if (!tp)
885 return;
824 886
825 tp = (struct r8152 *)data;
826 if (!test_bit(WORK_ENABLE, &tp->flags)) 887 if (!test_bit(WORK_ENABLE, &tp->flags))
827 return; 888 return;
828 889
829 status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC); 890 if (test_bit(RTL8152_UNPLUG, &tp->flags))
830 if (status == -ENODEV) { 891 return;
892
893 switch (status) {
894 case 0: /* success */
895 break;
896 case -ECONNRESET: /* unlink */
897 case -ESHUTDOWN:
831 netif_device_detach(tp->netdev); 898 netif_device_detach(tp->netdev);
832 } else if (status) { 899 case -ENOENT:
833 set_bit(RX_URB_FAIL, &tp->flags); 900 return;
834 goto tlsched; 901 case -EOVERFLOW:
902 netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
903 goto resubmit;
904 /* -EPIPE: should clear the halt */
905 default:
906 netif_info(tp, intr, tp->netdev, "intr status %d\n", status);
907 goto resubmit;
908 }
909
910 d = urb->transfer_buffer;
911 if (INTR_LINK & __le16_to_cpu(d[0])) {
912 if (!(tp->speed & LINK_STATUS)) {
913 set_bit(RTL8152_LINK_CHG, &tp->flags);
914 schedule_delayed_work(&tp->schedule, 0);
915 }
835 } else { 916 } else {
836 clear_bit(RX_URB_FAIL, &tp->flags); 917 if (tp->speed & LINK_STATUS) {
918 set_bit(RTL8152_LINK_CHG, &tp->flags);
919 schedule_delayed_work(&tp->schedule, 0);
920 }
837 } 921 }
838 922
839 return; 923resubmit:
840tlsched: 924 res = usb_submit_urb(urb, GFP_ATOMIC);
841 tasklet_schedule(&tp->tl); 925 if (res == -ENODEV)
926 netif_device_detach(tp->netdev);
927 else if (res)
928 netif_err(tp, intr, tp->netdev,
929 "can't resubmit intr, status %d\n", res);
842} 930}
843 931
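The interrupt-in endpoint delivers a 2-byte little-endian status word (hence INTBUFSIZE above); bit 2 (INTR_LINK) reports link state, and only edges relative to tp->speed schedule the workqueue. Decoding the buffer in isolation, as a sketch that assumes the layout shown here:

#include <stdint.h>
#include <stdio.h>

#define INTR_LINK 0x0004

/* The 2-byte interrupt-in buffer is a little-endian status word. */
static int link_is_up(const uint8_t *intr_buf)
{
	uint16_t status = intr_buf[0] | (intr_buf[1] << 8);

	return !!(status & INTR_LINK);
}

int main(void)
{
	uint8_t buf[2] = { 0x04, 0x00 };

	printf("link %s\n", link_is_up(buf) ? "up" : "down");
	return 0;
}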
844static void write_bulk_callback(struct urb *urb) 932static inline void *rx_agg_align(void *data)
933{
934 return (void *)ALIGN((uintptr_t)data, 8);
935}
936
937static inline void *tx_agg_align(void *data)
938{
939 return (void *)ALIGN((uintptr_t)data, 4);
940}
941
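ALIGN(x, a) rounds x up to the next multiple of the power-of-two a. Because alloc_all_mem() below over-allocates by exactly the alignment (8 for rx, 4 for tx) whenever kmalloc returns a misaligned block, the aligned head pointer is guaranteed to stay inside the buffer. A self-contained illustration of the rounding:

#include <stdint.h>
#include <stdio.h>

/* Same rounding the kernel's ALIGN() performs for a power-of-two a. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t p = 0x1003;

	printf("%#lx -> %#lx (8-byte aligned)\n",
	       (unsigned long)p, (unsigned long)ALIGN_UP(p, 8));
	/* 0x1003 -> 0x1008; at most a-1 bytes of padding are consumed. */
	return 0;
}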
942static void free_all_mem(struct r8152 *tp)
943{
944 int i;
945
946 for (i = 0; i < RTL8152_MAX_RX; i++) {
947 if (tp->rx_info[i].urb) {
948 usb_free_urb(tp->rx_info[i].urb);
949 tp->rx_info[i].urb = NULL;
950 }
951
952 if (tp->rx_info[i].buffer) {
953 kfree(tp->rx_info[i].buffer);
954 tp->rx_info[i].buffer = NULL;
955 tp->rx_info[i].head = NULL;
956 }
957 }
958
959 for (i = 0; i < RTL8152_MAX_TX; i++) {
960 if (tp->tx_info[i].urb) {
961 usb_free_urb(tp->tx_info[i].urb);
962 tp->tx_info[i].urb = NULL;
963 }
964
965 if (tp->tx_info[i].buffer) {
966 kfree(tp->tx_info[i].buffer);
967 tp->tx_info[i].buffer = NULL;
968 tp->tx_info[i].head = NULL;
969 }
970 }
971
972 if (tp->intr_urb) {
973 usb_free_urb(tp->intr_urb);
974 tp->intr_urb = NULL;
975 }
976
977 if (tp->intr_buff) {
978 kfree(tp->intr_buff);
979 tp->intr_buff = NULL;
980 }
981}
982
983static int alloc_all_mem(struct r8152 *tp)
984{
985 struct net_device *netdev = tp->netdev;
986 struct usb_interface *intf = tp->intf;
987 struct usb_host_interface *alt = intf->cur_altsetting;
988 struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
989 struct urb *urb;
990 int node, i;
991 u8 *buf;
992
993 node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
994
995 spin_lock_init(&tp->rx_lock);
996 spin_lock_init(&tp->tx_lock);
997 INIT_LIST_HEAD(&tp->rx_done);
998 INIT_LIST_HEAD(&tp->tx_free);
999 skb_queue_head_init(&tp->tx_queue);
1000
1001 for (i = 0; i < RTL8152_MAX_RX; i++) {
1002 buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
1003 if (!buf)
1004 goto err1;
1005
1006 if (buf != rx_agg_align(buf)) {
1007 kfree(buf);
1008 buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
1009 if (!buf)
1010 goto err1;
1011 }
1012
1013 urb = usb_alloc_urb(0, GFP_KERNEL);
1014 if (!urb) {
1015 kfree(buf);
1016 goto err1;
1017 }
1018
1019 INIT_LIST_HEAD(&tp->rx_info[i].list);
1020 tp->rx_info[i].context = tp;
1021 tp->rx_info[i].urb = urb;
1022 tp->rx_info[i].buffer = buf;
1023 tp->rx_info[i].head = rx_agg_align(buf);
1024 }
1025
1026 for (i = 0; i < RTL8152_MAX_TX; i++) {
1027 buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
1028 if (!buf)
1029 goto err1;
1030
1031 if (buf != tx_agg_align(buf)) {
1032 kfree(buf);
1033 buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
1034 if (!buf)
1035 goto err1;
1036 }
1037
1038 urb = usb_alloc_urb(0, GFP_KERNEL);
1039 if (!urb) {
1040 kfree(buf);
1041 goto err1;
1042 }
1043
1044 INIT_LIST_HEAD(&tp->tx_info[i].list);
1045 tp->tx_info[i].context = tp;
1046 tp->tx_info[i].urb = urb;
1047 tp->tx_info[i].buffer = buf;
1048 tp->tx_info[i].head = tx_agg_align(buf);
1049
1050 list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
1051 }
1052
1053 tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
1054 if (!tp->intr_urb)
1055 goto err1;
1056
1057 tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL);
1058 if (!tp->intr_buff)
1059 goto err1;
1060
1061 tp->intr_interval = (int)ep_intr->desc.bInterval;
1062 usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
1063 tp->intr_buff, INTBUFSIZE, intr_callback,
1064 tp, tp->intr_interval);
1065
1066 return 0;
1067
1068err1:
1069 free_all_mem(tp);
1070 return -ENOMEM;
1071}
1072
1073static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
1074{
1075 struct tx_agg *agg = NULL;
1076 unsigned long flags;
1077
1078 spin_lock_irqsave(&tp->tx_lock, flags);
1079 if (!list_empty(&tp->tx_free)) {
1080 struct list_head *cursor;
1081
1082 cursor = tp->tx_free.next;
1083 list_del_init(cursor);
1084 agg = list_entry(cursor, struct tx_agg, list);
1085 }
1086 spin_unlock_irqrestore(&tp->tx_lock, flags);
1087
1088 return agg;
1089}
1090
1091static void
1092r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
1093{
1094 memset(desc, 0, sizeof(*desc));
1095
1096 desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
1097
1098 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1099 __be16 protocol;
1100 u8 ip_protocol;
1101 u32 opts2 = 0;
1102
1103 if (skb->protocol == htons(ETH_P_8021Q))
1104 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1105 else
1106 protocol = skb->protocol;
1107
1108 switch (protocol) {
1109 case htons(ETH_P_IP):
1110 opts2 |= IPV4_CS;
1111 ip_protocol = ip_hdr(skb)->protocol;
1112 break;
1113
1114 case htons(ETH_P_IPV6):
1115 opts2 |= IPV6_CS;
1116 ip_protocol = ipv6_hdr(skb)->nexthdr;
1117 break;
1118
1119 default:
1120 ip_protocol = IPPROTO_RAW;
1121 break;
1122 }
1123
1124 if (ip_protocol == IPPROTO_TCP) {
1125 opts2 |= TCP_CS;
1126 opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
1127 } else if (ip_protocol == IPPROTO_UDP) {
1128 opts2 |= UDP_CS;
1129 } else {
1130 WARN_ON_ONCE(1);
1131 }
1132
1133 desc->opts2 = cpu_to_le32(opts2);
1134 }
1135}
1136
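As a worked example of the encoding above: a TCP/IPv4 frame with a 14-byte Ethernet header and a 20-byte IP header has skb_transport_offset() == 34, so opts2 ends up as IPV4_CS | TCP_CS | (34 << 17). A standalone check:

#include <stdint.h>
#include <stdio.h>

#define TCP_CS  (1u << 30)	/* as defined in the tx_desc above */
#define IPV4_CS (1u << 29)

int main(void)
{
	uint32_t transport_offset = 34;	/* ETH (14) + IPv4 header (20) */
	uint32_t opts2 = IPV4_CS | TCP_CS |
			 ((transport_offset & 0x7fff) << 17);

	printf("opts2 = %#x\n", opts2);	/* prints 0x60440000 */
	return 0;
}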
1137static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1138{
1139 u32 remain;
1140 u8 *tx_data;
1141
1142 tx_data = agg->head;
1143 agg->skb_num = agg->skb_len = 0;
1144 remain = rx_buf_sz - sizeof(struct tx_desc);
1145
1146 while (remain >= ETH_ZLEN) {
1147 struct tx_desc *tx_desc;
1148 struct sk_buff *skb;
1149 unsigned int len;
1150
1151 skb = skb_dequeue(&tp->tx_queue);
1152 if (!skb)
1153 break;
1154
1155 len = skb->len;
1156 if (remain < len) {
1157 skb_queue_head(&tp->tx_queue, skb);
1158 break;
1159 }
1160
1161 tx_desc = (struct tx_desc *)tx_data;
1162 tx_data += sizeof(*tx_desc);
1163
1164 r8152_tx_csum(tp, tx_desc, skb);
1165 memcpy(tx_data, skb->data, len);
1166 agg->skb_num++;
1167 agg->skb_len += len;
1168 dev_kfree_skb_any(skb);
1169
1170 tx_data = tx_agg_align(tx_data + len);
1171 remain = rx_buf_sz - sizeof(*tx_desc) -
1172 (u32)((void *)tx_data - agg->head);
1173 }
1174
1175 usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
1176 agg->head, (int)(tx_data - (u8 *)agg->head),
1177 (usb_complete_t)write_bulk_callback, agg);
1178
1179 return usb_submit_urb(agg->urb, GFP_ATOMIC);
1180}
1181
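Each bulk-out URB therefore carries several frames laid out as [8-byte tx_desc][frame][pad to 4], repeated; "remain" keeps the next descriptor from overrunning the rx_buf_sz-sized aggregation buffer. The offset arithmetic for two hypothetical 60-byte frames:

#include <stdio.h>

#define DESC_SZ 8u	/* two u32 option words per tx_desc */
#define ALIGN4(x) (((x) + 3u) & ~3u)

int main(void)
{
	unsigned int off = 0, len = 60, i;

	for (i = 0; i < 2; i++) {
		printf("desc%u @ %u, data @ %u\n", i, off, off + DESC_SZ);
		off = ALIGN4(off + DESC_SZ + len);
	}
	printf("urb length = %u\n", off);	/* 136 bytes */
	return 0;
}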
1182static void rx_bottom(struct r8152 *tp)
1183{
1184 unsigned long flags;
1185 struct list_head *cursor, *next;
1186
1187 spin_lock_irqsave(&tp->rx_lock, flags);
1188 list_for_each_safe(cursor, next, &tp->rx_done) {
1189 struct rx_desc *rx_desc;
1190 struct rx_agg *agg;
1191 unsigned pkt_len;
1192 int len_used = 0;
1193 struct urb *urb;
1194 u8 *rx_data;
1195 int ret;
1196
1197 list_del_init(cursor);
1198 spin_unlock_irqrestore(&tp->rx_lock, flags);
1199
1200 agg = list_entry(cursor, struct rx_agg, list);
1201 urb = agg->urb;
1202 if (urb->actual_length < ETH_ZLEN)
1203 goto submit;
1204
1205 rx_desc = agg->head;
1206 rx_data = agg->head;
1207 pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
1208 len_used += sizeof(struct rx_desc) + pkt_len;
1209
1210 while (urb->actual_length >= len_used) {
1211 struct net_device *netdev = tp->netdev;
1212 struct net_device_stats *stats;
1213 struct sk_buff *skb;
1214
1215 if (pkt_len < ETH_ZLEN)
1216 break;
1217
1218 stats = rtl8152_get_stats(netdev);
1219
1220 pkt_len -= 4; /* CRC */
1221 rx_data += sizeof(struct rx_desc);
1222
1223 skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
1224 if (!skb) {
1225 stats->rx_dropped++;
1226 break;
1227 }
1228 memcpy(skb->data, rx_data, pkt_len);
1229 skb_put(skb, pkt_len);
1230 skb->protocol = eth_type_trans(skb, netdev);
1231 netif_rx(skb);
1232 stats->rx_packets++;
1233 stats->rx_bytes += pkt_len;
1234
1235 rx_data = rx_agg_align(rx_data + pkt_len + 4);
1236 rx_desc = (struct rx_desc *)rx_data;
1237 pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
1238 len_used = (int)(rx_data - (u8 *)agg->head);
1239 len_used += sizeof(struct rx_desc) + pkt_len;
1240 }
1241
1242submit:
1243 ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
1244 spin_lock_irqsave(&tp->rx_lock, flags);
1245 if (ret && ret != -ENODEV) {
1246 list_add_tail(&agg->list, next);
1247 tasklet_schedule(&tp->tl);
1248 }
1249 }
1250 spin_unlock_irqrestore(&tp->rx_lock, flags);
1251}
1252
1253static void tx_bottom(struct r8152 *tp)
1254{
1255 int res;
1256
1257 do {
1258 struct tx_agg *agg;
1259
1260 if (skb_queue_empty(&tp->tx_queue))
1261 break;
1262
1263 agg = r8152_get_tx_agg(tp);
1264 if (!agg)
1265 break;
1266
1267 res = r8152_tx_agg_fill(tp, agg);
1268 if (res) {
1269 struct net_device_stats *stats;
1270 struct net_device *netdev;
1271 unsigned long flags;
1272
1273 netdev = tp->netdev;
1274 stats = rtl8152_get_stats(netdev);
1275
1276 if (res == -ENODEV) {
1277 netif_device_detach(netdev);
1278 } else {
1279 netif_warn(tp, tx_err, netdev,
1280 "failed tx_urb %d\n", res);
1281 stats->tx_dropped += agg->skb_num;
1282 spin_lock_irqsave(&tp->tx_lock, flags);
1283 list_add_tail(&agg->list, &tp->tx_free);
1284 spin_unlock_irqrestore(&tp->tx_lock, flags);
1285 }
1286 }
1287 } while (res == 0);
1288}
1289
1290static void bottom_half(unsigned long data)
845{ 1291{
846 struct r8152 *tp; 1292 struct r8152 *tp;
847 int status = urb->status;
848 1293
849 tp = urb->context; 1294 tp = (struct r8152 *)data;
850 if (!tp) 1295
1296 if (test_bit(RTL8152_UNPLUG, &tp->flags))
851 return; 1297 return;
852 dev_kfree_skb_irq(tp->tx_skb); 1298
853 if (!netif_device_present(tp->netdev)) 1299 if (!test_bit(WORK_ENABLE, &tp->flags))
854 return; 1300 return;
855 if (status) 1301
856 dev_info(&urb->dev->dev, "%s: Tx status %d\n", 1302 /* When the link is down, the driver cancels all bulk URBs. */
857 tp->netdev->name, status); 1303 /* This avoids re-submitting them. */
858 tp->netdev->trans_start = jiffies; 1304 if (!netif_carrier_ok(tp->netdev))
859 netif_wake_queue(tp->netdev); 1305 return;
1306
1307 rx_bottom(tp);
1308 tx_bottom(tp);
1309}
1310
1311static
1312int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
1313{
1314 usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
1315 agg->head, rx_buf_sz,
1316 (usb_complete_t)read_bulk_callback, agg);
1317
1318 return usb_submit_urb(agg->urb, mem_flags);
860} 1319}
861 1320
862static void rtl8152_tx_timeout(struct net_device *netdev) 1321static void rtl8152_tx_timeout(struct net_device *netdev)
863{ 1322{
864 struct r8152 *tp = netdev_priv(netdev); 1323 struct r8152 *tp = netdev_priv(netdev);
865 struct net_device_stats *stats = rtl8152_get_stats(netdev); 1324 int i;
1325
866 netif_warn(tp, tx_err, netdev, "Tx timeout.\n"); 1326 netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
867 usb_unlink_urb(tp->tx_urb); 1327 for (i = 0; i < RTL8152_MAX_TX; i++)
868 stats->tx_errors++; 1328 usb_unlink_urb(tp->tx_info[i].urb);
869} 1329}
870 1330
871static void rtl8152_set_rx_mode(struct net_device *netdev) 1331static void rtl8152_set_rx_mode(struct net_device *netdev)
872{ 1332{
873 struct r8152 *tp = netdev_priv(netdev); 1333 struct r8152 *tp = netdev_priv(netdev);
874 1334
875 if (tp->speed & LINK_STATUS) 1335 if (tp->speed & LINK_STATUS) {
876 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 1336 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
1337 schedule_delayed_work(&tp->schedule, 0);
1338 }
877} 1339}
878 1340
879static void _rtl8152_set_rx_mode(struct net_device *netdev) 1341static void _rtl8152_set_rx_mode(struct net_device *netdev)
@@ -923,33 +1385,39 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
923{ 1385{
924 struct r8152 *tp = netdev_priv(netdev); 1386 struct r8152 *tp = netdev_priv(netdev);
925 struct net_device_stats *stats = rtl8152_get_stats(netdev); 1387 struct net_device_stats *stats = rtl8152_get_stats(netdev);
1388 unsigned long flags;
1389 struct tx_agg *agg = NULL;
926 struct tx_desc *tx_desc; 1390 struct tx_desc *tx_desc;
927 unsigned int len; 1391 unsigned int len;
1392 u8 *tx_data;
928 int res; 1393 int res;
929 1394
930 netif_stop_queue(netdev); 1395 skb_tx_timestamp(skb);
931 len = skb->len;
932 if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
933 struct sk_buff *tx_skb;
934 1396
935 tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC); 1397 /* If tx_queue is not empty, it means at least one previous packt */
936 dev_kfree_skb_any(skb); 1398 /* is waiting for sending. Don't send current one before it. */
937 if (!tx_skb) { 1399 if (skb_queue_empty(&tp->tx_queue))
938 stats->tx_dropped++; 1400 agg = r8152_get_tx_agg(tp);
939 netif_wake_queue(netdev); 1401
940 return NETDEV_TX_OK; 1402 if (!agg) {
941 } 1403 skb_queue_tail(&tp->tx_queue, skb);
942 skb = tx_skb; 1404 return NETDEV_TX_OK;
943 } 1405 }
944 tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc)); 1406
945 memset(tx_desc, 0, sizeof(*tx_desc)); 1407 tx_desc = (struct tx_desc *)agg->head;
946 tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS); 1408 tx_data = agg->head + sizeof(*tx_desc);
947 tp->tx_skb = skb; 1409 agg->skb_num = agg->skb_len = 0;
948 skb_tx_timestamp(skb); 1410
949 usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), 1411 len = skb->len;
950 skb->data, skb->len, 1412 r8152_tx_csum(tp, tx_desc, skb);
951 (usb_complete_t)write_bulk_callback, tp); 1413 memcpy(tx_data, skb->data, len);
952 res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC); 1414 dev_kfree_skb_any(skb);
1415 agg->skb_num++;
1416 agg->skb_len += len;
1417 usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
1418 agg->head, len + sizeof(*tx_desc),
1419 (usb_complete_t)write_bulk_callback, agg);
1420 res = usb_submit_urb(agg->urb, GFP_ATOMIC);
953 if (res) { 1421 if (res) {
954 /* Can we get/handle EPIPE here? */ 1422 /* Can we get/handle EPIPE here? */
955 if (res == -ENODEV) { 1423 if (res == -ENODEV) {
@@ -957,12 +1425,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
957 } else { 1425 } else {
958 netif_warn(tp, tx_err, netdev, 1426 netif_warn(tp, tx_err, netdev,
959 "failed tx_urb %d\n", res); 1427 "failed tx_urb %d\n", res);
960 stats->tx_errors++; 1428 stats->tx_dropped++;
961 netif_start_queue(netdev); 1429 spin_lock_irqsave(&tp->tx_lock, flags);
1430 list_add_tail(&agg->list, &tp->tx_free);
1431 spin_unlock_irqrestore(&tp->tx_lock, flags);
962 } 1432 }
963 } else {
964 stats->tx_packets++;
965 stats->tx_bytes += skb->len;
966 } 1433 }
967 1434
968 return NETDEV_TX_OK; 1435 return NETDEV_TX_OK;
@@ -999,17 +1466,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
999 1466
1000static int rtl8152_enable(struct r8152 *tp) 1467static int rtl8152_enable(struct r8152 *tp)
1001{ 1468{
1002 u32 ocp_data; 1469 u32 ocp_data;
1470 int i, ret;
1003 u8 speed; 1471 u8 speed;
1004 1472
1005 speed = rtl8152_get_speed(tp); 1473 speed = rtl8152_get_speed(tp);
1006 if (speed & _100bps) { 1474 if (speed & _10bps) {
1007 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); 1475 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
1008 ocp_data &= ~EEEP_CR_EEEP_TX; 1476 ocp_data |= EEEP_CR_EEEP_TX;
1009 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); 1477 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
1010 } else { 1478 } else {
1011 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); 1479 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
1012 ocp_data |= EEEP_CR_EEEP_TX; 1480 ocp_data &= ~EEEP_CR_EEEP_TX;
1013 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); 1481 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
1014 } 1482 }
1015 1483
@@ -1023,23 +1491,34 @@ static int rtl8152_enable(struct r8152 *tp)
1023 ocp_data &= ~RXDY_GATED_EN; 1491 ocp_data &= ~RXDY_GATED_EN;
1024 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); 1492 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1025 1493
1026 usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), 1494 INIT_LIST_HEAD(&tp->rx_done);
1027 tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc), 1495 ret = 0;
1028 (usb_complete_t)read_bulk_callback, tp); 1496 for (i = 0; i < RTL8152_MAX_RX; i++) {
1497 INIT_LIST_HEAD(&tp->rx_info[i].list);
1498 ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
1499 }
1029 1500
1030 return usb_submit_urb(tp->rx_urb, GFP_KERNEL); 1501 return ret;
1031} 1502}
1032 1503
1033static void rtl8152_disable(struct r8152 *tp) 1504static void rtl8152_disable(struct r8152 *tp)
1034{ 1505{
1035 u32 ocp_data; 1506 struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
1036 int i; 1507 struct sk_buff *skb;
1508 u32 ocp_data;
1509 int i;
1037 1510
1038 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 1511 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
1039 ocp_data &= ~RCR_ACPT_ALL; 1512 ocp_data &= ~RCR_ACPT_ALL;
1040 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 1513 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
1041 1514
1042 usb_kill_urb(tp->tx_urb); 1515 while ((skb = skb_dequeue(&tp->tx_queue))) {
1516 dev_kfree_skb(skb);
1517 stats->tx_dropped++;
1518 }
1519
1520 for (i = 0; i < RTL8152_MAX_TX; i++)
1521 usb_kill_urb(tp->tx_info[i].urb);
1043 1522
1044 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); 1523 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
1045 ocp_data |= RXDY_GATED_EN; 1524 ocp_data |= RXDY_GATED_EN;
@@ -1058,7 +1537,8 @@ static void rtl8152_disable(struct r8152 *tp)
1058 mdelay(1); 1537 mdelay(1);
1059 } 1538 }
1060 1539
1061 usb_kill_urb(tp->rx_urb); 1540 for (i = 0; i < RTL8152_MAX_RX; i++)
1541 usb_kill_urb(tp->rx_info[i].urb);
1062 1542
1063 rtl8152_nic_reset(tp); 1543 rtl8152_nic_reset(tp);
1064} 1544}
@@ -1269,7 +1749,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
1269 r8152_mdio_write(tp, MII_BMCR, bmcr); 1749 r8152_mdio_write(tp, MII_BMCR, bmcr);
1270 1750
1271out: 1751out:
1272 schedule_delayed_work(&tp->schedule, 5 * HZ);
1273 1752
1274 return ret; 1753 return ret;
1275} 1754}
@@ -1292,6 +1771,7 @@ static void set_carrier(struct r8152 *tp)
1292 struct net_device *netdev = tp->netdev; 1771 struct net_device *netdev = tp->netdev;
1293 u8 speed; 1772 u8 speed;
1294 1773
1774 clear_bit(RTL8152_LINK_CHG, &tp->flags);
1295 speed = rtl8152_get_speed(tp); 1775 speed = rtl8152_get_speed(tp);
1296 1776
1297 if (speed & LINK_STATUS) { 1777 if (speed & LINK_STATUS) {
@@ -1303,7 +1783,9 @@ static void set_carrier(struct r8152 *tp)
1303 } else { 1783 } else {
1304 if (tp->speed & LINK_STATUS) { 1784 if (tp->speed & LINK_STATUS) {
1305 netif_carrier_off(netdev); 1785 netif_carrier_off(netdev);
1786 tasklet_disable(&tp->tl);
1306 rtl8152_disable(tp); 1787 rtl8152_disable(tp);
1788 tasklet_enable(&tp->tl);
1307 } 1789 }
1308 } 1790 }
1309 tp->speed = speed; 1791 tp->speed = speed;
@@ -1319,13 +1801,12 @@ static void rtl_work_func_t(struct work_struct *work)
1319 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 1801 if (test_bit(RTL8152_UNPLUG, &tp->flags))
1320 goto out1; 1802 goto out1;
1321 1803
1322 set_carrier(tp); 1804 if (test_bit(RTL8152_LINK_CHG, &tp->flags))
1805 set_carrier(tp);
1323 1806
1324 if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) 1807 if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
1325 _rtl8152_set_rx_mode(tp->netdev); 1808 _rtl8152_set_rx_mode(tp->netdev);
1326 1809
1327 schedule_delayed_work(&tp->schedule, HZ);
1328
1329out1: 1810out1:
1330 return; 1811 return;
1331} 1812}
@@ -1335,28 +1816,20 @@ static int rtl8152_open(struct net_device *netdev)
1335 struct r8152 *tp = netdev_priv(netdev); 1816 struct r8152 *tp = netdev_priv(netdev);
1336 int res = 0; 1817 int res = 0;
1337 1818
1338 tp->speed = rtl8152_get_speed(tp); 1819 res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
1339 if (tp->speed & LINK_STATUS) { 1820 if (res) {
1340 res = rtl8152_enable(tp); 1821 if (res == -ENODEV)
1341 if (res) { 1822 netif_device_detach(tp->netdev);
1342 if (res == -ENODEV) 1823 netif_warn(tp, ifup, netdev,
1343 netif_device_detach(tp->netdev); 1824 "intr_urb submit failed: %d\n", res);
1344 1825 return res;
1345 netif_err(tp, ifup, netdev,
1346 "rtl8152_open failed: %d\n", res);
1347 return res;
1348 }
1349
1350 netif_carrier_on(netdev);
1351 } else {
1352 netif_stop_queue(netdev);
1353 netif_carrier_off(netdev);
1354 } 1826 }
1355 1827
1356 rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL); 1828 rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
1829 tp->speed = 0;
1830 netif_carrier_off(netdev);
1357 netif_start_queue(netdev); 1831 netif_start_queue(netdev);
1358 set_bit(WORK_ENABLE, &tp->flags); 1832 set_bit(WORK_ENABLE, &tp->flags);
1359 schedule_delayed_work(&tp->schedule, 0);
1360 1833
1361 return res; 1834 return res;
1362} 1835}
@@ -1366,10 +1839,13 @@ static int rtl8152_close(struct net_device *netdev)
1366 struct r8152 *tp = netdev_priv(netdev); 1839 struct r8152 *tp = netdev_priv(netdev);
1367 int res = 0; 1840 int res = 0;
1368 1841
1842 usb_kill_urb(tp->intr_urb);
1369 clear_bit(WORK_ENABLE, &tp->flags); 1843 clear_bit(WORK_ENABLE, &tp->flags);
1370 cancel_delayed_work_sync(&tp->schedule); 1844 cancel_delayed_work_sync(&tp->schedule);
1371 netif_stop_queue(netdev); 1845 netif_stop_queue(netdev);
1846 tasklet_disable(&tp->tl);
1372 rtl8152_disable(tp); 1847 rtl8152_disable(tp);
1848 tasklet_enable(&tp->tl);
1373 1849
1374 return res; 1850 return res;
1375} 1851}
@@ -1429,8 +1905,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
1429 1905
1430static void r8152b_init(struct r8152 *tp) 1906static void r8152b_init(struct r8152 *tp)
1431{ 1907{
1432 u32 ocp_data; 1908 u32 ocp_data;
1433 int i; 1909 int i;
1434 1910
1435 rtl_clear_bp(tp); 1911 rtl_clear_bp(tp);
1436 1912
@@ -1475,9 +1951,9 @@ static void r8152b_init(struct r8152 *tp)
1475 break; 1951 break;
1476 } 1952 }
1477 1953
1478 /* disable rx aggregation */ 1954 /* enable rx aggregation */
1479 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); 1955 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
1480 ocp_data |= RX_AGG_DISABLE; 1956 ocp_data &= ~RX_AGG_DISABLE;
1481 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); 1957 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
1482} 1958}
1483 1959
@@ -1489,7 +1965,9 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
1489 1965
1490 if (netif_running(tp->netdev)) { 1966 if (netif_running(tp->netdev)) {
1491 clear_bit(WORK_ENABLE, &tp->flags); 1967 clear_bit(WORK_ENABLE, &tp->flags);
1968 usb_kill_urb(tp->intr_urb);
1492 cancel_delayed_work_sync(&tp->schedule); 1969 cancel_delayed_work_sync(&tp->schedule);
1970 tasklet_disable(&tp->tl);
1493 } 1971 }
1494 1972
1495 rtl8152_down(tp); 1973 rtl8152_down(tp);
@@ -1504,10 +1982,12 @@ static int rtl8152_resume(struct usb_interface *intf)
1504 r8152b_init(tp); 1982 r8152b_init(tp);
1505 netif_device_attach(tp->netdev); 1983 netif_device_attach(tp->netdev);
1506 if (netif_running(tp->netdev)) { 1984 if (netif_running(tp->netdev)) {
1507 rtl8152_enable(tp); 1985 rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
1986 tp->speed = 0;
1987 netif_carrier_off(tp->netdev);
1508 set_bit(WORK_ENABLE, &tp->flags); 1988 set_bit(WORK_ENABLE, &tp->flags);
1509 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 1989 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
1510 schedule_delayed_work(&tp->schedule, 0); 1990 tasklet_enable(&tp->tl);
1511 } 1991 }
1512 1992
1513 return 0; 1993 return 0;
@@ -1619,6 +2099,7 @@ static int rtl8152_probe(struct usb_interface *intf,
1619 struct usb_device *udev = interface_to_usbdev(intf); 2099 struct usb_device *udev = interface_to_usbdev(intf);
1620 struct r8152 *tp; 2100 struct r8152 *tp;
1621 struct net_device *netdev; 2101 struct net_device *netdev;
2102 int ret;
1622 2103
1623 if (udev->actconfig->desc.bConfigurationValue != 1) { 2104 if (udev->actconfig->desc.bConfigurationValue != 1) {
1624 usb_driver_set_configuration(udev, 1); 2105 usb_driver_set_configuration(udev, 1);
@@ -1631,19 +2112,22 @@ static int rtl8152_probe(struct usb_interface *intf,
1631 return -ENOMEM; 2112 return -ENOMEM;
1632 } 2113 }
1633 2114
2115 SET_NETDEV_DEV(netdev, &intf->dev);
1634 tp = netdev_priv(netdev); 2116 tp = netdev_priv(netdev);
1635 tp->msg_enable = 0x7FFF; 2117 tp->msg_enable = 0x7FFF;
1636 2118
1637 tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp); 2119 tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
1638 INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); 2120 INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
1639 2121
1640 tp->udev = udev; 2122 tp->udev = udev;
1641 tp->netdev = netdev; 2123 tp->netdev = netdev;
2124 tp->intf = intf;
1642 netdev->netdev_ops = &rtl8152_netdev_ops; 2125 netdev->netdev_ops = &rtl8152_netdev_ops;
1643 netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; 2126 netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
1644 netdev->features &= ~NETIF_F_IP_CSUM; 2127
2128 netdev->features |= NETIF_F_IP_CSUM;
2129 netdev->hw_features = NETIF_F_IP_CSUM;
1645 SET_ETHTOOL_OPS(netdev, &ops); 2130 SET_ETHTOOL_OPS(netdev, &ops);
1646 tp->speed = 0;
1647 2131
1648 tp->mii.dev = netdev; 2132 tp->mii.dev = netdev;
1649 tp->mii.mdio_read = read_mii_word; 2133 tp->mii.mdio_read = read_mii_word;
@@ -1657,37 +2141,27 @@ static int rtl8152_probe(struct usb_interface *intf,
1657 r8152b_init(tp); 2141 r8152b_init(tp);
1658 set_ethernet_addr(tp); 2142 set_ethernet_addr(tp);
1659 2143
1660 if (!alloc_all_urbs(tp)) { 2144 ret = alloc_all_mem(tp);
1661 netif_err(tp, probe, netdev, "out of memory"); 2145 if (ret)
1662 goto out; 2146 goto out;
1663 }
1664
1665 tp->rx_skb = netdev_alloc_skb(netdev,
1666 RTL8152_RMS + sizeof(struct rx_desc));
1667 if (!tp->rx_skb)
1668 goto out1;
1669 2147
1670 usb_set_intfdata(intf, tp); 2148 usb_set_intfdata(intf, tp);
1671 SET_NETDEV_DEV(netdev, &intf->dev);
1672
1673 2149
1674 if (register_netdev(netdev) != 0) { 2150 ret = register_netdev(netdev);
2151 if (ret != 0) {
1675 netif_err(tp, probe, netdev, "couldn't register the device"); 2152 netif_err(tp, probe, netdev, "couldn't register the device");
1676 goto out2; 2153 goto out1;
1677 } 2154 }
1678 2155
1679 netif_info(tp, probe, netdev, "%s", DRIVER_VERSION); 2156 netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
1680 2157
1681 return 0; 2158 return 0;
1682 2159
1683out2:
1684 usb_set_intfdata(intf, NULL);
1685 dev_kfree_skb(tp->rx_skb);
1686out1: 2160out1:
1687 free_all_urbs(tp); 2161 usb_set_intfdata(intf, NULL);
1688out: 2162out:
1689 free_netdev(netdev); 2163 free_netdev(netdev);
1690 return -EIO; 2164 return ret;
1691} 2165}
1692 2166
1693static void rtl8152_unload(struct r8152 *tp) 2167static void rtl8152_unload(struct r8152 *tp)
@@ -1715,9 +2189,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
1715 tasklet_kill(&tp->tl); 2189 tasklet_kill(&tp->tl);
1716 unregister_netdev(tp->netdev); 2190 unregister_netdev(tp->netdev);
1717 rtl8152_unload(tp); 2191 rtl8152_unload(tp);
1718 free_all_urbs(tp); 2192 free_all_mem(tp);
1719 if (tp->rx_skb)
1720 dev_kfree_skb(tp->rx_skb);
1721 free_netdev(tp->netdev); 2193 free_netdev(tp->netdev);
1722 } 2194 }
1723} 2195}
@@ -1732,11 +2204,12 @@ MODULE_DEVICE_TABLE(usb, rtl8152_table);
1732 2204
1733static struct usb_driver rtl8152_driver = { 2205static struct usb_driver rtl8152_driver = {
1734 .name = MODULENAME, 2206 .name = MODULENAME,
2207 .id_table = rtl8152_table,
1735 .probe = rtl8152_probe, 2208 .probe = rtl8152_probe,
1736 .disconnect = rtl8152_disconnect, 2209 .disconnect = rtl8152_disconnect,
1737 .id_table = rtl8152_table,
1738 .suspend = rtl8152_suspend, 2210 .suspend = rtl8152_suspend,
1739 .resume = rtl8152_resume 2211 .resume = rtl8152_resume,
2212 .reset_resume = rtl8152_resume,
1740}; 2213};
1741 2214
1742module_usb_driver(rtl8152_driver); 2215module_usb_driver(rtl8152_driver);
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
new file mode 100644
index 000000000000..7ec3e0ee0783
--- /dev/null
+++ b/drivers/net/usb/sr9700.c
@@ -0,0 +1,560 @@
1/*
2 * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices
3 *
4 * Author : Liu Junliang <liujunliang_ljl@163.com>
5 *
6 * Based on dm9601.c
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/stddef.h>
16#include <linux/init.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/mii.h>
21#include <linux/usb.h>
22#include <linux/crc32.h>
23#include <linux/usb/usbnet.h>
24
25#include "sr9700.h"
26
27static int sr_read(struct usbnet *dev, u8 reg, u16 length, void *data)
28{
29 int err;
30
31 err = usbnet_read_cmd(dev, SR_RD_REGS, SR_REQ_RD_REG, 0, reg, data,
32 length);
33 if ((err != length) && (err >= 0))
34 err = -EINVAL;
35 return err;
36}
37
38static int sr_write(struct usbnet *dev, u8 reg, u16 length, void *data)
39{
40 int err;
41
42 err = usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data,
43 length);
44 if ((err >= 0) && (err < length))
45 err = -EINVAL;
46 return err;
47}
48
49static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value)
50{
51 return sr_read(dev, reg, 1, value);
52}
53
54static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
55{
56 return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG,
57 value, reg, NULL, 0);
58}
59
60static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
61{
62 usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
63 0, reg, data, length);
64}
65
66static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
67{
68 usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
69 value, reg, NULL, 0);
70}
71
72static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
73{
74 int i;
75
76 for (i = 0; i < SR_SHARE_TIMEOUT; i++) {
77 u8 tmp = 0;
78 int ret;
79
80 udelay(1);
81 ret = sr_read_reg(dev, EPCR, &tmp);
82 if (ret < 0)
83 return ret;
84
85 /* ready */
86 if (!(tmp & EPCR_ERRE))
87 return 0;
88 }
89
90 netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom");
91
92 return -EIO;
93}
94
95static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
96 __le16 *value)
97{
98 int ret;
99
100 mutex_lock(&dev->phy_mutex);
101
102 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
103 sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
104
105 ret = wait_phy_eeprom_ready(dev, phy);
106 if (ret < 0)
107 goto out_unlock;
108
109 sr_write_reg(dev, EPCR, 0x0);
110 ret = sr_read(dev, EPDR, 2, value);
111
112 netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
113 phy, reg, *value, ret);
114
115out_unlock:
116 mutex_unlock(&dev->phy_mutex);
117 return ret;
118}
119
120static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
121 __le16 value)
122{
123 int ret;
124
125 mutex_lock(&dev->phy_mutex);
126
127 ret = sr_write(dev, EPDR, 2, &value);
128 if (ret < 0)
129 goto out_unlock;
130
131 sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
132 sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
133 (EPCR_WEP | EPCR_ERPRW));
134
135 ret = wait_phy_eeprom_ready(dev, phy);
136 if (ret < 0)
137 goto out_unlock;
138
139 sr_write_reg(dev, EPCR, 0x0);
140
141out_unlock:
142 mutex_unlock(&dev->phy_mutex);
143 return ret;
144}
145
146static int sr_read_eeprom_word(struct usbnet *dev, u8 offset, void *value)
147{
148 return sr_share_read_word(dev, 0, offset, value);
149}
150
151static int sr9700_get_eeprom_len(struct net_device *netdev)
152{
153 return SR_EEPROM_LEN;
154}
155
156static int sr9700_get_eeprom(struct net_device *netdev,
157 struct ethtool_eeprom *eeprom, u8 *data)
158{
159 struct usbnet *dev = netdev_priv(netdev);
160 __le16 *buf = (__le16 *)data;
161 int ret = 0;
162 int i;
163
164 /* access is 16bit */
165 if ((eeprom->offset & 0x01) || (eeprom->len & 0x01))
166 return -EINVAL;
167
168 for (i = 0; i < eeprom->len / 2; i++) {
169 ret = sr_read_eeprom_word(dev, eeprom->offset / 2 + i, buf + i);
170 if (ret < 0)
171 break;
172 }
173
174 return ret;
175}
176
177static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
178{
179 struct usbnet *dev = netdev_priv(netdev);
180 __le16 res;
181 int rc = 0;
182
183 if (phy_id) {
184 netdev_dbg(netdev, "Only internal phy supported\n");
185 return 0;
186 }
187
188 /* Access NSR_LINKST bit for link status instead of MII_BMSR */
189 if (loc == MII_BMSR) {
190 u8 value;
191
192 sr_read_reg(dev, NSR, &value);
193 if (value & NSR_LINKST)
194 rc = 1;
195 }
196	sr_share_read_word(dev, 1, loc, &res);
197	if (rc == 1)
198		rc = le16_to_cpu(res) | BMSR_LSTATUS;
199	else
200		rc = le16_to_cpu(res) & ~BMSR_LSTATUS;
201
202	netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
203		   phy_id, loc, rc);
204
205	return rc;
206}
207
208static void sr_mdio_write(struct net_device *netdev, int phy_id, int loc,
209 int val)
210{
211 struct usbnet *dev = netdev_priv(netdev);
212 __le16 res = cpu_to_le16(val);
213
214 if (phy_id) {
215 netdev_dbg(netdev, "Only internal phy supported\n");
216 return;
217 }
218
219 netdev_dbg(netdev, "sr_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
220 phy_id, loc, val);
221
222 sr_share_write_word(dev, 1, loc, res);
223}
224
225static u32 sr9700_get_link(struct net_device *netdev)
226{
227 struct usbnet *dev = netdev_priv(netdev);
228 u8 value = 0;
229 int rc = 0;
230
231 /* Get the Link Status directly */
232 sr_read_reg(dev, NSR, &value);
233 if (value & NSR_LINKST)
234 rc = 1;
235
236 return rc;
237}
238
239static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
240{
241 struct usbnet *dev = netdev_priv(netdev);
242
243 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
244}
245
246static const struct ethtool_ops sr9700_ethtool_ops = {
247 .get_drvinfo = usbnet_get_drvinfo,
248 .get_link = sr9700_get_link,
249 .get_msglevel = usbnet_get_msglevel,
250 .set_msglevel = usbnet_set_msglevel,
251 .get_eeprom_len = sr9700_get_eeprom_len,
252 .get_eeprom = sr9700_get_eeprom,
253 .get_settings = usbnet_get_settings,
254 .set_settings = usbnet_set_settings,
255 .nway_reset = usbnet_nway_reset,
256};
257
258static void sr9700_set_multicast(struct net_device *netdev)
259{
260 struct usbnet *dev = netdev_priv(netdev);
261	/* We use the 20-byte dev->data area for our 8-byte filter buffer
262 * to avoid allocating memory that is tricky to free later
263 */
264 u8 *hashes = (u8 *)&dev->data;
265 /* rx_ctl setting : enable, disable_long, disable_crc */
266 u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG;
267
268 memset(hashes, 0x00, SR_MCAST_SIZE);
269 /* broadcast address */
270 hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG;
271 if (netdev->flags & IFF_PROMISC) {
272 rx_ctl |= RCR_PRMSC;
273 } else if (netdev->flags & IFF_ALLMULTI ||
274 netdev_mc_count(netdev) > SR_MCAST_MAX) {
275 rx_ctl |= RCR_RUNT;
276 } else if (!netdev_mc_empty(netdev)) {
277 struct netdev_hw_addr *ha;
278
279 netdev_for_each_mc_addr(ha, netdev) {
280 u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
281 hashes[crc >> 3] |= 1 << (crc & 0x7);
282 }
283 }
284
285 sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
286 sr_write_reg_async(dev, RCR, rx_ctl);
287}
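The hash table built here is the usual 64-bit Ethernet multicast filter: the top six bits of the big-endian CRC-32 of the address select one of the 64 bits spread across the eight MAR bytes. A user-space restatement of the indexing, with the CRC transcribed from the kernel's ether_crc() (standalone, for illustration only):

#include <stdint.h>

/* CRC-32 as computed by the kernel's ether_crc(): each byte is
 * consumed LSB-first into an MSB-first shift register,
 * polynomial 0x04C11DB7, initial value all-ones, no final xor.
 */
static uint32_t ether_crc6(const uint8_t addr[6])
{
	uint32_t crc = 0xFFFFFFFFu;

	for (int i = 0; i < 6; i++) {
		uint8_t b = addr[i];

		for (int bit = 0; bit < 8; bit++, b >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (b & 1)) ? 0x04C11DB7 : 0);
	}
	return crc;
}

static void set_hash_bit(uint8_t hashes[8], const uint8_t addr[6])
{
	uint32_t idx = ether_crc6(addr) >> 26;	/* top 6 bits: 0..63 */

	hashes[idx >> 3] |= 1u << (idx & 7);	/* byte, then bit within it */
}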
288
289static int sr9700_set_mac_address(struct net_device *netdev, void *p)
290{
291 struct usbnet *dev = netdev_priv(netdev);
292 struct sockaddr *addr = p;
293
294 if (!is_valid_ether_addr(addr->sa_data)) {
295 netdev_err(netdev, "not setting invalid mac address %pM\n",
296 addr->sa_data);
297 return -EINVAL;
298 }
299
300 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
301 sr_write_async(dev, PAR, 6, netdev->dev_addr);
302
303 return 0;
304}
305
306static const struct net_device_ops sr9700_netdev_ops = {
307 .ndo_open = usbnet_open,
308 .ndo_stop = usbnet_stop,
309 .ndo_start_xmit = usbnet_start_xmit,
310 .ndo_tx_timeout = usbnet_tx_timeout,
311 .ndo_change_mtu = usbnet_change_mtu,
312 .ndo_validate_addr = eth_validate_addr,
313 .ndo_do_ioctl = sr9700_ioctl,
314 .ndo_set_rx_mode = sr9700_set_multicast,
315 .ndo_set_mac_address = sr9700_set_mac_address,
316};
317
318static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
319{
320 struct net_device *netdev;
321 struct mii_if_info *mii;
322 int ret;
323
324 ret = usbnet_get_endpoints(dev, intf);
325 if (ret)
326 goto out;
327
328 netdev = dev->net;
329
330 netdev->netdev_ops = &sr9700_netdev_ops;
331 netdev->ethtool_ops = &sr9700_ethtool_ops;
332 netdev->hard_header_len += SR_TX_OVERHEAD;
333 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
334	/* the bulk-in buffer should preferably be at least 3K */
335 dev->rx_urb_size = 3072;
336
337 mii = &dev->mii;
338 mii->dev = netdev;
339 mii->mdio_read = sr_mdio_read;
340 mii->mdio_write = sr_mdio_write;
341 mii->phy_id_mask = 0x1f;
342 mii->reg_num_mask = 0x1f;
343
344 sr_write_reg(dev, NCR, NCR_RST);
345 udelay(20);
346
347 /* read MAC
348	 * After chip power-on, the chip automatically reloads the MAC from
349	 * EEPROM into PAR. If there is no external EEPROM, a default MAC
350	 * address is stored in PAR so the chip still works properly.
351 */
352 if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
353 netdev_err(netdev, "Error reading MAC address\n");
354 ret = -ENODEV;
355 goto out;
356 }
357
358 /* power up and reset phy */
359 sr_write_reg(dev, PRR, PRR_PHY_RST);
360	/* at least 10ms, here 20ms to be safe */
361 mdelay(20);
362 sr_write_reg(dev, PRR, 0);
363	/* at least 1ms, here 2ms to ensure the right register is read */
364 udelay(2 * 1000);
365
366 /* receive broadcast packets */
367 sr9700_set_multicast(netdev);
368
369 sr_mdio_write(netdev, mii->phy_id, MII_BMCR, BMCR_RESET);
370 sr_mdio_write(netdev, mii->phy_id, MII_ADVERTISE, ADVERTISE_ALL |
371 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
372 mii_nway_restart(mii);
373
374out:
375 return ret;
376}
377
378static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
379{
380 struct sk_buff *sr_skb;
381 int len;
382
383 /* skb content (packets) format :
384 * p0 p1 p2 ...... pm
385 * / \
386 * / \
387 * / \
388 * / \
389 * p0b0 p0b1 p0b2 p0b3 ...... p0b(n-4) p0b(n-3)...p0bn
390 *
391 * p0 : packet 0
392 * p0b0 : packet 0 byte 0
393 *
394 * b0: rx status
395 * b1: packet length (incl crc) low
396 * b2: packet length (incl crc) high
397 * b3..n-4: packet data
398 * bn-3..bn: ethernet packet crc
399 */
400 if (unlikely(skb->len < SR_RX_OVERHEAD)) {
401 netdev_err(dev->net, "unexpected tiny rx frame\n");
402 return 0;
403 }
404
405	/* one skb may contain multiple packets */
406 while (skb->len > SR_RX_OVERHEAD) {
407 if (skb->data[0] != 0x40)
408 return 0;
409
410 /* ignore the CRC length */
411 len = (skb->data[1] | (skb->data[2] << 8)) - 4;
412
413 if (len > ETH_FRAME_LEN)
414 return 0;
415
416 /* the last packet of current skb */
417 if (skb->len == (len + SR_RX_OVERHEAD)) {
418 skb_pull(skb, 3);
419 skb->len = len;
420 skb_set_tail_pointer(skb, len);
421 skb->truesize = len + sizeof(struct sk_buff);
422 return 2;
423 }
424
425		/* skb_clone is used for address alignment */
426 sr_skb = skb_clone(skb, GFP_ATOMIC);
427 if (!sr_skb)
428 return 0;
429
430 sr_skb->len = len;
431 sr_skb->data = skb->data + 3;
432 skb_set_tail_pointer(sr_skb, len);
433 sr_skb->truesize = len + sizeof(struct sk_buff);
434 usbnet_skb_return(dev, sr_skb);
435
436 skb_pull(skb, len + SR_RX_OVERHEAD);
437	}
438
439 return 0;
440}
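A single bulk-in transfer can thus carry several frames back to back, each wrapped in the 3-byte header and 4-byte crc tail described in the comment above. A standalone restatement of the same walk (deliver() and the flat buffer are hypothetical; the kernel version additionally clones skbs so payloads stay aligned):

#include <stdint.h>
#include <stddef.h>

#define SR_RX_OVERHEAD	7	/* 3-byte header + 4-byte crc tail */

static void demux_rx(const uint8_t *buf, size_t len,
		     void (*deliver)(const uint8_t *pkt, size_t n))
{
	while (len > SR_RX_OVERHEAD) {
		size_t raw, pkt;

		if (buf[0] != 0x40)		/* rx status: frame ok? */
			return;
		raw = buf[1] | (buf[2] << 8);	/* length including crc */
		if (raw < 4)
			return;
		pkt = raw - 4;			/* drop the crc tail */
		if (pkt + SR_RX_OVERHEAD > len)
			return;			/* truncated transfer */
		deliver(buf + 3, pkt);		/* payload starts after b2 */
		buf += pkt + SR_RX_OVERHEAD;
		len -= pkt + SR_RX_OVERHEAD;
	}
}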
441
442static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
443 gfp_t flags)
444{
445 int len;
446
447	/* SR9700 can only send out one ethernet packet at once.
448	 *
449	 * b0 b1 b2 ...... bn
450	 *
451	 * b0: packet length (not including this header) low
452	 * b1: packet length (not including this header) high
453	 * b2..bn: packet data
454	 * When usbnet pads the transfer (see below), the length is
455	 * bumped by one so the device also consumes the pad byte.
456	 */
457
458 len = skb->len;
459
460 if (skb_headroom(skb) < SR_TX_OVERHEAD) {
461 struct sk_buff *skb2;
462
463 skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
464 dev_kfree_skb_any(skb);
465 skb = skb2;
466 if (!skb)
467 return NULL;
468 }
469
470 __skb_push(skb, SR_TX_OVERHEAD);
471
472 /* usbnet adds padding if length is a multiple of packet size
473 * if so, adjust length value in header
474 */
475 if ((skb->len % dev->maxpacket) == 0)
476 len++;
477
478 skb->data[0] = len;
479 skb->data[1] = len >> 8;
480
481 return skb;
482}
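The subtle part of this path is the length bump: when the finished transfer lands exactly on a multiple of the endpoint's wMaxPacketSize, usbnet appends a single pad byte instead of a zero-length packet, and the header must count that byte or the device would keep waiting for it. A sketch of the framing under that assumption (build_tx() is hypothetical; out[] must have room for the header plus a possible pad byte):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define SR_TX_OVERHEAD	2	/* 2-byte length header */

/* Frame one packet for the bulk-out pipe; returns the byte count
 * before any pad byte the caller may append.
 */
static size_t build_tx(uint8_t *out, const uint8_t *pkt, size_t len,
		       size_t maxpacket)
{
	size_t hdr_len = len;
	size_t total = len + SR_TX_OVERHEAD;

	if (total % maxpacket == 0)	/* a pad byte will follow */
		hdr_len++;

	out[0] = hdr_len & 0xff;	/* length low, as in the code above */
	out[1] = hdr_len >> 8;		/* length high */
	memcpy(out + SR_TX_OVERHEAD, pkt, len);
	return total;
}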
483
484static void sr9700_status(struct usbnet *dev, struct urb *urb)
485{
486 int link;
487 u8 *buf;
488
489 /* format:
490 b0: net status
491 b1: tx status 1
492 b2: tx status 2
493 b3: rx status
494 b4: rx overflow
495 b5: rx count
496 b6: tx count
497 b7: gpr
498 */
499
500 if (urb->actual_length < 8)
501 return;
502
503 buf = urb->transfer_buffer;
504
505 link = !!(buf[0] & 0x40);
506 if (netif_carrier_ok(dev->net) != link) {
507 usbnet_link_change(dev, link, 1);
508 netdev_dbg(dev->net, "Link Status is: %d\n", link);
509 }
510}
511
512static int sr9700_link_reset(struct usbnet *dev)
513{
514 struct ethtool_cmd ecmd;
515
516 mii_check_media(&dev->mii, 1, 1);
517 mii_ethtool_gset(&dev->mii, &ecmd);
518
519 netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
520 ecmd.speed, ecmd.duplex);
521
522 return 0;
523}
524
525static const struct driver_info sr9700_driver_info = {
526 .description = "CoreChip SR9700 USB Ethernet",
527 .flags = FLAG_ETHER,
528 .bind = sr9700_bind,
529 .rx_fixup = sr9700_rx_fixup,
530 .tx_fixup = sr9700_tx_fixup,
531 .status = sr9700_status,
532 .link_reset = sr9700_link_reset,
533 .reset = sr9700_link_reset,
534};
535
536static const struct usb_device_id products[] = {
537 {
538 USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */
539 .driver_info = (unsigned long)&sr9700_driver_info,
540 },
541 {}, /* END */
542};
543
544MODULE_DEVICE_TABLE(usb, products);
545
546static struct usb_driver sr9700_usb_driver = {
547 .name = "sr9700",
548 .id_table = products,
549 .probe = usbnet_probe,
550 .disconnect = usbnet_disconnect,
551 .suspend = usbnet_suspend,
552 .resume = usbnet_resume,
553 .disable_hub_initiated_lpm = 1,
554};
555
556module_usb_driver(sr9700_usb_driver);
557
558MODULE_AUTHOR("liujl <liujunliang_ljl@163.com>");
559MODULE_DESCRIPTION("SR9700 one-chip USB 1.1 to Ethernet device from http://www.corechip-sz.com/");
560MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
new file mode 100644
index 000000000000..fd687c575e74
--- /dev/null
+++ b/drivers/net/usb/sr9700.h
@@ -0,0 +1,173 @@
1/*
2 * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices
3 *
4 * Author : Liu Junliang <liujunliang_ljl@163.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#ifndef _SR9700_H
12#define _SR9700_H
13
14/* sr9700 spec. register table on Linux platform */
15
16/* Network Control Reg */
17#define NCR 0x00
18#define NCR_RST (1 << 0)
19#define NCR_LBK (3 << 1)
20#define NCR_FDX (1 << 3)
21#define NCR_WAKEEN (1 << 6)
22/* Network Status Reg */
23#define NSR 0x01
24#define NSR_RXRDY (1 << 0)
25#define NSR_RXOV (1 << 1)
26#define NSR_TX1END (1 << 2)
27#define NSR_TX2END (1 << 3)
28#define NSR_TXFULL (1 << 4)
29#define NSR_WAKEST (1 << 5)
30#define NSR_LINKST (1 << 6)
31#define NSR_SPEED (1 << 7)
32/* Tx Control Reg */
33#define TCR 0x02
34#define TCR_CRC_DIS (1 << 1)
35#define TCR_PAD_DIS (1 << 2)
36#define TCR_LC_CARE (1 << 3)
37#define TCR_CRS_CARE (1 << 4)
38#define TCR_EXCECM (1 << 5)
39#define TCR_LF_EN (1 << 6)
40/* Tx Status Reg for Packet Index 1 */
41#define TSR1 0x03
42#define TSR1_EC (1 << 2)
43#define TSR1_COL (1 << 3)
44#define TSR1_LC (1 << 4)
45#define TSR1_NC (1 << 5)
46#define TSR1_LOC (1 << 6)
47#define TSR1_TLF (1 << 7)
48/* Tx Status Reg for Packet Index 2 */
49#define TSR2 0x04
50#define TSR2_EC (1 << 2)
51#define TSR2_COL (1 << 3)
52#define TSR2_LC (1 << 4)
53#define TSR2_NC (1 << 5)
54#define TSR2_LOC (1 << 6)
55#define TSR2_TLF (1 << 7)
56/* Rx Control Reg*/
57#define RCR 0x05
58#define RCR_RXEN (1 << 0)
59#define RCR_PRMSC (1 << 1)
60#define RCR_RUNT (1 << 2)
61#define RCR_ALL (1 << 3)
62#define RCR_DIS_CRC (1 << 4)
63#define RCR_DIS_LONG (1 << 5)
64/* Rx Status Reg */
65#define RSR 0x06
66#define RSR_AE (1 << 2)
67#define RSR_MF (1 << 6)
68#define RSR_RF (1 << 7)
69/* Rx Overflow Counter Reg */
70#define ROCR 0x07
71#define ROCR_ROC (0x7F << 0)
72#define ROCR_RXFU (1 << 7)
73/* Back Pressure Threshold Reg */
74#define BPTR 0x08
75#define BPTR_JPT (0x0F << 0)
76#define BPTR_BPHW (0x0F << 4)
77/* Flow Control Threshold Reg */
78#define FCTR 0x09
79#define FCTR_LWOT (0x0F << 0)
80#define FCTR_HWOT (0x0F << 4)
81/* rx/tx Flow Control Reg */
82#define FCR 0x0A
83#define FCR_FLCE (1 << 0)
84#define FCR_BKPA (1 << 4)
85#define FCR_TXPEN (1 << 5)
86#define FCR_TXPF (1 << 6)
87#define FCR_TXP0 (1 << 7)
88/* Eeprom & Phy Control Reg */
89#define EPCR 0x0B
90#define EPCR_ERRE (1 << 0)
91#define EPCR_ERPRW (1 << 1)
92#define EPCR_ERPRR (1 << 2)
93#define EPCR_EPOS (1 << 3)
94#define EPCR_WEP (1 << 4)
95/* Eeprom & Phy Address Reg */
96#define EPAR 0x0C
97#define EPAR_EROA (0x3F << 0)
98#define EPAR_PHY_ADR_MASK (0x03 << 6)
99#define EPAR_PHY_ADR (0x01 << 6)
100/* Eeprom & Phy Data Reg */
101#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
102/* Wakeup Control Reg */
103#define WCR 0x0F
104#define WCR_MAGICST (1 << 0)
105#define WCR_LINKST (1 << 2)
106#define WCR_MAGICEN (1 << 3)
107#define WCR_LINKEN (1 << 5)
108/* Physical Address Reg */
109#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
110/* Multicast Address Reg */
111#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
112/* 0x1e unused */
113/* Phy Reset Reg */
114#define PRR 0x1F
115#define PRR_PHY_RST (1 << 0)
116/* Tx sdram Write Pointer Address Low */
117#define TWPAL 0x20
118/* Tx sdram Write Pointer Address High */
119#define TWPAH 0x21
120/* Tx sdram Read Pointer Address Low */
121#define TRPAL 0x22
122/* Tx sdram Read Pointer Address High */
123#define TRPAH 0x23
124/* Rx sdram Write Pointer Address Low */
125#define RWPAL 0x24
126/* Rx sdram Write Pointer Address High */
127#define RWPAH 0x25
128/* Rx sdram Read Pointer Address Low */
129#define RRPAL 0x26
130/* Rx sdram Read Pointer Address High */
131#define RRPAH 0x27
132/* Vendor ID register */
133#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
134/* Product ID register */
135#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
136/* CHIP Revision register */
137#define CHIPR 0x2C
138/* 0x2D --> 0xEF unused */
139/* USB Device Address */
140#define USBDA 0xF0
141#define USBDA_USBFA (0x7F << 0)
142/* RX packet Counter Reg */
143#define RXC 0xF1
144/* Tx packet Counter & USB Status Reg */
145#define TXC_USBS 0xF2
146#define TXC_USBS_TXC0 (1 << 0)
147#define TXC_USBS_TXC1 (1 << 1)
148#define TXC_USBS_TXC2 (1 << 2)
149#define TXC_USBS_EP1RDY (1 << 5)
150#define TXC_USBS_SUSFLAG (1 << 6)
151#define TXC_USBS_RXFAULT (1 << 7)
152/* USB Control register */
153#define USBC 0xF4
154#define USBC_EP3NAK (1 << 4)
155#define USBC_EP3ACK (1 << 5)
156
157/* Register access commands and flags */
158#define SR_RD_REGS 0x00
159#define SR_WR_REGS 0x01
160#define SR_WR_REG 0x03
161#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
162#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
163
164/* parameters */
165#define SR_SHARE_TIMEOUT 1000
166#define SR_EEPROM_LEN 256
167#define SR_MCAST_SIZE 8
168#define SR_MCAST_ADDR_FLAG 0x80
169#define SR_MCAST_MAX 64
170#define SR_TX_OVERHEAD		2	/* 2-byte header */
171#define SR_RX_OVERHEAD		7	/* 3-byte header + 4-byte crc tail */
172
173#endif /* _SR9700_H */
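Since SR_REQ_RD_REG and SR_REQ_WR_REG are plain USB vendor-request type bytes, the same registers can also be poked from user space for bring-up or debugging. A hypothetical libusb equivalent of the driver's sr_read_reg() (illustration only, not part of the driver):

#include <stdint.h>
#include <libusb-1.0/libusb.h>

/* Read one register: bRequest = SR_RD_REGS (0x00), wValue = 0,
 * the register offset goes in wIndex, and one byte comes back in
 * the data stage -- mirroring the usbnet_read_cmd() call in sr9700.c.
 */
static int sr_read_reg_user(libusb_device_handle *h, uint8_t reg,
			    uint8_t *val)
{
	return libusb_control_transfer(h,
			LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR |
			LIBUSB_RECIPIENT_DEVICE,
			0x00 /* SR_RD_REGS */, 0 /* wValue */,
			reg /* wIndex */, val, 1, 1000 /* ms timeout */);
}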
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 27a00b006033..7b331e613e02 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -59,15 +59,13 @@
59 * For high speed, each frame comfortably fits almost 36 max size 59 * For high speed, each frame comfortably fits almost 36 max size
60 * Ethernet packets (so queues should be bigger). 60 * Ethernet packets (so queues should be bigger).
61 * 61 *
62 * REVISIT qlens should be members of 'struct usbnet'; the goal is to 62 * The goal is to let the USB host controller be busy for 5msec or
63 * let the USB host controller be busy for 5msec or more before an irq 63 * more before an irq is required, under load. Jumbograms change
64 * is required, under load. Jumbograms change the equation. 64 * the equation.
65 */ 65 */
66#define RX_MAX_QUEUE_MEMORY (60 * 1518) 66#define MAX_QUEUE_MEMORY (60 * 1518)
67#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \ 67#define RX_QLEN(dev) ((dev)->rx_qlen)
68 (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4) 68#define TX_QLEN(dev) ((dev)->tx_qlen)
69#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
70 (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
71 69
72// reawaken network queue this soon after stopping; else watchdog barks 70// reawaken network queue this soon after stopping; else watchdog barks
73#define TX_TIMEOUT_JIFFIES (5*HZ) 71#define TX_TIMEOUT_JIFFIES (5*HZ)
@@ -347,6 +345,31 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
347} 345}
348EXPORT_SYMBOL_GPL(usbnet_skb_return); 346EXPORT_SYMBOL_GPL(usbnet_skb_return);
349 347
348/* must be called if hard_mtu or rx_urb_size changed */
349void usbnet_update_max_qlen(struct usbnet *dev)
350{
351 enum usb_device_speed speed = dev->udev->speed;
352
353 switch (speed) {
354 case USB_SPEED_HIGH:
355 dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
356 dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
357 break;
358 case USB_SPEED_SUPER:
359 /*
360	 * Don't take the default 5ms qlen for super-speed HCs,
361	 * to save memory; iperf tests show a 2.5ms qlen
362	 * works well
363 */
364 dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
365 dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
366 break;
367 default:
368 dev->rx_qlen = dev->tx_qlen = 4;
369 }
370}
371EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
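For the common 1518-byte rx_urb_size the new per-device fields work out as below; a quick check that follows directly from the constants above:

#define MAX_QUEUE_MEMORY	(60 * 1518)

/* high speed: buffer up to 60 max-size Ethernet frames per direction */
int rx_qlen_high  =     MAX_QUEUE_MEMORY / 1518;	/*  60 URBs */
/* super speed: five times the memory budget, per the comment above */
int rx_qlen_super = 5 * MAX_QUEUE_MEMORY / 1518;	/* 300 URBs */
/* full/low speed and anything unrecognized falls back to a qlen of 4 */

Any driver that changes hard_mtu or rx_urb_size after probe must recompute these, which is exactly why the hunks below add usbnet_update_max_qlen() calls at the MTU-change, open, settings and link-change points.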
372
350 373
351/*------------------------------------------------------------------------- 374/*-------------------------------------------------------------------------
352 * 375 *
@@ -375,6 +398,9 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
375 usbnet_unlink_rx_urbs(dev); 398 usbnet_unlink_rx_urbs(dev);
376 } 399 }
377 400
401	/* max qlen depends on hard_mtu and rx_urb_size */
402 usbnet_update_max_qlen(dev);
403
378 return 0; 404 return 0;
379} 405}
380EXPORT_SYMBOL_GPL(usbnet_change_mtu); 406EXPORT_SYMBOL_GPL(usbnet_change_mtu);
@@ -843,6 +869,9 @@ int usbnet_open (struct net_device *net)
843 goto done; 869 goto done;
844 } 870 }
845 871
872 /* hard_mtu or rx_urb_size may change in reset() */
873 usbnet_update_max_qlen(dev);
874
846 // insist peer be connected 875 // insist peer be connected
847 if (info->check_connect && (retval = info->check_connect (dev)) < 0) { 876 if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
848 netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval); 877 netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
@@ -927,6 +956,9 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
927 if (dev->driver_info->link_reset) 956 if (dev->driver_info->link_reset)
928 dev->driver_info->link_reset(dev); 957 dev->driver_info->link_reset(dev);
929 958
959 /* hard_mtu or rx_urb_size may change in link_reset() */
960 usbnet_update_max_qlen(dev);
961
930 return retval; 962 return retval;
931 963
932} 964}
@@ -1020,6 +1052,9 @@ static void __handle_link_change(struct usbnet *dev)
1020 tasklet_schedule(&dev->bh); 1052 tasklet_schedule(&dev->bh);
1021 } 1053 }
1022 1054
1055 /* hard_mtu or rx_urb_size may change during link change */
1056 usbnet_update_max_qlen(dev);
1057
1023 clear_bit(EVENT_LINK_CHANGE, &dev->flags); 1058 clear_bit(EVENT_LINK_CHANGE, &dev->flags);
1024} 1059}
1025 1060
@@ -1632,11 +1667,18 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1632 dev->rx_urb_size = dev->hard_mtu; 1667 dev->rx_urb_size = dev->hard_mtu;
1633 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); 1668 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1634 1669
1670 /* let userspace know we have a random address */
1671 if (ether_addr_equal(net->dev_addr, node_id))
1672 net->addr_assign_type = NET_ADDR_RANDOM;
1673
1635 if ((dev->driver_info->flags & FLAG_WLAN) != 0) 1674 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1636 SET_NETDEV_DEVTYPE(net, &wlan_type); 1675 SET_NETDEV_DEVTYPE(net, &wlan_type);
1637 if ((dev->driver_info->flags & FLAG_WWAN) != 0) 1676 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1638 SET_NETDEV_DEVTYPE(net, &wwan_type); 1677 SET_NETDEV_DEVTYPE(net, &wwan_type);
1639 1678
1679 /* initialize max rx_qlen and tx_qlen */
1680 usbnet_update_max_qlen(dev);
1681
1640 status = register_netdev (net); 1682 status = register_netdev (net);
1641 if (status) 1683 if (status)
1642 goto out4; 1684 goto out4;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3d2a90a62649..defec2b3c5a4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -106,6 +106,9 @@ struct virtnet_info {
106 /* Has control virtqueue */ 106 /* Has control virtqueue */
107 bool has_cvq; 107 bool has_cvq;
108 108
109 /* Host can handle any s/g split between our header and packet data */
110 bool any_header_sg;
111
109 /* enable config space updates */ 112 /* enable config space updates */
110 bool config_enable; 113 bool config_enable;
111 114
@@ -669,12 +672,28 @@ static void free_old_xmit_skbs(struct send_queue *sq)
669 672
670static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 673static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
671{ 674{
672 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 675 struct skb_vnet_hdr *hdr;
673 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 676 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
674 struct virtnet_info *vi = sq->vq->vdev->priv; 677 struct virtnet_info *vi = sq->vq->vdev->priv;
675 unsigned num_sg; 678 unsigned num_sg;
679 unsigned hdr_len;
680 bool can_push;
676 681
677 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 682 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
683 if (vi->mergeable_rx_bufs)
684 hdr_len = sizeof hdr->mhdr;
685 else
686 hdr_len = sizeof hdr->hdr;
687
688 can_push = vi->any_header_sg &&
689 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
690 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
691 /* Even if we can, don't push here yet as this would skew
692 * csum_start offset below. */
693 if (can_push)
694 hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
695 else
696 hdr = skb_vnet_hdr(skb);
678 697
679 if (skb->ip_summed == CHECKSUM_PARTIAL) { 698 if (skb->ip_summed == CHECKSUM_PARTIAL) {
680 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 699 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
@@ -703,15 +722,18 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
703 hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; 722 hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
704 } 723 }
705 724
706 hdr->mhdr.num_buffers = 0;
707
708 /* Encode metadata header at front. */
709 if (vi->mergeable_rx_bufs) 725 if (vi->mergeable_rx_bufs)
710 sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr); 726 hdr->mhdr.num_buffers = 0;
711 else
712 sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
713 727
714 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; 728 if (can_push) {
729 __skb_push(skb, hdr_len);
730 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
731 /* Pull header back to avoid skew in tx bytes calculations. */
732 __skb_pull(skb, hdr_len);
733 } else {
734 sg_set_buf(sq->sg, hdr, hdr_len);
735 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
736 }
715 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 737 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
716} 738}
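The can_push test above decides whether the virtio-net header can be written into the skb's own headroom, saving one scatterlist entry per packet. A compact restatement of the condition (kernel context; names mirror the hunk, with hdr_align standing in for __alignof__(*hdr)):

#include <linux/skbuff.h>

static bool can_push_hdr(const struct sk_buff *skb, unsigned int hdr_len,
			 unsigned long hdr_align, bool any_header_sg)
{
	return any_header_sg &&			/* host takes any s/g layout */
	       !((unsigned long)skb->data & (hdr_align - 1)) &&
						/* pushed header lands aligned */
	       !skb_header_cloned(skb) &&	/* header bytes not shared */
	       skb_headroom(skb) >= hdr_len;	/* enough room in front */
}

When the test holds, __skb_push() and __skb_pull() bracket skb_to_sgvec() so the byte accounting still sees only the packet data, as the hunk's comment notes.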
717 739
@@ -1516,6 +1538,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1516 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 1538 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1517 /* (!csum && gso) case will be fixed by register_netdev() */ 1539 /* (!csum && gso) case will be fixed by register_netdev() */
1518 } 1540 }
1541 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
1542 dev->features |= NETIF_F_RXCSUM;
1519 1543
1520 dev->vlan_features = dev->features; 1544 dev->vlan_features = dev->features;
1521 1545
@@ -1552,6 +1576,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1552 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1576 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1553 vi->mergeable_rx_bufs = true; 1577 vi->mergeable_rx_bufs = true;
1554 1578
1579 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
1580 vi->any_header_sg = true;
1581
1555 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1582 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1556 vi->has_cvq = true; 1583 vi->has_cvq = true;
1557 1584
@@ -1727,6 +1754,7 @@ static unsigned int features[] = {
1727 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1754 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1728 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 1755 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
1729 VIRTIO_NET_F_CTRL_MAC_ADDR, 1756 VIRTIO_NET_F_CTRL_MAC_ADDR,
1757 VIRTIO_F_ANY_LAYOUT,
1730}; 1758};
1731 1759
1732static struct virtio_driver virtio_net_driver = { 1760static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 55a62cae2cb4..7e2788c488ed 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
313 struct pci_dev *pdev) 313 struct pci_dev *pdev)
314{ 314{
315 if (tbi->map_type == VMXNET3_MAP_SINGLE) 315 if (tbi->map_type == VMXNET3_MAP_SINGLE)
316 pci_unmap_single(pdev, tbi->dma_addr, tbi->len, 316 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
317 PCI_DMA_TODEVICE); 317 PCI_DMA_TODEVICE);
318 else if (tbi->map_type == VMXNET3_MAP_PAGE) 318 else if (tbi->map_type == VMXNET3_MAP_PAGE)
319 pci_unmap_page(pdev, tbi->dma_addr, tbi->len, 319 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
320 PCI_DMA_TODEVICE); 320 PCI_DMA_TODEVICE);
321 else 321 else
322 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); 322 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
@@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
429 struct vmxnet3_adapter *adapter) 429 struct vmxnet3_adapter *adapter)
430{ 430{
431 if (tq->tx_ring.base) { 431 if (tq->tx_ring.base) {
432 pci_free_consistent(adapter->pdev, tq->tx_ring.size * 432 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
433 sizeof(struct Vmxnet3_TxDesc), 433 sizeof(struct Vmxnet3_TxDesc),
434 tq->tx_ring.base, tq->tx_ring.basePA); 434 tq->tx_ring.base, tq->tx_ring.basePA);
435 tq->tx_ring.base = NULL; 435 tq->tx_ring.base = NULL;
436 } 436 }
437 if (tq->data_ring.base) { 437 if (tq->data_ring.base) {
438 pci_free_consistent(adapter->pdev, tq->data_ring.size * 438 dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
439 sizeof(struct Vmxnet3_TxDataDesc), 439 sizeof(struct Vmxnet3_TxDataDesc),
440 tq->data_ring.base, tq->data_ring.basePA); 440 tq->data_ring.base, tq->data_ring.basePA);
441 tq->data_ring.base = NULL; 441 tq->data_ring.base = NULL;
442 } 442 }
443 if (tq->comp_ring.base) { 443 if (tq->comp_ring.base) {
444 pci_free_consistent(adapter->pdev, tq->comp_ring.size * 444 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
445 sizeof(struct Vmxnet3_TxCompDesc), 445 sizeof(struct Vmxnet3_TxCompDesc),
446 tq->comp_ring.base, tq->comp_ring.basePA); 446 tq->comp_ring.base, tq->comp_ring.basePA);
447 tq->comp_ring.base = NULL; 447 tq->comp_ring.base = NULL;
448 } 448 }
449 kfree(tq->buf_info); 449 if (tq->buf_info) {
450 tq->buf_info = NULL; 450 dma_free_coherent(&adapter->pdev->dev,
451 tq->tx_ring.size * sizeof(tq->buf_info[0]),
452 tq->buf_info, tq->buf_info_pa);
453 tq->buf_info = NULL;
454 }
451} 455}
452 456
453 457
@@ -496,37 +500,38 @@ static int
496vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, 500vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
497 struct vmxnet3_adapter *adapter) 501 struct vmxnet3_adapter *adapter)
498{ 502{
503 size_t sz;
504
499 BUG_ON(tq->tx_ring.base || tq->data_ring.base || 505 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
500 tq->comp_ring.base || tq->buf_info); 506 tq->comp_ring.base || tq->buf_info);
501 507
502 tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size 508 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
503 * sizeof(struct Vmxnet3_TxDesc), 509 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
504 &tq->tx_ring.basePA); 510 &tq->tx_ring.basePA, GFP_KERNEL);
505 if (!tq->tx_ring.base) { 511 if (!tq->tx_ring.base) {
506 netdev_err(adapter->netdev, "failed to allocate tx ring\n"); 512 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
507 goto err; 513 goto err;
508 } 514 }
509 515
510 tq->data_ring.base = pci_alloc_consistent(adapter->pdev, 516 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
511 tq->data_ring.size * 517 tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
512 sizeof(struct Vmxnet3_TxDataDesc), 518 &tq->data_ring.basePA, GFP_KERNEL);
513 &tq->data_ring.basePA);
514 if (!tq->data_ring.base) { 519 if (!tq->data_ring.base) {
515 netdev_err(adapter->netdev, "failed to allocate data ring\n"); 520 netdev_err(adapter->netdev, "failed to allocate data ring\n");
516 goto err; 521 goto err;
517 } 522 }
518 523
519 tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, 524 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
520 tq->comp_ring.size * 525 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
521 sizeof(struct Vmxnet3_TxCompDesc), 526 &tq->comp_ring.basePA, GFP_KERNEL);
522 &tq->comp_ring.basePA);
523 if (!tq->comp_ring.base) { 527 if (!tq->comp_ring.base) {
524 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); 528 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
525 goto err; 529 goto err;
526 } 530 }
527 531
528 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), 532 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
529 GFP_KERNEL); 533 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
534 &tq->buf_info_pa, GFP_KERNEL);
530 if (!tq->buf_info) 535 if (!tq->buf_info)
531 goto err; 536 goto err;
532 537
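Every vmxnet3 hunk in this range follows one mechanical conversion: the legacy pci_alloc_consistent()/pci_free_consistent() wrappers, which hard-code GFP_ATOMIC, become generic DMA API calls against the PCI device's struct device with an explicit GFP_KERNEL. The shape of the change, extracted as a sketch (alloc_ring()/free_ring() are illustrative names):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* before: ring = pci_alloc_consistent(pdev, sz, &pa); */
static void *alloc_ring(struct pci_dev *pdev, size_t sz, dma_addr_t *pa)
{
	return dma_alloc_coherent(&pdev->dev, sz, pa, GFP_KERNEL);
}

/* before: pci_free_consistent(pdev, sz, ring, pa); */
static void free_ring(struct pci_dev *pdev, size_t sz, void *ring,
		      dma_addr_t pa)
{
	dma_free_coherent(&pdev->dev, sz, ring, pa);
}

The buf_info arrays switch from kcalloc() to dma_zalloc_coherent() for a further reason: the device reads them through the ddPA pointers, so they need a real DMA handle (buf_info_pa) instead of a virt_to_phys() value.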
@@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
578 break; 583 break;
579 } 584 }
580 585
581 rbi->dma_addr = pci_map_single(adapter->pdev, 586 rbi->dma_addr = dma_map_single(
587 &adapter->pdev->dev,
582 rbi->skb->data, rbi->len, 588 rbi->skb->data, rbi->len,
583 PCI_DMA_FROMDEVICE); 589 PCI_DMA_FROMDEVICE);
584 } else { 590 } else {
@@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
595 rq->stats.rx_buf_alloc_failure++; 601 rq->stats.rx_buf_alloc_failure++;
596 break; 602 break;
597 } 603 }
598 rbi->dma_addr = pci_map_page(adapter->pdev, 604 rbi->dma_addr = dma_map_page(
605 &adapter->pdev->dev,
599 rbi->page, 0, PAGE_SIZE, 606 rbi->page, 0, PAGE_SIZE,
600 PCI_DMA_FROMDEVICE); 607 PCI_DMA_FROMDEVICE);
601 } else { 608 } else {
@@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
705 712
706 tbi = tq->buf_info + tq->tx_ring.next2fill; 713 tbi = tq->buf_info + tq->tx_ring.next2fill;
707 tbi->map_type = VMXNET3_MAP_SINGLE; 714 tbi->map_type = VMXNET3_MAP_SINGLE;
708 tbi->dma_addr = pci_map_single(adapter->pdev, 715 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
709 skb->data + buf_offset, buf_size, 716 skb->data + buf_offset, buf_size,
710 PCI_DMA_TODEVICE); 717 PCI_DMA_TODEVICE);
711 718
@@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1221 goto rcd_done; 1228 goto rcd_done;
1222 } 1229 }
1223 1230
1224 pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, 1231 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
1232 rbi->len,
1225 PCI_DMA_FROMDEVICE); 1233 PCI_DMA_FROMDEVICE);
1226 1234
1227#ifdef VMXNET3_RSS 1235#ifdef VMXNET3_RSS
@@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1233 1241
1234 /* Immediate refill */ 1242 /* Immediate refill */
1235 rbi->skb = new_skb; 1243 rbi->skb = new_skb;
1236 rbi->dma_addr = pci_map_single(adapter->pdev, 1244 rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
1237 rbi->skb->data, rbi->len, 1245 rbi->skb->data, rbi->len,
1238 PCI_DMA_FROMDEVICE); 1246 PCI_DMA_FROMDEVICE);
1239 rxd->addr = cpu_to_le64(rbi->dma_addr); 1247 rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1267 } 1275 }
1268 1276
1269 if (rcd->len) { 1277 if (rcd->len) {
1270 pci_unmap_page(adapter->pdev, 1278 dma_unmap_page(&adapter->pdev->dev,
1271 rbi->dma_addr, rbi->len, 1279 rbi->dma_addr, rbi->len,
1272 PCI_DMA_FROMDEVICE); 1280 PCI_DMA_FROMDEVICE);
1273 1281
@@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1276 1284
1277 /* Immediate refill */ 1285 /* Immediate refill */
1278 rbi->page = new_page; 1286 rbi->page = new_page;
1279 rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page, 1287 rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
1288 rbi->page,
1280 0, PAGE_SIZE, 1289 0, PAGE_SIZE,
1281 PCI_DMA_FROMDEVICE); 1290 PCI_DMA_FROMDEVICE);
1282 rxd->addr = cpu_to_le64(rbi->dma_addr); 1291 rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1352 1361
1353 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && 1362 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1354 rq->buf_info[ring_idx][i].skb) { 1363 rq->buf_info[ring_idx][i].skb) {
1355 pci_unmap_single(adapter->pdev, rxd->addr, 1364 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1356 rxd->len, PCI_DMA_FROMDEVICE); 1365 rxd->len, PCI_DMA_FROMDEVICE);
1357 dev_kfree_skb(rq->buf_info[ring_idx][i].skb); 1366 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1358 rq->buf_info[ring_idx][i].skb = NULL; 1367 rq->buf_info[ring_idx][i].skb = NULL;
1359 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && 1368 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1360 rq->buf_info[ring_idx][i].page) { 1369 rq->buf_info[ring_idx][i].page) {
1361 pci_unmap_page(adapter->pdev, rxd->addr, 1370 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1362 rxd->len, PCI_DMA_FROMDEVICE); 1371 rxd->len, PCI_DMA_FROMDEVICE);
1363 put_page(rq->buf_info[ring_idx][i].page); 1372 put_page(rq->buf_info[ring_idx][i].page);
1364 rq->buf_info[ring_idx][i].page = NULL; 1373 rq->buf_info[ring_idx][i].page = NULL;
@@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1400 } 1409 }
1401 1410
1402 1411
1403 kfree(rq->buf_info[0]);
1404
1405 for (i = 0; i < 2; i++) { 1412 for (i = 0; i < 2; i++) {
1406 if (rq->rx_ring[i].base) { 1413 if (rq->rx_ring[i].base) {
1407 pci_free_consistent(adapter->pdev, rq->rx_ring[i].size 1414 dma_free_coherent(&adapter->pdev->dev,
1408 * sizeof(struct Vmxnet3_RxDesc), 1415 rq->rx_ring[i].size
1409 rq->rx_ring[i].base, 1416 * sizeof(struct Vmxnet3_RxDesc),
1410 rq->rx_ring[i].basePA); 1417 rq->rx_ring[i].base,
1418 rq->rx_ring[i].basePA);
1411 rq->rx_ring[i].base = NULL; 1419 rq->rx_ring[i].base = NULL;
1412 } 1420 }
1413 rq->buf_info[i] = NULL; 1421 rq->buf_info[i] = NULL;
1414 } 1422 }
1415 1423
1416 if (rq->comp_ring.base) { 1424 if (rq->comp_ring.base) {
1417 pci_free_consistent(adapter->pdev, rq->comp_ring.size * 1425 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1418 sizeof(struct Vmxnet3_RxCompDesc), 1426 * sizeof(struct Vmxnet3_RxCompDesc),
1419 rq->comp_ring.base, rq->comp_ring.basePA); 1427 rq->comp_ring.base, rq->comp_ring.basePA);
1420 rq->comp_ring.base = NULL; 1428 rq->comp_ring.base = NULL;
1421 } 1429 }
1430
1431 if (rq->buf_info[0]) {
1432 size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
1433 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1434 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1435 rq->buf_info_pa);
1436 }
1422} 1437}
1423 1438
1424 1439
@@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1503 for (i = 0; i < 2; i++) { 1518 for (i = 0; i < 2; i++) {
1504 1519
1505 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); 1520 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1506 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, 1521 rq->rx_ring[i].base = dma_alloc_coherent(
1507 &rq->rx_ring[i].basePA); 1522 &adapter->pdev->dev, sz,
1523 &rq->rx_ring[i].basePA,
1524 GFP_KERNEL);
1508 if (!rq->rx_ring[i].base) { 1525 if (!rq->rx_ring[i].base) {
1509 netdev_err(adapter->netdev, 1526 netdev_err(adapter->netdev,
1510 "failed to allocate rx ring %d\n", i); 1527 "failed to allocate rx ring %d\n", i);
@@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1513 } 1530 }
1514 1531
1515 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); 1532 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1516 rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, 1533 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1517 &rq->comp_ring.basePA); 1534 &rq->comp_ring.basePA,
1535 GFP_KERNEL);
1518 if (!rq->comp_ring.base) { 1536 if (!rq->comp_ring.base) {
1519 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); 1537 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1520 goto err; 1538 goto err;
@@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1522 1540
1523 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1541 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1524 rq->rx_ring[1].size); 1542 rq->rx_ring[1].size);
1525 bi = kzalloc(sz, GFP_KERNEL); 1543 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1544 GFP_KERNEL);
1526 if (!bi) 1545 if (!bi)
1527 goto err; 1546 goto err;
1528 1547
@@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2005 struct Vmxnet3_RxFilterConf *rxConf = 2024 struct Vmxnet3_RxFilterConf *rxConf =
2006 &adapter->shared->devRead.rxFilterConf; 2025 &adapter->shared->devRead.rxFilterConf;
2007 u8 *new_table = NULL; 2026 u8 *new_table = NULL;
2027 dma_addr_t new_table_pa = 0;
2008 u32 new_mode = VMXNET3_RXM_UCAST; 2028 u32 new_mode = VMXNET3_RXM_UCAST;
2009 2029
2010 if (netdev->flags & IFF_PROMISC) { 2030 if (netdev->flags & IFF_PROMISC) {
@@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev)
2028 new_mode |= VMXNET3_RXM_MCAST; 2048 new_mode |= VMXNET3_RXM_MCAST;
2029 rxConf->mfTableLen = cpu_to_le16( 2049 rxConf->mfTableLen = cpu_to_le16(
2030 netdev_mc_count(netdev) * ETH_ALEN); 2050 netdev_mc_count(netdev) * ETH_ALEN);
2031 rxConf->mfTablePA = cpu_to_le64(virt_to_phys( 2051 new_table_pa = dma_map_single(
2032 new_table)); 2052 &adapter->pdev->dev,
2053 new_table,
2054 rxConf->mfTableLen,
2055 PCI_DMA_TODEVICE);
2056 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2033 } else { 2057 } else {
2034 netdev_info(netdev, "failed to copy mcast list" 2058 netdev_info(netdev, "failed to copy mcast list"
2035 ", setting ALL_MULTI\n"); 2059 ", setting ALL_MULTI\n");
@@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev)
2056 VMXNET3_CMD_UPDATE_MAC_FILTERS); 2080 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2057 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2081 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2058 2082
2059 kfree(new_table); 2083 if (new_table) {
2084 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2085 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2086 kfree(new_table);
2087 }
2060} 2088}
2061 2089
2062void 2090void
@@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2096 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); 2124 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2097 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); 2125 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2098 2126
2099 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); 2127 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2100 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); 2128 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2101 2129
2102 /* set up feature flags */ 2130 /* set up feature flags */
@@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2125 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); 2153 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2126 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); 2154 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2127 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); 2155 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2128 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); 2156 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2129 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); 2157 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2130 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); 2158 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2131 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); 2159 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
@@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2143 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); 2171 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2144 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); 2172 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2145 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); 2173 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2146 rqc->ddPA = cpu_to_le64(virt_to_phys( 2174 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
2147 rq->buf_info));
2148 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); 2175 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2149 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); 2176 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2150 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); 2177 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2184 i, adapter->num_rx_queues); 2211 i, adapter->num_rx_queues);
2185 2212
2186 devRead->rssConfDesc.confVer = 1; 2213 devRead->rssConfDesc.confVer = 1;
2187 devRead->rssConfDesc.confLen = sizeof(*rssConf); 2214 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2188 devRead->rssConfDesc.confPA = virt_to_phys(rssConf); 2215 devRead->rssConfDesc.confPA =
2216 cpu_to_le64(adapter->rss_conf_pa);
2189 } 2217 }
2190 2218
2191#endif /* VMXNET3_RSS */ 2219#endif /* VMXNET3_RSS */
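Note the endianness fix folded into this hunk: the RSS descriptor fields are device-visible and little-endian, so confLen and confPA now go through cpu_to_le32()/cpu_to_le64() like the neighbouring descriptors. On little-endian hosts the wrappers compile away; on big-endian hosts the previous bare assignments would have handed the device byte-swapped values.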
@@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2948 adapter->pdev = pdev; 2976 adapter->pdev = pdev;
2949 2977
2950 spin_lock_init(&adapter->cmd_lock); 2978 spin_lock_init(&adapter->cmd_lock);
2951 adapter->shared = pci_alloc_consistent(adapter->pdev, 2979 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
2952 sizeof(struct Vmxnet3_DriverShared), 2980 sizeof(struct vmxnet3_adapter),
2953 &adapter->shared_pa); 2981 PCI_DMA_TODEVICE);
2982 adapter->shared = dma_alloc_coherent(
2983 &adapter->pdev->dev,
2984 sizeof(struct Vmxnet3_DriverShared),
2985 &adapter->shared_pa, GFP_KERNEL);
2954 if (!adapter->shared) { 2986 if (!adapter->shared) {
2955 dev_err(&pdev->dev, "Failed to allocate memory\n"); 2987 dev_err(&pdev->dev, "Failed to allocate memory\n");
2956 err = -ENOMEM; 2988 err = -ENOMEM;
@@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2963 2995
2964 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 2996 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2965 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 2997 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2966 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, 2998 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
2967 &adapter->queue_desc_pa); 2999 &adapter->queue_desc_pa,
3000 GFP_KERNEL);
2968 3001
2969 if (!adapter->tqd_start) { 3002 if (!adapter->tqd_start) {
2970 dev_err(&pdev->dev, "Failed to allocate memory\n"); 3003 dev_err(&pdev->dev, "Failed to allocate memory\n");
@@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2974 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + 3007 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2975 adapter->num_tx_queues); 3008 adapter->num_tx_queues);
2976 3009
2977 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 3010 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3011 sizeof(struct Vmxnet3_PMConf),
3012 &adapter->pm_conf_pa,
3013 GFP_KERNEL);
2978 if (adapter->pm_conf == NULL) { 3014 if (adapter->pm_conf == NULL) {
2979 err = -ENOMEM; 3015 err = -ENOMEM;
2980 goto err_alloc_pm; 3016 goto err_alloc_pm;
@@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2982 3018
2983#ifdef VMXNET3_RSS 3019#ifdef VMXNET3_RSS
2984 3020
2985 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); 3021 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3022 sizeof(struct UPT1_RSSConf),
3023 &adapter->rss_conf_pa,
3024 GFP_KERNEL);
2986 if (adapter->rss_conf == NULL) { 3025 if (adapter->rss_conf == NULL) {
2987 err = -ENOMEM; 3026 err = -ENOMEM;
2988 goto err_alloc_rss; 3027 goto err_alloc_rss;
@@ -3077,17 +3116,22 @@ err_ver:
3077 vmxnet3_free_pci_resources(adapter); 3116 vmxnet3_free_pci_resources(adapter);
3078err_alloc_pci: 3117err_alloc_pci:
3079#ifdef VMXNET3_RSS 3118#ifdef VMXNET3_RSS
3080 kfree(adapter->rss_conf); 3119 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3120 adapter->rss_conf, adapter->rss_conf_pa);
3081err_alloc_rss: 3121err_alloc_rss:
3082#endif 3122#endif
3083 kfree(adapter->pm_conf); 3123 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3124 adapter->pm_conf, adapter->pm_conf_pa);
3084err_alloc_pm: 3125err_alloc_pm:
3085 pci_free_consistent(adapter->pdev, size, adapter->tqd_start, 3126 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3086 adapter->queue_desc_pa); 3127 adapter->queue_desc_pa);
3087err_alloc_queue_desc: 3128err_alloc_queue_desc:
3088 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3129 dma_free_coherent(&adapter->pdev->dev,
3089 adapter->shared, adapter->shared_pa); 3130 sizeof(struct Vmxnet3_DriverShared),
3131 adapter->shared, adapter->shared_pa);
3090err_alloc_shared: 3132err_alloc_shared:
3133 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3134 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3091 pci_set_drvdata(pdev, NULL); 3135 pci_set_drvdata(pdev, NULL);
3092 free_netdev(netdev); 3136 free_netdev(netdev);
3093 return err; 3137 return err;
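The probe error path keeps the kernel's inverted-goto unwind: each label undoes exactly the allocations that succeeded before the jump, in reverse order, and the new dma_unmap_single() lands at the bottom because the adapter mapping is created first. The idiom, compressed to a sketch with invented names:

        a = alloc_a();
        if (!a)
                goto err_a;
        b = alloc_b();
        if (!b)
                goto err_b;
        return 0;
err_b:
        free_a(a);      /* undo only what succeeded */
err_a:
        return -ENOMEM;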
@@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev)
3118 vmxnet3_free_intr_resources(adapter); 3162 vmxnet3_free_intr_resources(adapter);
3119 vmxnet3_free_pci_resources(adapter); 3163 vmxnet3_free_pci_resources(adapter);
3120#ifdef VMXNET3_RSS 3164#ifdef VMXNET3_RSS
3121 kfree(adapter->rss_conf); 3165 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3166 adapter->rss_conf, adapter->rss_conf_pa);
3122#endif 3167#endif
3123 kfree(adapter->pm_conf); 3168 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3169 adapter->pm_conf, adapter->pm_conf_pa);
3124 3170
3125 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 3171 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3126 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; 3172 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3127 pci_free_consistent(adapter->pdev, size, adapter->tqd_start, 3173 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3128 adapter->queue_desc_pa); 3174 adapter->queue_desc_pa);
3129 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3175 dma_free_coherent(&adapter->pdev->dev,
3130 adapter->shared, adapter->shared_pa); 3176 sizeof(struct Vmxnet3_DriverShared),
3177 adapter->shared, adapter->shared_pa);
3178 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3179 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3131 free_netdev(netdev); 3180 free_netdev(netdev);
3132} 3181}
3133 3182
@@ -3227,8 +3276,8 @@ skip_arp:
3227 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); 3276 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3228 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( 3277 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3229 *pmConf)); 3278 *pmConf));
3230 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3279 adapter->shared->devRead.pmConfDesc.confPA =
3231 pmConf)); 3280 cpu_to_le64(adapter->pm_conf_pa);
3232 3281
3233 spin_lock_irqsave(&adapter->cmd_lock, flags); 3282 spin_lock_irqsave(&adapter->cmd_lock, flags);
3234 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3283 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -3265,8 +3314,8 @@ vmxnet3_resume(struct device *device)
3265 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); 3314 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3266 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( 3315 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3267 *pmConf)); 3316 *pmConf));
3268 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3317 adapter->shared->devRead.pmConfDesc.confPA =
3269 pmConf)); 3318 cpu_to_le64(adapter->pm_conf_pa);
3270 3319
3271 netif_device_attach(netdev); 3320 netif_device_attach(netdev);
3272 pci_set_power_state(pdev, PCI_D0); 3321 pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 35418146fa17..a03f358fd58b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
70/* 70/*
71 * Version numbers 71 * Version numbers
72 */ 72 */
73#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k" 73#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k"
74 74
75/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 75/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
76#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00 76#define VMXNET3_DRIVER_VERSION_NUM 0x01020000
77 77
78#if defined(CONFIG_PCI_MSI) 78#if defined(CONFIG_PCI_MSI)
79 /* RSS only makes sense if MSI-X is supported. */ 79 /* RSS only makes sense if MSI-X is supported. */
@@ -229,6 +229,7 @@ struct vmxnet3_tx_queue {
229 spinlock_t tx_lock; 229 spinlock_t tx_lock;
230 struct vmxnet3_cmd_ring tx_ring; 230 struct vmxnet3_cmd_ring tx_ring;
231 struct vmxnet3_tx_buf_info *buf_info; 231 struct vmxnet3_tx_buf_info *buf_info;
232 dma_addr_t buf_info_pa;
232 struct vmxnet3_tx_data_ring data_ring; 233 struct vmxnet3_tx_data_ring data_ring;
233 struct vmxnet3_comp_ring comp_ring; 234 struct vmxnet3_comp_ring comp_ring;
234 struct Vmxnet3_TxQueueCtrl *shared; 235 struct Vmxnet3_TxQueueCtrl *shared;
@@ -277,6 +278,7 @@ struct vmxnet3_rx_queue {
277 u32 qid; /* rqID in RCD for buffer from 1st ring */ 278 u32 qid; /* rqID in RCD for buffer from 1st ring */
278 u32 qid2; /* rqID in RCD for buffer from 2nd ring */ 279 u32 qid2; /* rqID in RCD for buffer from 2nd ring */
279 struct vmxnet3_rx_buf_info *buf_info[2]; 280 struct vmxnet3_rx_buf_info *buf_info[2];
281 dma_addr_t buf_info_pa;
280 struct Vmxnet3_RxQueueCtrl *shared; 282 struct Vmxnet3_RxQueueCtrl *shared;
281 struct vmxnet3_rq_driver_stats stats; 283 struct vmxnet3_rq_driver_stats stats;
282} __attribute__((__aligned__(SMP_CACHE_BYTES))); 284} __attribute__((__aligned__(SMP_CACHE_BYTES)));
@@ -353,6 +355,10 @@ struct vmxnet3_adapter {
353 unsigned long state; /* VMXNET3_STATE_BIT_xxx */ 355 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
354 356
355 int share_intr; 357 int share_intr;
358
359 dma_addr_t adapter_pa;
360 dma_addr_t pm_conf_pa;
361 dma_addr_t rss_conf_pa;
356}; 362};
357 363
358#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ 364#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
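The header changes mirror the .c conversion: every object the device reads now carries its bus address next to the CPU pointer, so the descriptor-fill code can stop calling virt_to_phys(). The shape, with an illustrative struct name:

        struct example_queue {
                struct vmxnet3_rx_buf_info *buf_info;    /* CPU virtual address */
                dma_addr_t                  buf_info_pa; /* bus address programmed
                                                          * into the device */
        };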
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 767f7af3bd40..bf64b4191dcc 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -6,9 +6,6 @@
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 *
10 * TODO
11 * - IPv6 (not in RFC)
12 */ 9 */
13 10
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -27,6 +24,7 @@
27#include <linux/igmp.h> 24#include <linux/igmp.h>
28#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
29#include <linux/if_ether.h> 26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
30#include <linux/hash.h> 28#include <linux/hash.h>
31#include <linux/ethtool.h> 29#include <linux/ethtool.h>
32#include <net/arp.h> 30#include <net/arp.h>
@@ -41,6 +39,13 @@
41#include <net/inet_ecn.h> 39#include <net/inet_ecn.h>
42#include <net/net_namespace.h> 40#include <net/net_namespace.h>
43#include <net/netns/generic.h> 41#include <net/netns/generic.h>
42#include <net/vxlan.h>
43#if IS_ENABLED(CONFIG_IPV6)
44#include <net/ipv6.h>
45#include <net/addrconf.h>
46#include <net/ip6_tunnel.h>
47#include <net/ip6_checksum.h>
48#endif
44 49
45#define VXLAN_VERSION "0.1" 50#define VXLAN_VERSION "0.1"
46 51
@@ -57,6 +62,9 @@
57#define VXLAN_VID_MASK (VXLAN_N_VID - 1) 62#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
58/* IP header + UDP + VXLAN + Ethernet header */ 63/* IP header + UDP + VXLAN + Ethernet header */
59#define VXLAN_HEADROOM (20 + 8 + 8 + 14) 64#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
65/* IPv6 header + UDP + VXLAN + Ethernet header */
66#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
67#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
60 68
61#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ 69#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
62 70
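The new constants follow directly from the encapsulation stack: VXLAN_HEADROOM is 20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (Ethernet) = 50 bytes, VXLAN6_HEADROOM swaps in the 40-byte IPv6 header for 70 bytes, and VXLAN_HLEN is only the 8 + 8 = 16 bytes of UDP plus VXLAN header that the receive path must be able to pull.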
@@ -82,16 +90,6 @@ static int vxlan_net_id;
82 90
83static const u8 all_zeros_mac[ETH_ALEN]; 91static const u8 all_zeros_mac[ETH_ALEN];
84 92
85/* per UDP socket information */
86struct vxlan_sock {
87 struct hlist_node hlist;
88 struct rcu_head rcu;
89 struct work_struct del_work;
90 atomic_t refcnt;
91 struct socket *sock;
92 struct hlist_head vni_list[VNI_HASH_SIZE];
93};
94
95/* per-network namespace private data for this module */ 93/* per-network namespace private data for this module */
96struct vxlan_net { 94struct vxlan_net {
97 struct list_head vxlan_list; 95 struct list_head vxlan_list;
@@ -99,8 +97,14 @@ struct vxlan_net {
99 spinlock_t sock_lock; 97 spinlock_t sock_lock;
100}; 98};
101 99
100union vxlan_addr {
101 struct sockaddr_in sin;
102 struct sockaddr_in6 sin6;
103 struct sockaddr sa;
104};
105
102struct vxlan_rdst { 106struct vxlan_rdst {
103 __be32 remote_ip; 107 union vxlan_addr remote_ip;
104 __be16 remote_port; 108 __be16 remote_port;
105 u32 remote_vni; 109 u32 remote_vni;
106 u32 remote_ifindex; 110 u32 remote_ifindex;
@@ -127,7 +131,7 @@ struct vxlan_dev {
127 struct vxlan_sock *vn_sock; /* listening socket */ 131 struct vxlan_sock *vn_sock; /* listening socket */
128 struct net_device *dev; 132 struct net_device *dev;
129 struct vxlan_rdst default_dst; /* default destination */ 133 struct vxlan_rdst default_dst; /* default destination */
130 __be32 saddr; /* source address */ 134 union vxlan_addr saddr; /* source address */
131 __be16 dst_port; 135 __be16 dst_port;
132 __u16 port_min; /* source port range */ 136 __u16 port_min; /* source port range */
133 __u16 port_max; 137 __u16 port_max;
@@ -153,6 +157,7 @@ struct vxlan_dev {
153#define VXLAN_F_RSC 0x04 157#define VXLAN_F_RSC 0x04
154#define VXLAN_F_L2MISS 0x08 158#define VXLAN_F_L2MISS 0x08
155#define VXLAN_F_L3MISS 0x10 159#define VXLAN_F_L3MISS 0x10
160#define VXLAN_F_IPV6 0x20 /* internal flag */
156 161
157/* salt for hash table */ 162/* salt for hash table */
158static u32 vxlan_salt __read_mostly; 163static u32 vxlan_salt __read_mostly;
@@ -160,6 +165,96 @@ static struct workqueue_struct *vxlan_wq;
160 165
161static void vxlan_sock_work(struct work_struct *work); 166static void vxlan_sock_work(struct work_struct *work);
162 167
168#if IS_ENABLED(CONFIG_IPV6)
169static inline
170bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
171{
172 if (a->sa.sa_family != b->sa.sa_family)
173 return false;
174 if (a->sa.sa_family == AF_INET6)
175 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
176 else
177 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
178}
179
180static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
181{
182 if (ipa->sa.sa_family == AF_INET6)
183 return ipv6_addr_any(&ipa->sin6.sin6_addr);
184 else
185 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
186}
187
188static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
189{
190 if (ipa->sa.sa_family == AF_INET6)
191 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
192 else
193 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
194}
195
196static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
197{
198 if (nla_len(nla) >= sizeof(struct in6_addr)) {
199 nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
200 ip->sa.sa_family = AF_INET6;
201 return 0;
202 } else if (nla_len(nla) >= sizeof(__be32)) {
203 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
204 ip->sa.sa_family = AF_INET;
205 return 0;
206 } else {
207 return -EAFNOSUPPORT;
208 }
209}
210
211static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
212 const union vxlan_addr *ip)
213{
214 if (ip->sa.sa_family == AF_INET6)
215 return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
216 else
217 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
218}
219
220#else /* !CONFIG_IPV6 */
221
222static inline
223bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
224{
225 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
226}
227
228static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
229{
230 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
231}
232
233static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
234{
235 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
236}
237
238static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
239{
240 if (nla_len(nla) >= sizeof(struct in6_addr)) {
241 return -EAFNOSUPPORT;
242 } else if (nla_len(nla) >= sizeof(__be32)) {
243 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
244 ip->sa.sa_family = AF_INET;
245 return 0;
246 } else {
247 return -EAFNOSUPPORT;
248 }
249}
250
251static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
252 const union vxlan_addr *ip)
253{
254 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
255}
256#endif
257
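union vxlan_addr overlays sockaddr_in and sockaddr_in6 so every address in the driver can hold either family, discriminated by sa.sa_family; the #else branch above compiles the same helpers down to IPv4-only bodies when CONFIG_IPV6 is off. A usage sketch:

        union vxlan_addr a;

        a.sin.sin_family = AF_INET;     /* aliases a.sa.sa_family */
        a.sin.sin_addr.s_addr = htonl(INADDR_ANY);

        if (vxlan_addr_any(&a))         /* true for INADDR_ANY */
                pr_debug("wildcard remote\n");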
163/* Virtual Network hash table head */ 258/* Virtual Network hash table head */
164static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id) 259static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
165{ 260{
@@ -177,13 +272,18 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
177/* First remote destination for a forwarding entry. 272/* First remote destination for a forwarding entry.
178 * Guaranteed to be non-NULL because remotes are never deleted. 273 * Guaranteed to be non-NULL because remotes are never deleted.
179 */ 274 */
180static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb) 275static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
276{
277 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
278}
279
280static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
181{ 281{
182 return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list); 282 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
183} 283}
184 284
185/* Find VXLAN socket based on network namespace and UDP port */ 285/* Find VXLAN socket based on network namespace and UDP port */
186static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port) 286static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
187{ 287{
188 struct vxlan_sock *vs; 288 struct vxlan_sock *vs;
189 289
@@ -194,16 +294,10 @@ static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
194 return NULL; 294 return NULL;
195} 295}
196 296
197/* Look up VNI in a per net namespace table */ 297static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
198static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
199{ 298{
200 struct vxlan_sock *vs;
201 struct vxlan_dev *vxlan; 299 struct vxlan_dev *vxlan;
202 300
203 vs = vxlan_find_port(net, port);
204 if (!vs)
205 return NULL;
206
207 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) { 301 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
208 if (vxlan->default_dst.remote_vni == id) 302 if (vxlan->default_dst.remote_vni == id)
209 return vxlan; 303 return vxlan;
@@ -212,6 +306,18 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
212 return NULL; 306 return NULL;
213} 307}
214 308
309/* Look up VNI in a per net namespace table */
310static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
311{
312 struct vxlan_sock *vs;
313
314 vs = vxlan_find_sock(net, port);
315 if (!vs)
316 return NULL;
317
318 return vxlan_vs_find_vni(vs, id);
319}
320
215/* Fill in neighbour message in skbuff. */ 321/* Fill in neighbour message in skbuff. */
216static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 322static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
217 const struct vxlan_fdb *fdb, 323 const struct vxlan_fdb *fdb,
@@ -235,7 +341,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
235 341
236 if (type == RTM_GETNEIGH) { 342 if (type == RTM_GETNEIGH) {
237 ndm->ndm_family = AF_INET; 343 ndm->ndm_family = AF_INET;
238 send_ip = rdst->remote_ip != htonl(INADDR_ANY); 344 send_ip = !vxlan_addr_any(&rdst->remote_ip);
239 send_eth = !is_zero_ether_addr(fdb->eth_addr); 345 send_eth = !is_zero_ether_addr(fdb->eth_addr);
240 } else 346 } else
241 ndm->ndm_family = AF_BRIDGE; 347 ndm->ndm_family = AF_BRIDGE;
@@ -247,7 +353,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
247 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 353 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
248 goto nla_put_failure; 354 goto nla_put_failure;
249 355
250 if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip)) 356 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
251 goto nla_put_failure; 357 goto nla_put_failure;
252 358
253 if (rdst->remote_port && rdst->remote_port != vxlan->dst_port && 359 if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
@@ -279,7 +385,7 @@ static inline size_t vxlan_nlmsg_size(void)
279{ 385{
280 return NLMSG_ALIGN(sizeof(struct ndmsg)) 386 return NLMSG_ALIGN(sizeof(struct ndmsg))
281 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 387 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
282 + nla_total_size(sizeof(__be32)) /* NDA_DST */ 388 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
283 + nla_total_size(sizeof(__be16)) /* NDA_PORT */ 389 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
284 + nla_total_size(sizeof(__be32)) /* NDA_VNI */ 390 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
285 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */ 391 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
@@ -297,7 +403,8 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
297 if (skb == NULL) 403 if (skb == NULL)
298 goto errout; 404 goto errout;
299 405
300 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb)); 406 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
407 first_remote_rtnl(fdb));
301 if (err < 0) { 408 if (err < 0) {
302 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 409 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
303 WARN_ON(err == -EMSGSIZE); 410 WARN_ON(err == -EMSGSIZE);
@@ -312,14 +419,14 @@ errout:
312 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 419 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
313} 420}
314 421
315static void vxlan_ip_miss(struct net_device *dev, __be32 ipa) 422static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
316{ 423{
317 struct vxlan_dev *vxlan = netdev_priv(dev); 424 struct vxlan_dev *vxlan = netdev_priv(dev);
318 struct vxlan_fdb f = { 425 struct vxlan_fdb f = {
319 .state = NUD_STALE, 426 .state = NUD_STALE,
320 }; 427 };
321 struct vxlan_rdst remote = { 428 struct vxlan_rdst remote = {
322 .remote_ip = ipa, /* goes to NDA_DST */ 429 .remote_ip = *ipa, /* goes to NDA_DST */
323 .remote_vni = VXLAN_N_VID, 430 .remote_vni = VXLAN_N_VID,
324 }; 431 };
325 432
@@ -371,7 +478,7 @@ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
371 struct vxlan_fdb *f; 478 struct vxlan_fdb *f;
372 479
373 hlist_for_each_entry_rcu(f, head, hlist) { 480 hlist_for_each_entry_rcu(f, head, hlist) {
374 if (compare_ether_addr(mac, f->eth_addr) == 0) 481 if (ether_addr_equal(mac, f->eth_addr))
375 return f; 482 return f;
376 } 483 }
377 484
@@ -392,13 +499,13 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
392 499
393/* caller should hold vxlan->hash_lock */ 500/* caller should hold vxlan->hash_lock */
394static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, 501static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
395 __be32 ip, __be16 port, 502 union vxlan_addr *ip, __be16 port,
396 __u32 vni, __u32 ifindex) 503 __u32 vni, __u32 ifindex)
397{ 504{
398 struct vxlan_rdst *rd; 505 struct vxlan_rdst *rd;
399 506
400 list_for_each_entry(rd, &f->remotes, list) { 507 list_for_each_entry(rd, &f->remotes, list) {
401 if (rd->remote_ip == ip && 508 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
402 rd->remote_port == port && 509 rd->remote_port == port &&
403 rd->remote_vni == vni && 510 rd->remote_vni == vni &&
404 rd->remote_ifindex == ifindex) 511 rd->remote_ifindex == ifindex)
@@ -408,9 +515,29 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
408 return NULL; 515 return NULL;
409} 516}
410 517
518/* Replace destination of unicast mac */
519static int vxlan_fdb_replace(struct vxlan_fdb *f,
520 union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
521{
522 struct vxlan_rdst *rd;
523
524 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
525 if (rd)
526 return 0;
527
528 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
529 if (!rd)
530 return 0;
531 rd->remote_ip = *ip;
532 rd->remote_port = port;
533 rd->remote_vni = vni;
534 rd->remote_ifindex = ifindex;
535 return 1;
536}
537
411/* Add/update destinations for multicast */ 538/* Add/update destinations for multicast */
412static int vxlan_fdb_append(struct vxlan_fdb *f, 539static int vxlan_fdb_append(struct vxlan_fdb *f,
413 __be32 ip, __be16 port, __u32 vni, __u32 ifindex) 540 union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
414{ 541{
415 struct vxlan_rdst *rd; 542 struct vxlan_rdst *rd;
416 543
@@ -421,7 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
421 rd = kmalloc(sizeof(*rd), GFP_ATOMIC); 548 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
422 if (rd == NULL) 549 if (rd == NULL)
423 return -ENOBUFS; 550 return -ENOBUFS;
424 rd->remote_ip = ip; 551 rd->remote_ip = *ip;
425 rd->remote_port = port; 552 rd->remote_port = port;
426 rd->remote_vni = vni; 553 rd->remote_vni = vni;
427 rd->remote_ifindex = ifindex; 554 rd->remote_ifindex = ifindex;
@@ -431,9 +558,43 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
431 return 1; 558 return 1;
432} 559}
433 560
561/* Notify netdevs that UDP port started listening */
562static void vxlan_notify_add_rx_port(struct sock *sk)
563{
564 struct net_device *dev;
565 struct net *net = sock_net(sk);
566 sa_family_t sa_family = sk->sk_family;
567 u16 port = htons(inet_sk(sk)->inet_sport);
568
569 rcu_read_lock();
570 for_each_netdev_rcu(net, dev) {
571 if (dev->netdev_ops->ndo_add_vxlan_port)
572 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
573 port);
574 }
575 rcu_read_unlock();
576}
577
578/* Notify netdevs that UDP port is no more listening */
579static void vxlan_notify_del_rx_port(struct sock *sk)
580{
581 struct net_device *dev;
582 struct net *net = sock_net(sk);
583 sa_family_t sa_family = sk->sk_family;
584 u16 port = htons(inet_sk(sk)->inet_sport);
585
586 rcu_read_lock();
587 for_each_netdev_rcu(net, dev) {
588 if (dev->netdev_ops->ndo_del_vxlan_port)
589 dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
590 port);
591 }
592 rcu_read_unlock();
593}
594
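These two walkers let every netdev in the namespace that provides the new ndo_add_vxlan_port/ndo_del_vxlan_port hooks learn which UDP ports carry VXLAN traffic, so NICs can steer receive-side offloads accordingly. A hedged sketch of the driver side, with invented mynic_* names:

        static void mynic_add_vxlan_port(struct net_device *dev,
                                         sa_family_t sa_family, __be16 port)
        {
                struct mynic_priv *priv = netdev_priv(dev);

                if (sa_family != AF_INET)       /* assume IPv4-only hardware */
                        return;
                mynic_hw_set_vxlan_port(priv, ntohs(port)); /* invented hw op */
        }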
434/* Add new entry to forwarding table -- assumes lock held */ 595/* Add new entry to forwarding table -- assumes lock held */
435static int vxlan_fdb_create(struct vxlan_dev *vxlan, 596static int vxlan_fdb_create(struct vxlan_dev *vxlan,
436 const u8 *mac, __be32 ip, 597 const u8 *mac, union vxlan_addr *ip,
437 __u16 state, __u16 flags, 598 __u16 state, __u16 flags,
438 __be16 port, __u32 vni, __u32 ifindex, 599 __be16 port, __u32 vni, __u32 ifindex,
439 __u8 ndm_flags) 600 __u8 ndm_flags)
@@ -458,6 +619,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
458 f->updated = jiffies; 619 f->updated = jiffies;
459 notify = 1; 620 notify = 1;
460 } 621 }
622 if ((flags & NLM_F_REPLACE)) {
623 /* Only change unicasts */
624 if (!(is_multicast_ether_addr(f->eth_addr) ||
625 is_zero_ether_addr(f->eth_addr))) {
626 int rc = vxlan_fdb_replace(f, ip, port, vni,
627 ifindex);
628
629 if (rc < 0)
630 return rc;
631 notify |= rc;
632 } else
633 return -EOPNOTSUPP;
634 }
461 if ((flags & NLM_F_APPEND) && 635 if ((flags & NLM_F_APPEND) &&
462 (is_multicast_ether_addr(f->eth_addr) || 636 (is_multicast_ether_addr(f->eth_addr) ||
463 is_zero_ether_addr(f->eth_addr))) { 637 is_zero_ether_addr(f->eth_addr))) {
@@ -474,7 +648,12 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
474 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax) 648 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
475 return -ENOSPC; 649 return -ENOSPC;
476 650
477 netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip); 651 /* Disallow replace to add a multicast entry */
652 if ((flags & NLM_F_REPLACE) &&
653 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
654 return -EOPNOTSUPP;
655
656 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
478 f = kmalloc(sizeof(*f), GFP_ATOMIC); 657 f = kmalloc(sizeof(*f), GFP_ATOMIC);
479 if (!f) 658 if (!f)
480 return -ENOMEM; 659 return -ENOMEM;
@@ -499,12 +678,6 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
499 return 0; 678 return 0;
500} 679}
501 680
502static void vxlan_fdb_free_rdst(struct rcu_head *head)
503{
504 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
505 kfree(rd);
506}
507
508static void vxlan_fdb_free(struct rcu_head *head) 681static void vxlan_fdb_free(struct rcu_head *head)
509{ 682{
510 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); 683 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
@@ -528,17 +701,26 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
528} 701}
529 702
530static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 703static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
531 __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex) 704 union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
532{ 705{
533 struct net *net = dev_net(vxlan->dev); 706 struct net *net = dev_net(vxlan->dev);
707 int err;
534 708
535 if (tb[NDA_DST]) { 709 if (tb[NDA_DST]) {
536 if (nla_len(tb[NDA_DST]) != sizeof(__be32)) 710 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
537 return -EAFNOSUPPORT; 711 if (err)
538 712 return err;
539 *ip = nla_get_be32(tb[NDA_DST]);
540 } else { 713 } else {
541 *ip = htonl(INADDR_ANY); 714 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
715 if (remote->sa.sa_family == AF_INET) {
716 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
717 ip->sa.sa_family = AF_INET;
718#if IS_ENABLED(CONFIG_IPV6)
719 } else {
720 ip->sin6.sin6_addr = in6addr_any;
721 ip->sa.sa_family = AF_INET6;
722#endif
723 }
542 } 724 }
543 725
544 if (tb[NDA_PORT]) { 726 if (tb[NDA_PORT]) {
@@ -581,7 +763,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
581{ 763{
582 struct vxlan_dev *vxlan = netdev_priv(dev); 764 struct vxlan_dev *vxlan = netdev_priv(dev);
583 /* struct net *net = dev_net(vxlan->dev); */ 765 /* struct net *net = dev_net(vxlan->dev); */
584 __be32 ip; 766 union vxlan_addr ip;
585 __be16 port; 767 __be16 port;
586 u32 vni, ifindex; 768 u32 vni, ifindex;
587 int err; 769 int err;
@@ -600,7 +782,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
600 return err; 782 return err;
601 783
602 spin_lock_bh(&vxlan->hash_lock); 784 spin_lock_bh(&vxlan->hash_lock);
603 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags, 785 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
604 port, vni, ifindex, ndm->ndm_flags); 786 port, vni, ifindex, ndm->ndm_flags);
605 spin_unlock_bh(&vxlan->hash_lock); 787 spin_unlock_bh(&vxlan->hash_lock);
606 788
@@ -615,7 +797,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
615 struct vxlan_dev *vxlan = netdev_priv(dev); 797 struct vxlan_dev *vxlan = netdev_priv(dev);
616 struct vxlan_fdb *f; 798 struct vxlan_fdb *f;
617 struct vxlan_rdst *rd = NULL; 799 struct vxlan_rdst *rd = NULL;
618 __be32 ip; 800 union vxlan_addr ip;
619 __be16 port; 801 __be16 port;
620 u32 vni, ifindex; 802 u32 vni, ifindex;
621 int err; 803 int err;
@@ -631,8 +813,8 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
631 if (!f) 813 if (!f)
632 goto out; 814 goto out;
633 815
634 if (ip != htonl(INADDR_ANY)) { 816 if (!vxlan_addr_any(&ip)) {
635 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 817 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
636 if (!rd) 818 if (!rd)
637 goto out; 819 goto out;
638 } 820 }
@@ -644,7 +826,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
644 */ 826 */
645 if (rd && !list_is_singular(&f->remotes)) { 827 if (rd && !list_is_singular(&f->remotes)) {
646 list_del_rcu(&rd->list); 828 list_del_rcu(&rd->list);
647 call_rcu(&rd->rcu, vxlan_fdb_free_rdst); 829 kfree_rcu(rd, rcu);
648 goto out; 830 goto out;
649 } 831 }
650 832
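Replacing the vxlan_fdb_free_rdst() callback with kfree_rcu() is a pure simplification: when an RCU callback does nothing but kfree() the enclosing object, the macro form schedules the identical deferred free keyed on the offset of the rcu_head member.

        call_rcu(&rd->rcu, my_free_cb); /* old: my_free_cb = container_of + kfree */
        kfree_rcu(rd, rcu);             /* new: no hand-written callback */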
@@ -695,16 +877,16 @@ out:
695 * Return true if packet is bogus and should be dropped. 877
696 */ 878 */
697static bool vxlan_snoop(struct net_device *dev, 879static bool vxlan_snoop(struct net_device *dev,
698 __be32 src_ip, const u8 *src_mac) 880 union vxlan_addr *src_ip, const u8 *src_mac)
699{ 881{
700 struct vxlan_dev *vxlan = netdev_priv(dev); 882 struct vxlan_dev *vxlan = netdev_priv(dev);
701 struct vxlan_fdb *f; 883 struct vxlan_fdb *f;
702 884
703 f = vxlan_find_mac(vxlan, src_mac); 885 f = vxlan_find_mac(vxlan, src_mac);
704 if (likely(f)) { 886 if (likely(f)) {
705 struct vxlan_rdst *rdst = first_remote(f); 887 struct vxlan_rdst *rdst = first_remote_rcu(f);
706 888
707 if (likely(rdst->remote_ip == src_ip)) 889 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
708 return false; 890 return false;
709 891
710 /* Don't migrate static entries, drop packets */ 892 /* Don't migrate static entries, drop packets */
@@ -713,10 +895,10 @@ static bool vxlan_snoop(struct net_device *dev,
713 895
714 if (net_ratelimit()) 896 if (net_ratelimit())
715 netdev_info(dev, 897 netdev_info(dev,
716 "%pM migrated from %pI4 to %pI4\n", 898 "%pM migrated from %pIS to %pIS\n",
717 src_mac, &rdst->remote_ip, &src_ip); 899 src_mac, &rdst->remote_ip, &src_ip);
718 900
719 rdst->remote_ip = src_ip; 901 rdst->remote_ip = *src_ip;
720 f->updated = jiffies; 902 f->updated = jiffies;
721 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); 903 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
722 } else { 904 } else {
@@ -738,7 +920,7 @@ static bool vxlan_snoop(struct net_device *dev,
738} 920}
739 921
740/* See if multicast group is already in use by other ID */ 922/* See if multicast group is already in use by other ID */
741static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip) 923static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
742{ 924{
743 struct vxlan_dev *vxlan; 925 struct vxlan_dev *vxlan;
744 926
@@ -746,7 +928,8 @@ static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
746 if (!netif_running(vxlan->dev)) 928 if (!netif_running(vxlan->dev))
747 continue; 929 continue;
748 930
749 if (vxlan->default_dst.remote_ip == remote_ip) 931 if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
932 remote_ip))
750 return true; 933 return true;
751 } 934 }
752 935
@@ -758,17 +941,25 @@ static void vxlan_sock_hold(struct vxlan_sock *vs)
758 atomic_inc(&vs->refcnt); 941 atomic_inc(&vs->refcnt);
759} 942}
760 943
761static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs) 944void vxlan_sock_release(struct vxlan_sock *vs)
762{ 945{
946 struct sock *sk = vs->sock->sk;
947 struct net *net = sock_net(sk);
948 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
949
763 if (!atomic_dec_and_test(&vs->refcnt)) 950 if (!atomic_dec_and_test(&vs->refcnt))
764 return; 951 return;
765 952
766 spin_lock(&vn->sock_lock); 953 spin_lock(&vn->sock_lock);
767 hlist_del_rcu(&vs->hlist); 954 hlist_del_rcu(&vs->hlist);
955 smp_wmb();
956 vs->sock->sk->sk_user_data = NULL;
957 vxlan_notify_del_rx_port(sk);
768 spin_unlock(&vn->sock_lock); 958 spin_unlock(&vn->sock_lock);
769 959
770 queue_work(vxlan_wq, &vs->del_work); 960 queue_work(vxlan_wq, &vs->del_work);
771} 961}
962EXPORT_SYMBOL_GPL(vxlan_sock_release);
772 963
773/* Callback to update multicast group membership when first VNI on 964/* Callback to update multicast group membership when first VNI on
774 * multicast address is brought up 965
@@ -777,19 +968,28 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
777static void vxlan_igmp_join(struct work_struct *work) 968static void vxlan_igmp_join(struct work_struct *work)
778{ 969{
779 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join); 970 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
780 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
781 struct vxlan_sock *vs = vxlan->vn_sock; 971 struct vxlan_sock *vs = vxlan->vn_sock;
782 struct sock *sk = vs->sock->sk; 972 struct sock *sk = vs->sock->sk;
783 struct ip_mreqn mreq = { 973 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
784 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, 974 int ifindex = vxlan->default_dst.remote_ifindex;
785 .imr_ifindex = vxlan->default_dst.remote_ifindex,
786 };
787 975
788 lock_sock(sk); 976 lock_sock(sk);
789 ip_mc_join_group(sk, &mreq); 977 if (ip->sa.sa_family == AF_INET) {
978 struct ip_mreqn mreq = {
979 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
980 .imr_ifindex = ifindex,
981 };
982
983 ip_mc_join_group(sk, &mreq);
984#if IS_ENABLED(CONFIG_IPV6)
985 } else {
986 ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
987 &ip->sin6.sin6_addr);
988#endif
989 }
790 release_sock(sk); 990 release_sock(sk);
791 991
792 vxlan_sock_release(vn, vs); 992 vxlan_sock_release(vs);
793 dev_put(vxlan->dev); 993 dev_put(vxlan->dev);
794} 994}
795 995
@@ -797,42 +997,45 @@ static void vxlan_igmp_join(struct work_struct *work)
797static void vxlan_igmp_leave(struct work_struct *work) 997static void vxlan_igmp_leave(struct work_struct *work)
798{ 998{
799 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave); 999 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
800 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
801 struct vxlan_sock *vs = vxlan->vn_sock; 1000 struct vxlan_sock *vs = vxlan->vn_sock;
802 struct sock *sk = vs->sock->sk; 1001 struct sock *sk = vs->sock->sk;
803 struct ip_mreqn mreq = { 1002 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
804 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, 1003 int ifindex = vxlan->default_dst.remote_ifindex;
805 .imr_ifindex = vxlan->default_dst.remote_ifindex,
806 };
807 1004
808 lock_sock(sk); 1005 lock_sock(sk);
809 ip_mc_leave_group(sk, &mreq); 1006 if (ip->sa.sa_family == AF_INET) {
1007 struct ip_mreqn mreq = {
1008 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1009 .imr_ifindex = ifindex,
1010 };
1011
1012 ip_mc_leave_group(sk, &mreq);
1013#if IS_ENABLED(CONFIG_IPV6)
1014 } else {
1015 ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1016 &ip->sin6.sin6_addr);
1017#endif
1018 }
1019
810 release_sock(sk); 1020 release_sock(sk);
811 1021
812 vxlan_sock_release(vn, vs); 1022 vxlan_sock_release(vs);
813 dev_put(vxlan->dev); 1023 dev_put(vxlan->dev);
814} 1024}
815 1025
816/* Callback from net/ipv4/udp.c to receive packets */ 1026/* Callback from net/ipv4/udp.c to receive packets */
817static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) 1027static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
818{ 1028{
819 struct iphdr *oip; 1029 struct vxlan_sock *vs;
820 struct vxlanhdr *vxh; 1030 struct vxlanhdr *vxh;
821 struct vxlan_dev *vxlan;
822 struct pcpu_tstats *stats;
823 __be16 port; 1031 __be16 port;
824 __u32 vni;
825 int err;
826
827 /* pop off outer UDP header */
828 __skb_pull(skb, sizeof(struct udphdr));
829 1032
830 /* Need Vxlan and inner Ethernet header to be present */ 1033 /* Need Vxlan and inner Ethernet header to be present */
831 if (!pskb_may_pull(skb, sizeof(struct vxlanhdr))) 1034 if (!pskb_may_pull(skb, VXLAN_HLEN))
832 goto error; 1035 goto error;
833 1036
834 /* Drop packets with reserved bits set */ 1037 /* Return packets with reserved bits set */
835 vxh = (struct vxlanhdr *) skb->data; 1038 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
836 if (vxh->vx_flags != htonl(VXLAN_FLAGS) || 1039 if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
837 (vxh->vx_vni & htonl(0xff))) { 1040 (vxh->vx_vni & htonl(0xff))) {
838 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", 1041 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
@@ -840,40 +1043,72 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
840 goto error; 1043 goto error;
841 } 1044 }
842 1045
843 __skb_pull(skb, sizeof(struct vxlanhdr)); 1046 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
1047 goto drop;
844 1048
845 /* Is this VNI defined? */
846 vni = ntohl(vxh->vx_vni) >> 8;
847 port = inet_sk(sk)->inet_sport; 1049 port = inet_sk(sk)->inet_sport;
848 vxlan = vxlan_find_vni(sock_net(sk), vni, port); 1050
849 if (!vxlan) { 1051 smp_read_barrier_depends();
850 netdev_dbg(skb->dev, "unknown vni %d port %u\n", 1052 vs = (struct vxlan_sock *)sk->sk_user_data;
851 vni, ntohs(port)); 1053 if (!vs)
852 goto drop; 1054 goto drop;
853 }
854 1055
855 if (!pskb_may_pull(skb, ETH_HLEN)) { 1056 vs->rcv(vs, skb, vxh->vx_vni);
856 vxlan->dev->stats.rx_length_errors++; 1057 return 0;
857 vxlan->dev->stats.rx_errors++; 1058
1059drop:
1060 /* Consume bad packet */
1061 kfree_skb(skb);
1062 return 0;
1063
1064error:
1065 /* Return non vxlan pkt */
1066 return 1;
1067}
1068
1069static void vxlan_rcv(struct vxlan_sock *vs,
1070 struct sk_buff *skb, __be32 vx_vni)
1071{
1072 struct iphdr *oip = NULL;
1073 struct ipv6hdr *oip6 = NULL;
1074 struct vxlan_dev *vxlan;
1075 struct pcpu_tstats *stats;
1076 union vxlan_addr saddr;
1077 __u32 vni;
1078 int err = 0;
1079 union vxlan_addr *remote_ip;
1080
1081 vni = ntohl(vx_vni) >> 8;
1082 /* Is this VNI defined? */
1083 vxlan = vxlan_vs_find_vni(vs, vni);
1084 if (!vxlan)
858 goto drop; 1085 goto drop;
859 }
860 1086
1087 remote_ip = &vxlan->default_dst.remote_ip;
861 skb_reset_mac_header(skb); 1088 skb_reset_mac_header(skb);
862
863 /* Re-examine inner Ethernet packet */
864 oip = ip_hdr(skb);
865 skb->protocol = eth_type_trans(skb, vxlan->dev); 1089 skb->protocol = eth_type_trans(skb, vxlan->dev);
866 1090
867 /* Ignore packet loops (and multicast echo) */ 1091 /* Ignore packet loops (and multicast echo) */
868 if (compare_ether_addr(eth_hdr(skb)->h_source, 1092 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
869 vxlan->dev->dev_addr) == 0)
870 goto drop; 1093 goto drop;
871 1094
1095 /* Re-examine inner Ethernet packet */
1096 if (remote_ip->sa.sa_family == AF_INET) {
1097 oip = ip_hdr(skb);
1098 saddr.sin.sin_addr.s_addr = oip->saddr;
1099 saddr.sa.sa_family = AF_INET;
1100#if IS_ENABLED(CONFIG_IPV6)
1101 } else {
1102 oip6 = ipv6_hdr(skb);
1103 saddr.sin6.sin6_addr = oip6->saddr;
1104 saddr.sa.sa_family = AF_INET6;
1105#endif
1106 }
1107
872 if ((vxlan->flags & VXLAN_F_LEARN) && 1108 if ((vxlan->flags & VXLAN_F_LEARN) &&
873 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source)) 1109 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
874 goto drop; 1110 goto drop;
875 1111
876 __skb_tunnel_rx(skb, vxlan->dev);
877 skb_reset_network_header(skb); 1112 skb_reset_network_header(skb);
878 1113
879 /* If the NIC driver gave us an encapsulated packet with 1114 /* If the NIC driver gave us an encapsulated packet with
@@ -887,11 +1122,20 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
887 1122
888 skb->encapsulation = 0; 1123 skb->encapsulation = 0;
889 1124
890 err = IP_ECN_decapsulate(oip, skb); 1125 if (oip6)
1126 err = IP6_ECN_decapsulate(oip6, skb);
1127 if (oip)
1128 err = IP_ECN_decapsulate(oip, skb);
1129
891 if (unlikely(err)) { 1130 if (unlikely(err)) {
892 if (log_ecn_error) 1131 if (log_ecn_error) {
893 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", 1132 if (oip6)
894 &oip->saddr, oip->tos); 1133 net_info_ratelimited("non-ECT from %pI6\n",
1134 &oip6->saddr);
1135 if (oip)
1136 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1137 &oip->saddr, oip->tos);
1138 }
895 if (err > 1) { 1139 if (err > 1) {
896 ++vxlan->dev->stats.rx_frame_errors; 1140 ++vxlan->dev->stats.rx_frame_errors;
897 ++vxlan->dev->stats.rx_errors; 1141 ++vxlan->dev->stats.rx_errors;
@@ -907,16 +1151,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
907 1151
908 netif_rx(skb); 1152 netif_rx(skb);
909 1153
910 return 0; 1154 return;
911error:
912 /* Put UDP header back */
913 __skb_push(skb, sizeof(struct udphdr));
914
915 return 1;
916drop: 1155drop:
917 /* Consume bad packet */ 1156 /* Consume bad packet */
918 kfree_skb(skb); 1157 kfree_skb(skb);
919 return 0;
920} 1158}
921 1159
922static int arp_reduce(struct net_device *dev, struct sk_buff *skb) 1160static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
@@ -967,7 +1205,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
967 } 1205 }
968 1206
969 f = vxlan_find_mac(vxlan, n->ha); 1207 f = vxlan_find_mac(vxlan, n->ha);
970 if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) { 1208 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
971 /* bridge-local neighbor */ 1209 /* bridge-local neighbor */
972 neigh_release(n); 1210 neigh_release(n);
973 goto out; 1211 goto out;
@@ -985,18 +1223,87 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
985 1223
986 if (netif_rx_ni(reply) == NET_RX_DROP) 1224 if (netif_rx_ni(reply) == NET_RX_DROP)
987 dev->stats.rx_dropped++; 1225 dev->stats.rx_dropped++;
988 } else if (vxlan->flags & VXLAN_F_L3MISS) 1226 } else if (vxlan->flags & VXLAN_F_L3MISS) {
989 vxlan_ip_miss(dev, tip); 1227 union vxlan_addr ipa = {
1228 .sin.sin_addr.s_addr = tip,
1229 .sa.sa_family = AF_INET,
1230 };
1231
1232 vxlan_ip_miss(dev, &ipa);
1233 }
990out: 1234out:
991 consume_skb(skb); 1235 consume_skb(skb);
992 return NETDEV_TX_OK; 1236 return NETDEV_TX_OK;
993} 1237}
994 1238
1239#if IS_ENABLED(CONFIG_IPV6)
1240static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1241{
1242 struct vxlan_dev *vxlan = netdev_priv(dev);
1243 struct neighbour *n;
1244 union vxlan_addr ipa;
1245 const struct ipv6hdr *iphdr;
1246 const struct in6_addr *saddr, *daddr;
1247 struct nd_msg *msg;
1248 struct inet6_dev *in6_dev = NULL;
1249
1250 in6_dev = __in6_dev_get(dev);
1251 if (!in6_dev)
1252 goto out;
1253
1254 if (!pskb_may_pull(skb, skb->len))
1255 goto out;
1256
1257 iphdr = ipv6_hdr(skb);
1258 saddr = &iphdr->saddr;
1259 daddr = &iphdr->daddr;
1260
1261 if (ipv6_addr_loopback(daddr) ||
1262 ipv6_addr_is_multicast(daddr))
1263 goto out;
1264
1265 msg = (struct nd_msg *)skb_transport_header(skb);
1266 if (msg->icmph.icmp6_code != 0 ||
1267 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1268 goto out;
1269
1270 n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
1271
1272 if (n) {
1273 struct vxlan_fdb *f;
1274
1275 if (!(n->nud_state & NUD_CONNECTED)) {
1276 neigh_release(n);
1277 goto out;
1278 }
1279
1280 f = vxlan_find_mac(vxlan, n->ha);
1281 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1282 /* bridge-local neighbor */
1283 neigh_release(n);
1284 goto out;
1285 }
1286
1287 ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
1288 !!in6_dev->cnf.forwarding,
1289 true, false, false);
1290 neigh_release(n);
1291 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1292 ipa.sin6.sin6_addr = *daddr;
1293 ipa.sa.sa_family = AF_INET6;
1294 vxlan_ip_miss(dev, &ipa);
1295 }
1296
1297out:
1298 consume_skb(skb);
1299 return NETDEV_TX_OK;
1300}
1301#endif
1302
995static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) 1303static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
996{ 1304{
997 struct vxlan_dev *vxlan = netdev_priv(dev); 1305 struct vxlan_dev *vxlan = netdev_priv(dev);
998 struct neighbour *n; 1306 struct neighbour *n;
999 struct iphdr *pip;
1000 1307
1001 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) 1308 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1002 return false; 1309 return false;
@@ -1004,11 +1311,47 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1004 n = NULL; 1311 n = NULL;
1005 switch (ntohs(eth_hdr(skb)->h_proto)) { 1312 switch (ntohs(eth_hdr(skb)->h_proto)) {
1006 case ETH_P_IP: 1313 case ETH_P_IP:
1314 {
1315 struct iphdr *pip;
1316
1007 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 1317 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1008 return false; 1318 return false;
1009 pip = ip_hdr(skb); 1319 pip = ip_hdr(skb);
1010 n = neigh_lookup(&arp_tbl, &pip->daddr, dev); 1320 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1321 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1322 union vxlan_addr ipa = {
1323 .sin.sin_addr.s_addr = pip->daddr,
1324 .sa.sa_family = AF_INET,
1325 };
1326
1327 vxlan_ip_miss(dev, &ipa);
1328 return false;
1329 }
1330
1011 break; 1331 break;
1332 }
1333#if IS_ENABLED(CONFIG_IPV6)
1334 case ETH_P_IPV6:
1335 {
1336 struct ipv6hdr *pip6;
1337
1338 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1339 return false;
1340 pip6 = ipv6_hdr(skb);
1341 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1342 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1343 union vxlan_addr ipa = {
1344 .sin6.sin6_addr = pip6->daddr,
1345 .sa.sa_family = AF_INET6,
1346 };
1347
1348 vxlan_ip_miss(dev, &ipa);
1349 return false;
1350 }
1351
1352 break;
1353 }
1354#endif
1012 default: 1355 default:
1013 return false; 1356 return false;
1014 } 1357 }
@@ -1016,7 +1359,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1016 if (n) { 1359 if (n) {
1017 bool diff; 1360 bool diff;
1018 1361
1019 diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0; 1362 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1020 if (diff) { 1363 if (diff) {
1021 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, 1364 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1022 dev->addr_len); 1365 dev->addr_len);
@@ -1024,8 +1367,8 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1024 } 1367 }
1025 neigh_release(n); 1368 neigh_release(n);
1026 return diff; 1369 return diff;
1027 } else if (vxlan->flags & VXLAN_F_L3MISS) 1370 }
1028 vxlan_ip_miss(dev, pip->daddr); 1371
1029 return false; 1372 return false;
1030} 1373}
1031 1374
@@ -1035,11 +1378,8 @@ static void vxlan_sock_put(struct sk_buff *skb)
1035} 1378}
1036 1379
1037/* On transmit, associate with the tunnel socket */ 1380/* On transmit, associate with the tunnel socket */
1038static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) 1381static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
1039{ 1382{
1040 struct vxlan_dev *vxlan = netdev_priv(dev);
1041 struct sock *sk = vxlan->vn_sock->sock->sk;
1042
1043 skb_orphan(skb); 1383 skb_orphan(skb);
1044 sock_hold(sk); 1384 sock_hold(sk);
1045 skb->sk = sk; 1385 skb->sk = sk;
@@ -1051,9 +1391,9 @@ static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
1051 * better and maybe available from hardware 1391 * better and maybe available from hardware
1052 * secondary choice is to use jhash on the Ethernet header 1392 * secondary choice is to use jhash on the Ethernet header
1053 */ 1393 */
1054static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb) 1394__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
1055{ 1395{
1056 unsigned int range = (vxlan->port_max - vxlan->port_min) + 1; 1396 unsigned int range = (port_max - port_min) + 1;
1057 u32 hash; 1397 u32 hash;
1058 1398
1059 hash = skb_get_rxhash(skb); 1399 hash = skb_get_rxhash(skb);
@@ -1061,8 +1401,9 @@ static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
1061 hash = jhash(skb->data, 2 * ETH_ALEN, 1401 hash = jhash(skb->data, 2 * ETH_ALEN,
1062 (__force u32) skb->protocol); 1402 (__force u32) skb->protocol);
1063 1403
1064 return htons((((u64) hash * range) >> 32) + vxlan->port_min); 1404 return htons((((u64) hash * range) >> 32) + port_min);
1065} 1405}
1406EXPORT_SYMBOL_GPL(vxlan_src_port);
1066 1407
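The exported vxlan_src_port() maps a 32-bit flow hash onto the configured range with a multiply-shift instead of a modulo: (hash * range) >> 32 scales the hash into [0, range). As a worked example, with port_min = 32768 and port_max = 61000 the range is 28233; a hash of 0x40000000 (a quarter of the 32-bit space) gives (0x40000000 * 28233) >> 32 = 7058, so the source port is 32768 + 7058 = 39826.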
1067static int handle_offloads(struct sk_buff *skb) 1408static int handle_offloads(struct sk_buff *skb)
1068{ 1409{
@@ -1078,21 +1419,187 @@ static int handle_offloads(struct sk_buff *skb)
1078 return 0; 1419 return 0;
1079} 1420}
1080 1421
1422#if IS_ENABLED(CONFIG_IPV6)
1423static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1424 struct dst_entry *dst, struct sk_buff *skb,
1425 struct net_device *dev, struct in6_addr *saddr,
1426 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1427 __be16 src_port, __be16 dst_port, __be32 vni)
1428{
1429 struct ipv6hdr *ip6h;
1430 struct vxlanhdr *vxh;
1431 struct udphdr *uh;
1432 int min_headroom;
1433 int err;
1434
1435 if (!skb->encapsulation) {
1436 skb_reset_inner_headers(skb);
1437 skb->encapsulation = 1;
1438 }
1439
1440 skb_scrub_packet(skb, false);
1441
1442 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1443 + VXLAN_HLEN + sizeof(struct ipv6hdr)
1444 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1445
1446 /* Need space for new headers (invalidates iph ptr) */
1447 err = skb_cow_head(skb, min_headroom);
1448 if (unlikely(err))
1449 return err;
1450
1451 if (vlan_tx_tag_present(skb)) {
1452 if (WARN_ON(!__vlan_put_tag(skb,
1453 skb->vlan_proto,
1454 vlan_tx_tag_get(skb))))
1455 return -ENOMEM;
1456
1457 skb->vlan_tci = 0;
1458 }
1459
1460 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1461 vxh->vx_flags = htonl(VXLAN_FLAGS);
1462 vxh->vx_vni = vni;
1463
1464 __skb_push(skb, sizeof(*uh));
1465 skb_reset_transport_header(skb);
1466 uh = udp_hdr(skb);
1467
1468 uh->dest = dst_port;
1469 uh->source = src_port;
1470
1471 uh->len = htons(skb->len);
1472 uh->check = 0;
1473
1474 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1475 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1476 IPSKB_REROUTED);
1477 skb_dst_set(skb, dst);
1478
1479 if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
1480 __wsum csum = skb_checksum(skb, 0, skb->len, 0);
1481 skb->ip_summed = CHECKSUM_UNNECESSARY;
1482 uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
1483 IPPROTO_UDP, csum);
1484 if (uh->check == 0)
1485 uh->check = CSUM_MANGLED_0;
1486 } else {
1487 skb->ip_summed = CHECKSUM_PARTIAL;
1488 skb->csum_start = skb_transport_header(skb) - skb->head;
1489 skb->csum_offset = offsetof(struct udphdr, check);
1490 uh->check = ~csum_ipv6_magic(saddr, daddr,
1491 skb->len, IPPROTO_UDP, 0);
1492 }
1493
1494 __skb_push(skb, sizeof(*ip6h));
1495 skb_reset_network_header(skb);
1496 ip6h = ipv6_hdr(skb);
1497 ip6h->version = 6;
1498 ip6h->priority = prio;
1499 ip6h->flow_lbl[0] = 0;
1500 ip6h->flow_lbl[1] = 0;
1501 ip6h->flow_lbl[2] = 0;
1502 ip6h->payload_len = htons(skb->len);
1503 ip6h->nexthdr = IPPROTO_UDP;
1504 ip6h->hop_limit = ttl;
1505 ip6h->daddr = *daddr;
1506 ip6h->saddr = *saddr;
1507
1508 vxlan_set_owner(vs->sock->sk, skb);
1509
1510 err = handle_offloads(skb);
1511 if (err)
1512 return err;
1513
1514 ip6tunnel_xmit(skb, dev);
1515 return 0;
1516}
1517#endif
1518
1519int vxlan_xmit_skb(struct vxlan_sock *vs,
1520 struct rtable *rt, struct sk_buff *skb,
1521 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1522 __be16 src_port, __be16 dst_port, __be32 vni)
1523{
1524 struct vxlanhdr *vxh;
1525 struct udphdr *uh;
1526 int min_headroom;
1527 int err;
1528
1529 if (!skb->encapsulation) {
1530 skb_reset_inner_headers(skb);
1531 skb->encapsulation = 1;
1532 }
1533
1534 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
1535 + VXLAN_HLEN + sizeof(struct iphdr)
1536 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1537
1538 /* Need space for new headers (invalidates iph ptr) */
1539 err = skb_cow_head(skb, min_headroom);
1540 if (unlikely(err))
1541 return err;
1542
1543 if (vlan_tx_tag_present(skb)) {
1544 if (WARN_ON(!__vlan_put_tag(skb,
1545 skb->vlan_proto,
1546 vlan_tx_tag_get(skb))))
1547 return -ENOMEM;
1548
1549 skb->vlan_tci = 0;
1550 }
1551
1552 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1553 vxh->vx_flags = htonl(VXLAN_FLAGS);
1554 vxh->vx_vni = vni;
1555
1556 __skb_push(skb, sizeof(*uh));
1557 skb_reset_transport_header(skb);
1558 uh = udp_hdr(skb);
1559
1560 uh->dest = dst_port;
1561 uh->source = src_port;
1562
1563 uh->len = htons(skb->len);
1564 uh->check = 0;
1565
1566 vxlan_set_owner(vs->sock->sk, skb);
1567
1568 err = handle_offloads(skb);
1569 if (err)
1570 return err;
1571
1572 return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
1573 false);
1574}
1575EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1576
1081/* Bypass encapsulation if the destination is local */ 1577/* Bypass encapsulation if the destination is local */
1082static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1578static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1083 struct vxlan_dev *dst_vxlan) 1579 struct vxlan_dev *dst_vxlan)
1084{ 1580{
1085 struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1581 struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1086 struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1582 struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1583 union vxlan_addr loopback;
1584 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1087 1585
1088 skb->pkt_type = PACKET_HOST; 1586 skb->pkt_type = PACKET_HOST;
1089 skb->encapsulation = 0; 1587 skb->encapsulation = 0;
1090 skb->dev = dst_vxlan->dev; 1588 skb->dev = dst_vxlan->dev;
1091 __skb_pull(skb, skb_network_offset(skb)); 1589 __skb_pull(skb, skb_network_offset(skb));
1092 1590
1591 if (remote_ip->sa.sa_family == AF_INET) {
1592 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1593 loopback.sa.sa_family = AF_INET;
1594#if IS_ENABLED(CONFIG_IPV6)
1595 } else {
1596 loopback.sin6.sin6_addr = in6addr_loopback;
1597 loopback.sa.sa_family = AF_INET6;
1598#endif
1599 }
1600
1093 if (dst_vxlan->flags & VXLAN_F_LEARN) 1601 if (dst_vxlan->flags & VXLAN_F_LEARN)
1094 vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK), 1602 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
1095 eth_hdr(skb)->h_source);
1096 1603
1097 u64_stats_update_begin(&tx_stats->syncp); 1604 u64_stats_update_begin(&tx_stats->syncp);
1098 tx_stats->tx_packets++; 1605 tx_stats->tx_packets++;
@@ -1113,13 +1620,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1113 struct vxlan_rdst *rdst, bool did_rsc) 1620 struct vxlan_rdst *rdst, bool did_rsc)
1114{ 1621{
1115 struct vxlan_dev *vxlan = netdev_priv(dev); 1622 struct vxlan_dev *vxlan = netdev_priv(dev);
1116 struct rtable *rt; 1623 struct rtable *rt = NULL;
1117 const struct iphdr *old_iph; 1624 const struct iphdr *old_iph;
1118 struct vxlanhdr *vxh;
1119 struct udphdr *uh;
1120 struct flowi4 fl4; 1625 struct flowi4 fl4;
1121 __be32 dst; 1626 union vxlan_addr *dst;
1122 __be16 src_port, dst_port; 1627 __be16 src_port = 0, dst_port;
1123 u32 vni; 1628 u32 vni;
1124 __be16 df = 0; 1629 __be16 df = 0;
1125 __u8 tos, ttl; 1630 __u8 tos, ttl;
@@ -1127,9 +1632,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1127 1632
1128 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port; 1633 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
1129 vni = rdst->remote_vni; 1634 vni = rdst->remote_vni;
1130 dst = rdst->remote_ip; 1635 dst = &rdst->remote_ip;
1131 1636
1132 if (!dst) { 1637 if (vxlan_addr_any(dst)) {
1133 if (did_rsc) { 1638 if (did_rsc) {
1134 /* short-circuited back to local bridge */ 1639 /* short-circuited back to local bridge */
1135 vxlan_encap_bypass(skb, vxlan, vxlan); 1640 vxlan_encap_bypass(skb, vxlan, vxlan);
@@ -1138,84 +1643,113 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1138 goto drop; 1643 goto drop;
1139 } 1644 }
1140 1645
1141 if (!skb->encapsulation) {
1142 skb_reset_inner_headers(skb);
1143 skb->encapsulation = 1;
1144 }
1145
1146 /* Need space for new headers (invalidates iph ptr) */
1147 if (skb_cow_head(skb, VXLAN_HEADROOM))
1148 goto drop;
1149
1150 old_iph = ip_hdr(skb); 1646 old_iph = ip_hdr(skb);
1151 1647
1152 ttl = vxlan->ttl; 1648 ttl = vxlan->ttl;
1153 if (!ttl && IN_MULTICAST(ntohl(dst))) 1649 if (!ttl && vxlan_addr_multicast(dst))
1154 ttl = 1; 1650 ttl = 1;
1155 1651
1156 tos = vxlan->tos; 1652 tos = vxlan->tos;
1157 if (tos == 1) 1653 if (tos == 1)
1158 tos = ip_tunnel_get_dsfield(old_iph, skb); 1654 tos = ip_tunnel_get_dsfield(old_iph, skb);
1159 1655
1160 src_port = vxlan_src_port(vxlan, skb); 1656 src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
1161 1657
1162 memset(&fl4, 0, sizeof(fl4)); 1658 if (dst->sa.sa_family == AF_INET) {
1163 fl4.flowi4_oif = rdst->remote_ifindex; 1659 memset(&fl4, 0, sizeof(fl4));
1164 fl4.flowi4_tos = RT_TOS(tos); 1660 fl4.flowi4_oif = rdst->remote_ifindex;
1165 fl4.daddr = dst; 1661 fl4.flowi4_tos = RT_TOS(tos);
1166 fl4.saddr = vxlan->saddr; 1662 fl4.daddr = dst->sin.sin_addr.s_addr;
1663 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
1167 1664
1168 rt = ip_route_output_key(dev_net(dev), &fl4); 1665 rt = ip_route_output_key(dev_net(dev), &fl4);
1169 if (IS_ERR(rt)) { 1666 if (IS_ERR(rt)) {
1170 netdev_dbg(dev, "no route to %pI4\n", &dst); 1667 netdev_dbg(dev, "no route to %pI4\n",
1171 dev->stats.tx_carrier_errors++; 1668 &dst->sin.sin_addr.s_addr);
1172 goto tx_error; 1669 dev->stats.tx_carrier_errors++;
1173 } 1670 goto tx_error;
1671 }
1174 1672
1175 if (rt->dst.dev == dev) { 1673 if (rt->dst.dev == dev) {
1176 netdev_dbg(dev, "circular route to %pI4\n", &dst); 1674 netdev_dbg(dev, "circular route to %pI4\n",
1177 ip_rt_put(rt); 1675 &dst->sin.sin_addr.s_addr);
1178 dev->stats.collisions++; 1676 dev->stats.collisions++;
1179 goto tx_error; 1677 goto tx_error;
1180 } 1678 }
1181 1679
1182 /* Bypass encapsulation if the destination is local */ 1680 /* Bypass encapsulation if the destination is local */
1183 if (rt->rt_flags & RTCF_LOCAL && 1681 if (rt->rt_flags & RTCF_LOCAL &&
1184 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 1682 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1185 struct vxlan_dev *dst_vxlan; 1683 struct vxlan_dev *dst_vxlan;
1186 1684
1187 ip_rt_put(rt); 1685 ip_rt_put(rt);
1188 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); 1686 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
1189 if (!dst_vxlan) 1687 if (!dst_vxlan)
1190 goto tx_error; 1688 goto tx_error;
1191 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1689 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1192 return; 1690 return;
1193 } 1691 }
1194 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1195 vxh->vx_flags = htonl(VXLAN_FLAGS);
1196 vxh->vx_vni = htonl(vni << 8);
1197 1692
1198 __skb_push(skb, sizeof(*uh)); 1693 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1199 skb_reset_transport_header(skb); 1694 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1200 uh = udp_hdr(skb);
1201 1695
1202 uh->dest = dst_port; 1696 err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
1203 uh->source = src_port; 1697 fl4.saddr, dst->sin.sin_addr.s_addr,
1698 tos, ttl, df, src_port, dst_port,
1699 htonl(vni << 8));
1204 1700
1205 uh->len = htons(skb->len); 1701 if (err < 0)
1206 uh->check = 0; 1702 goto rt_tx_error;
1703 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
1704#if IS_ENABLED(CONFIG_IPV6)
1705 } else {
1706 struct sock *sk = vxlan->vn_sock->sock->sk;
1707 struct dst_entry *ndst;
1708 struct flowi6 fl6;
1709 u32 flags;
1710
1711 memset(&fl6, 0, sizeof(fl6));
1712 fl6.flowi6_oif = rdst->remote_ifindex;
1713 fl6.daddr = dst->sin6.sin6_addr;
1714 fl6.saddr = vxlan->saddr.sin6.sin6_addr;
1715 fl6.flowi6_proto = IPPROTO_UDP;
1716
1717 if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
1718 netdev_dbg(dev, "no route to %pI6\n",
1719 &dst->sin6.sin6_addr);
1720 dev->stats.tx_carrier_errors++;
1721 goto tx_error;
1722 }
1207 1723
1208 vxlan_set_owner(dev, skb); 1724 if (ndst->dev == dev) {
1725 netdev_dbg(dev, "circular route to %pI6\n",
1726 &dst->sin6.sin6_addr);
1727 dst_release(ndst);
1728 dev->stats.collisions++;
1729 goto tx_error;
1730 }
1209 1731
1210 if (handle_offloads(skb)) 1732 /* Bypass encapsulation if the destination is local */
1211 goto drop; 1733 flags = ((struct rt6_info *)ndst)->rt6i_flags;
1734 if (flags & RTF_LOCAL &&
1735 !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1736 struct vxlan_dev *dst_vxlan;
1737
1738 dst_release(ndst);
1739 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
1740 if (!dst_vxlan)
1741 goto tx_error;
1742 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1743 return;
1744 }
1212 1745
1213 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 1746 ttl = ttl ? : ip6_dst_hoplimit(ndst);
1214 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1215 1747
1216 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst, 1748 err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
1217 IPPROTO_UDP, tos, ttl, df); 1749 dev, &fl6.saddr, &fl6.daddr, 0, ttl,
1218 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 1750 src_port, dst_port, htonl(vni << 8));
1751#endif
1752 }
1219 1753
1220 return; 1754 return;
1221 1755
@@ -1223,6 +1757,8 @@ drop:
1223 dev->stats.tx_dropped++; 1757 dev->stats.tx_dropped++;
1224 goto tx_free; 1758 goto tx_free;
1225 1759
1760rt_tx_error:
1761 ip_rt_put(rt);
1226tx_error: 1762tx_error:
1227 dev->stats.tx_errors++; 1763 dev->stats.tx_errors++;
1228tx_free: 1764tx_free:
@@ -1246,14 +1782,29 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1246 skb_reset_mac_header(skb); 1782 skb_reset_mac_header(skb);
1247 eth = eth_hdr(skb); 1783 eth = eth_hdr(skb);
1248 1784
1249 if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP) 1785 if ((vxlan->flags & VXLAN_F_PROXY)) {
1250 return arp_reduce(dev, skb); 1786 if (ntohs(eth->h_proto) == ETH_P_ARP)
1787 return arp_reduce(dev, skb);
1788#if IS_ENABLED(CONFIG_IPV6)
1789 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
1790 skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
1791 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
1792 struct nd_msg *msg;
1793
1794 msg = (struct nd_msg *)skb_transport_header(skb);
1795 if (msg->icmph.icmp6_code == 0 &&
1796 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
1797 return neigh_reduce(dev, skb);
1798 }
1799#endif
1800 }
1251 1801
1252 f = vxlan_find_mac(vxlan, eth->h_dest); 1802 f = vxlan_find_mac(vxlan, eth->h_dest);
1253 did_rsc = false; 1803 did_rsc = false;
1254 1804
1255 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && 1805 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
1256 ntohs(eth->h_proto) == ETH_P_IP) { 1806 (ntohs(eth->h_proto) == ETH_P_IP ||
1807 ntohs(eth->h_proto) == ETH_P_IPV6)) {
1257 did_rsc = route_shortcircuit(dev, skb); 1808 did_rsc = route_shortcircuit(dev, skb);
1258 if (did_rsc) 1809 if (did_rsc)
1259 f = vxlan_find_mac(vxlan, eth->h_dest); 1810 f = vxlan_find_mac(vxlan, eth->h_dest);
@@ -1321,25 +1872,31 @@ static void vxlan_cleanup(unsigned long arg)
1321 mod_timer(&vxlan->age_timer, next_timer); 1872 mod_timer(&vxlan->age_timer, next_timer);
1322} 1873}
1323 1874
1875static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
1876{
1877 __u32 vni = vxlan->default_dst.remote_vni;
1878
1879 vxlan->vn_sock = vs;
1880 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
1881}
1882
1324/* Setup stats when device is created */ 1883/* Setup stats when device is created */
1325static int vxlan_init(struct net_device *dev) 1884static int vxlan_init(struct net_device *dev)
1326{ 1885{
1327 struct vxlan_dev *vxlan = netdev_priv(dev); 1886 struct vxlan_dev *vxlan = netdev_priv(dev);
1328 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 1887 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1329 struct vxlan_sock *vs; 1888 struct vxlan_sock *vs;
1330 __u32 vni = vxlan->default_dst.remote_vni;
1331 1889
1332 dev->tstats = alloc_percpu(struct pcpu_tstats); 1890 dev->tstats = alloc_percpu(struct pcpu_tstats);
1333 if (!dev->tstats) 1891 if (!dev->tstats)
1334 return -ENOMEM; 1892 return -ENOMEM;
1335 1893
1336 spin_lock(&vn->sock_lock); 1894 spin_lock(&vn->sock_lock);
1337 vs = vxlan_find_port(dev_net(dev), vxlan->dst_port); 1895 vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
1338 if (vs) { 1896 if (vs) {
1339 /* If we have a socket with same port already, reuse it */ 1897 /* If we have a socket with same port already, reuse it */
1340 atomic_inc(&vs->refcnt); 1898 atomic_inc(&vs->refcnt);
1341 vxlan->vn_sock = vs; 1899 vxlan_vs_add_dev(vs, vxlan);
1342 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
1343 } else { 1900 } else {
1344 /* otherwise make new socket outside of RTNL */ 1901 /* otherwise make new socket outside of RTNL */
1345 dev_hold(dev); 1902 dev_hold(dev);
@@ -1364,13 +1921,12 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
1364static void vxlan_uninit(struct net_device *dev) 1921static void vxlan_uninit(struct net_device *dev)
1365{ 1922{
1366 struct vxlan_dev *vxlan = netdev_priv(dev); 1923 struct vxlan_dev *vxlan = netdev_priv(dev);
1367 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1368 struct vxlan_sock *vs = vxlan->vn_sock; 1924 struct vxlan_sock *vs = vxlan->vn_sock;
1369 1925
1370 vxlan_fdb_delete_default(vxlan); 1926 vxlan_fdb_delete_default(vxlan);
1371 1927
1372 if (vs) 1928 if (vs)
1373 vxlan_sock_release(vn, vs); 1929 vxlan_sock_release(vs);
1374 free_percpu(dev->tstats); 1930 free_percpu(dev->tstats);
1375} 1931}
1376 1932
@@ -1385,8 +1941,8 @@ static int vxlan_open(struct net_device *dev)
1385 if (!vs) 1941 if (!vs)
1386 return -ENOTCONN; 1942 return -ENOTCONN;
1387 1943
1388 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && 1944 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
1389 vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { 1945 vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
1390 vxlan_sock_hold(vs); 1946 vxlan_sock_hold(vs);
1391 dev_hold(dev); 1947 dev_hold(dev);
1392 queue_work(vxlan_wq, &vxlan->igmp_join); 1948 queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1424,8 +1980,8 @@ static int vxlan_stop(struct net_device *dev)
1424 struct vxlan_dev *vxlan = netdev_priv(dev); 1980 struct vxlan_dev *vxlan = netdev_priv(dev);
1425 struct vxlan_sock *vs = vxlan->vn_sock; 1981 struct vxlan_sock *vs = vxlan->vn_sock;
1426 1982
1427 if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && 1983 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
1428 ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { 1984 ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
1429 vxlan_sock_hold(vs); 1985 vxlan_sock_hold(vs);
1430 dev_hold(dev); 1986 dev_hold(dev);
1431 queue_work(vxlan_wq, &vxlan->igmp_leave); 1987 queue_work(vxlan_wq, &vxlan->igmp_leave);
@@ -1464,6 +2020,34 @@ static struct device_type vxlan_type = {
1464 .name = "vxlan", 2020 .name = "vxlan",
1465}; 2021};
1466 2022
2023/* Calls the ndo_add_vxlan_port of the caller in order to
2024 * supply the listening VXLAN udp ports.
2025 */
2026void vxlan_get_rx_port(struct net_device *dev)
2027{
2028 struct vxlan_sock *vs;
2029 struct net *net = dev_net(dev);
2030 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2031 sa_family_t sa_family;
2032 u16 port;
2033 int i;
2034
2035 if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
2036 return;
2037
2038 spin_lock(&vn->sock_lock);
2039 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2040 hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) {
2041 port = htons(inet_sk(vs->sock->sk)->inet_sport);
2042 sa_family = vs->sock->sk->sk_family;
2043 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
2044 port);
2045 }
2046 }
2047 spin_unlock(&vn->sock_lock);
2048}
2049EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
2050
1467/* Initialize the device structure. */ 2051/* Initialize the device structure. */
1468static void vxlan_setup(struct net_device *dev) 2052static void vxlan_setup(struct net_device *dev)
1469{ 2053{
@@ -1473,7 +2057,10 @@ static void vxlan_setup(struct net_device *dev)
1473 2057
1474 eth_hw_addr_random(dev); 2058 eth_hw_addr_random(dev);
1475 ether_setup(dev); 2059 ether_setup(dev);
1476 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; 2060 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2061 dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
2062 else
2063 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
1477 2064
1478 dev->netdev_ops = &vxlan_netdev_ops; 2065 dev->netdev_ops = &vxlan_netdev_ops;
1479 dev->destructor = free_netdev; 2066 dev->destructor = free_netdev;
@@ -1486,8 +2073,11 @@ static void vxlan_setup(struct net_device *dev)
1486 dev->features |= NETIF_F_RXCSUM; 2073 dev->features |= NETIF_F_RXCSUM;
1487 dev->features |= NETIF_F_GSO_SOFTWARE; 2074 dev->features |= NETIF_F_GSO_SOFTWARE;
1488 2075
2076 dev->vlan_features = dev->features;
2077 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
1489 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 2078 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
1490 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2079 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2080 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
1491 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 2081 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1492 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 2082 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1493 2083
@@ -1515,8 +2105,10 @@ static void vxlan_setup(struct net_device *dev)
1515static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 2105static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
1516 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 2106 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
1517 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 2107 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2108 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
1518 [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, 2109 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
1519 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 2110 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2111 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
1520 [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, 2112 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
1521 [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, 2113 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
1522 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 2114 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
@@ -1587,99 +2179,199 @@ static void vxlan_del_work(struct work_struct *work)
1587 kfree_rcu(vs, rcu); 2179 kfree_rcu(vs, rcu);
1588} 2180}
1589 2181
1590static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) 2182#if IS_ENABLED(CONFIG_IPV6)
2183/* Create UDP socket for encapsulation receive. AF_INET6 socket
2184 * could be used for both IPv4 and IPv6 communications, but
2185 * users may set bindv6only=1.
2186 */
2187static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
2188{
2189 struct sock *sk;
2190 struct socket *sock;
2191 struct sockaddr_in6 vxlan_addr = {
2192 .sin6_family = AF_INET6,
2193 .sin6_port = port,
2194 };
2195 int rc, val = 1;
2196
2197 rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
2198 if (rc < 0) {
2199 pr_debug("UDPv6 socket create failed\n");
2200 return rc;
2201 }
2202
2203 /* Put in proper namespace */
2204 sk = sock->sk;
2205 sk_change_net(sk, net);
2206
2207 kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
2208 (char *)&val, sizeof(val));
2209 rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr,
2210 sizeof(struct sockaddr_in6));
2211 if (rc < 0) {
2212 pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
2213 &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
2214 sk_release_kernel(sk);
2215 return rc;
2216 }
2217 /* At this point, IPv6 module should have been loaded in
2218 * sock_create_kern().
2219 */
2220 BUG_ON(!ipv6_stub);
2221
2222 *psock = sock;
2223 /* Disable multicast loopback */
2224 inet_sk(sk)->mc_loop = 0;
2225 return 0;
2226}
2227
2228#else
2229
2230static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
2231{
2232 return -EPFNOSUPPORT;
2233}
2234#endif
2235
2236static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
1591{ 2237{
1592 struct vxlan_sock *vs;
1593 struct sock *sk; 2238 struct sock *sk;
2239 struct socket *sock;
1594 struct sockaddr_in vxlan_addr = { 2240 struct sockaddr_in vxlan_addr = {
1595 .sin_family = AF_INET, 2241 .sin_family = AF_INET,
1596 .sin_addr.s_addr = htonl(INADDR_ANY), 2242 .sin_addr.s_addr = htonl(INADDR_ANY),
1597 .sin_port = port, 2243 .sin_port = port,
1598 }; 2244 };
1599 int rc; 2245 int rc;
1600 unsigned int h;
1601
1602 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
1603 if (!vs)
1604 return ERR_PTR(-ENOMEM);
1605
1606 for (h = 0; h < VNI_HASH_SIZE; ++h)
1607 INIT_HLIST_HEAD(&vs->vni_list[h]);
1608
1609 INIT_WORK(&vs->del_work, vxlan_del_work);
1610 2246
1611 /* Create UDP socket for encapsulation receive. */ 2247 /* Create UDP socket for encapsulation receive. */
1612 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock); 2248 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1613 if (rc < 0) { 2249 if (rc < 0) {
1614 pr_debug("UDP socket create failed\n"); 2250 pr_debug("UDP socket create failed\n");
1615 kfree(vs); 2251 return rc;
1616 return ERR_PTR(rc);
1617 } 2252 }
1618 2253
1619 /* Put in proper namespace */ 2254 /* Put in proper namespace */
1620 sk = vs->sock->sk; 2255 sk = sock->sk;
1621 sk_change_net(sk, net); 2256 sk_change_net(sk, net);
1622 2257
1623 rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr, 2258 rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
1624 sizeof(vxlan_addr)); 2259 sizeof(vxlan_addr));
1625 if (rc < 0) { 2260 if (rc < 0) {
1626 pr_debug("bind for UDP socket %pI4:%u (%d)\n", 2261 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1627 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc); 2262 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
1628 sk_release_kernel(sk); 2263 sk_release_kernel(sk);
1629 kfree(vs); 2264 return rc;
1630 return ERR_PTR(rc);
1631 } 2265 }
1632 2266
2267 *psock = sock;
1633 /* Disable multicast loopback */ 2268 /* Disable multicast loopback */
1634 inet_sk(sk)->mc_loop = 0; 2269 inet_sk(sk)->mc_loop = 0;
2270 return 0;
2271}
2272
2273/* Create new listen socket if needed */
2274static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2275 vxlan_rcv_t *rcv, void *data, bool ipv6)
2276{
2277 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2278 struct vxlan_sock *vs;
2279 struct socket *sock;
2280 struct sock *sk;
2281 int rc = 0;
2282 unsigned int h;
2283
2284 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
2285 if (!vs)
2286 return ERR_PTR(-ENOMEM);
2287
2288 for (h = 0; h < VNI_HASH_SIZE; ++h)
2289 INIT_HLIST_HEAD(&vs->vni_list[h]);
2290
2291 INIT_WORK(&vs->del_work, vxlan_del_work);
2292
2293 if (ipv6)
2294 rc = create_v6_sock(net, port, &sock);
2295 else
2296 rc = create_v4_sock(net, port, &sock);
2297 if (rc < 0) {
2298 kfree(vs);
2299 return ERR_PTR(rc);
2300 }
2301
2302 vs->sock = sock;
2303 sk = sock->sk;
2304 atomic_set(&vs->refcnt, 1);
2305 vs->rcv = rcv;
2306 vs->data = data;
2307 smp_wmb();
2308 vs->sock->sk->sk_user_data = vs;
2309
2310 spin_lock(&vn->sock_lock);
2311 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2312 vxlan_notify_add_rx_port(sk);
2313 spin_unlock(&vn->sock_lock);
1635 2314
1636 /* Mark socket as an encapsulation socket. */ 2315 /* Mark socket as an encapsulation socket. */
1637 udp_sk(sk)->encap_type = 1; 2316 udp_sk(sk)->encap_type = 1;
1638 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; 2317 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
1639 udp_encap_enable(); 2318#if IS_ENABLED(CONFIG_IPV6)
1640 atomic_set(&vs->refcnt, 1); 2319 if (ipv6)
2320 ipv6_stub->udpv6_encap_enable();
2321 else
2322#endif
2323 udp_encap_enable();
1641 2324
1642 return vs; 2325 return vs;
1643} 2326}
1644 2327
1645/* Scheduled at device creation to bind to a socket */ 2328struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
1646static void vxlan_sock_work(struct work_struct *work) 2329 vxlan_rcv_t *rcv, void *data,
2330 bool no_share, bool ipv6)
1647{ 2331{
1648 struct vxlan_dev *vxlan
1649 = container_of(work, struct vxlan_dev, sock_work);
1650 struct net_device *dev = vxlan->dev;
1651 struct net *net = dev_net(dev);
1652 __u32 vni = vxlan->default_dst.remote_vni;
1653 __be16 port = vxlan->dst_port;
1654 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2332 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1655 struct vxlan_sock *nvs, *ovs; 2333 struct vxlan_sock *vs;
1656 2334
1657 nvs = vxlan_socket_create(net, port); 2335 vs = vxlan_socket_create(net, port, rcv, data, ipv6);
1658 if (IS_ERR(nvs)) { 2336 if (!IS_ERR(vs))
1659 netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n", 2337 return vs;
1660 PTR_ERR(nvs)); 2338
1661 goto out; 2339 if (no_share) /* Return error if sharing is not allowed. */
1662 } 2340 return vs;
1663 2341
1664 spin_lock(&vn->sock_lock); 2342 spin_lock(&vn->sock_lock);
1665 /* Look again to see if can reuse socket */ 2343 vs = vxlan_find_sock(net, port);
1666 ovs = vxlan_find_port(net, port); 2344 if (vs) {
1667 if (ovs) { 2345 if (vs->rcv == rcv)
1668 atomic_inc(&ovs->refcnt); 2346 atomic_inc(&vs->refcnt);
1669 vxlan->vn_sock = ovs; 2347 else
1670 hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni)); 2348 vs = ERR_PTR(-EBUSY);
1671 spin_unlock(&vn->sock_lock);
1672
1673 sk_release_kernel(nvs->sock->sk);
1674 kfree(nvs);
1675 } else {
1676 vxlan->vn_sock = nvs;
1677 hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
1678 hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
1679 spin_unlock(&vn->sock_lock);
1680 } 2349 }
1681out: 2350 spin_unlock(&vn->sock_lock);
1682 dev_put(dev); 2351
2352 if (!vs)
2353 vs = ERR_PTR(-EINVAL);
2354
2355 return vs;
2356}
2357EXPORT_SYMBOL_GPL(vxlan_sock_add);
2358
2359/* Scheduled at device creation to bind to a socket */
2360static void vxlan_sock_work(struct work_struct *work)
2361{
2362 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
2363 struct net *net = dev_net(vxlan->dev);
2364 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2365 __be16 port = vxlan->dst_port;
2366 struct vxlan_sock *nvs;
2367
2368 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6);
2369 spin_lock(&vn->sock_lock);
2370 if (!IS_ERR(nvs))
2371 vxlan_vs_add_dev(nvs, vxlan);
2372 spin_unlock(&vn->sock_lock);
2373
2374 dev_put(vxlan->dev);
1683} 2375}
1684 2376
1685static int vxlan_newlink(struct net *net, struct net_device *dev, 2377static int vxlan_newlink(struct net *net, struct net_device *dev,
@@ -1690,6 +2382,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1690 struct vxlan_rdst *dst = &vxlan->default_dst; 2382 struct vxlan_rdst *dst = &vxlan->default_dst;
1691 __u32 vni; 2383 __u32 vni;
1692 int err; 2384 int err;
2385 bool use_ipv6 = false;
1693 2386
1694 if (!data[IFLA_VXLAN_ID]) 2387 if (!data[IFLA_VXLAN_ID])
1695 return -EINVAL; 2388 return -EINVAL;
@@ -1697,11 +2390,32 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1697 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2390 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
1698 dst->remote_vni = vni; 2391 dst->remote_vni = vni;
1699 2392
1700 if (data[IFLA_VXLAN_GROUP]) 2393 if (data[IFLA_VXLAN_GROUP]) {
1701 dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]); 2394 dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
2395 dst->remote_ip.sa.sa_family = AF_INET;
2396 } else if (data[IFLA_VXLAN_GROUP6]) {
2397 if (!IS_ENABLED(CONFIG_IPV6))
2398 return -EPFNOSUPPORT;
2399
2400 nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
2401 sizeof(struct in6_addr));
2402 dst->remote_ip.sa.sa_family = AF_INET6;
2403 use_ipv6 = true;
2404 }
1702 2405
1703 if (data[IFLA_VXLAN_LOCAL]) 2406 if (data[IFLA_VXLAN_LOCAL]) {
1704 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); 2407 vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
2408 vxlan->saddr.sa.sa_family = AF_INET;
2409 } else if (data[IFLA_VXLAN_LOCAL6]) {
2410 if (!IS_ENABLED(CONFIG_IPV6))
2411 return -EPFNOSUPPORT;
2412
2413 /* TODO: respect scope id */
2414 nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
2415 sizeof(struct in6_addr));
2416 vxlan->saddr.sa.sa_family = AF_INET6;
2417 use_ipv6 = true;
2418 }
1705 2419
1706 if (data[IFLA_VXLAN_LINK] && 2420 if (data[IFLA_VXLAN_LINK] &&
1707 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 2421 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
@@ -1713,12 +2427,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1713 return -ENODEV; 2427 return -ENODEV;
1714 } 2428 }
1715 2429
2430#if IS_ENABLED(CONFIG_IPV6)
2431 if (use_ipv6) {
2432 struct inet6_dev *idev = __in6_dev_get(lowerdev);
2433 if (idev && idev->cnf.disable_ipv6) {
2434 pr_info("IPv6 is disabled via sysctl\n");
2435 return -EPERM;
2436 }
2437 vxlan->flags |= VXLAN_F_IPV6;
2438 }
2439#endif
2440
1716 if (!tb[IFLA_MTU]) 2441 if (!tb[IFLA_MTU])
1717 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; 2442 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
1718 2443
1719 /* update header length based on lower device */ 2444 /* update header length based on lower device */
1720 dev->hard_header_len = lowerdev->hard_header_len + 2445 dev->hard_header_len = lowerdev->hard_header_len +
1721 VXLAN_HEADROOM; 2446 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
1722 } 2447 }
1723 2448
1724 if (data[IFLA_VXLAN_TOS]) 2449 if (data[IFLA_VXLAN_TOS])
@@ -1769,7 +2494,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1769 2494
1770 /* create an fdb entry for default destination */ 2495 /* create an fdb entry for default destination */
1771 err = vxlan_fdb_create(vxlan, all_zeros_mac, 2496 err = vxlan_fdb_create(vxlan, all_zeros_mac,
1772 vxlan->default_dst.remote_ip, 2497 &vxlan->default_dst.remote_ip,
1773 NUD_REACHABLE|NUD_PERMANENT, 2498 NUD_REACHABLE|NUD_PERMANENT,
1774 NLM_F_EXCL|NLM_F_CREATE, 2499 NLM_F_EXCL|NLM_F_CREATE,
1775 vxlan->dst_port, vxlan->default_dst.remote_vni, 2500 vxlan->dst_port, vxlan->default_dst.remote_vni,
@@ -1794,7 +2519,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1794 struct vxlan_dev *vxlan = netdev_priv(dev); 2519 struct vxlan_dev *vxlan = netdev_priv(dev);
1795 2520
1796 spin_lock(&vn->sock_lock); 2521 spin_lock(&vn->sock_lock);
1797 hlist_del_rcu(&vxlan->hlist); 2522 if (!hlist_unhashed(&vxlan->hlist))
2523 hlist_del_rcu(&vxlan->hlist);
1798 spin_unlock(&vn->sock_lock); 2524 spin_unlock(&vn->sock_lock);
1799 2525
1800 list_del(&vxlan->next); 2526 list_del(&vxlan->next);
@@ -1805,9 +2531,9 @@ static size_t vxlan_get_size(const struct net_device *dev)
1805{ 2531{
1806 2532
1807 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ 2533 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
1808 nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */ 2534 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
1809 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ 2535 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
1810 nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */ 2536 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
1811 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ 2537 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
1812 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ 2538 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
1813 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 2539 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -1834,14 +2560,36 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1834 if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) 2560 if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
1835 goto nla_put_failure; 2561 goto nla_put_failure;
1836 2562
1837 if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip)) 2563 if (!vxlan_addr_any(&dst->remote_ip)) {
1838 goto nla_put_failure; 2564 if (dst->remote_ip.sa.sa_family == AF_INET) {
2565 if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
2566 dst->remote_ip.sin.sin_addr.s_addr))
2567 goto nla_put_failure;
2568#if IS_ENABLED(CONFIG_IPV6)
2569 } else {
2570 if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
2571 &dst->remote_ip.sin6.sin6_addr))
2572 goto nla_put_failure;
2573#endif
2574 }
2575 }
1839 2576
1840 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) 2577 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
1841 goto nla_put_failure; 2578 goto nla_put_failure;
1842 2579
1843 if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) 2580 if (!vxlan_addr_any(&vxlan->saddr)) {
1844 goto nla_put_failure; 2581 if (vxlan->saddr.sa.sa_family == AF_INET) {
2582 if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
2583 vxlan->saddr.sin.sin_addr.s_addr))
2584 goto nla_put_failure;
2585#if IS_ENABLED(CONFIG_IPV6)
2586 } else {
2587 if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
2588 &vxlan->saddr.sin6.sin6_addr))
2589 goto nla_put_failure;
2590#endif
2591 }
2592 }
1845 2593
1846 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || 2594 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
1847 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || 2595 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
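
The vxlan_src_port() hunk above spreads flows across the device's configured UDP source-port range by scaling a 32-bit flow hash: ((u64)hash * range) >> 32 maps the hash from [0, 2^32) onto [0, range) with a single multiply, avoiding a division, and adding port_min shifts the result into the configured window. A minimal userspace sketch of the same mapping (pick_src_port() and the sample port range are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a 32-bit hash onto [port_min, port_max] the way the hunk
     * above does: multiply by the range size, keep the high 32 bits. */
    static uint16_t pick_src_port(uint16_t port_min, uint16_t port_max,
                                  uint32_t hash)
    {
            uint32_t range = (uint32_t)(port_max - port_min) + 1;

            return (uint16_t)((((uint64_t)hash * range) >> 32) + port_min);
    }

    int main(void)
    {
            /* e.g. an ephemeral-style range 32768..61000 */
            printf("%u\n", pick_src_port(32768, 61000, 0xdeadbeefu));
            return 0;
    }
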
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index d43f4efd3e07..5bbcb5e3ee0c 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -176,7 +176,7 @@ static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;

 #ifndef MODULE
 typedef u32 iarr[];
-static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
+static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
 #endif

 /* A zero-terminated list of I/O addresses to be probed on ISA bus */
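
The sbni change is a placement fix: a section annotation like __initdata applies to the variable being declared, so it belongs after the declarator rather than between the element type and the '*'. A rough standalone illustration using a plain GCC section attribute (the names are hypothetical, and __initdata is assumed to expand to an attribute of this kind):

    typedef unsigned int iarr[];

    static unsigned int io[4];
    static unsigned int irq[4];

    /* Placed after the declarator, the attribute annotates the array
     * variable 'dest' itself rather than the pointed-to type. */
    static iarr *dest[2] __attribute__((section(".init.data"))) =
            { &io, &irq };
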
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index d0adbaf86186..7fe19648f10e 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2693,7 +2693,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
 	dev->base_addr = ethdev->base_addr;
 	dev->wireless_data = ethdev->wireless_data;
 	SET_NETDEV_DEV(dev, ethdev->dev.parent);
-	memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
+	eth_hw_addr_inherit(dev, ethdev);
 	err = register_netdev(dev);
 	if (err<0) {
 		free_netdev(dev);
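
The airo change swaps an open-coded memcpy() for eth_hw_addr_inherit(), which copies the Ethernet device's MAC address together with its addr_assign_type, so userspace can still tell that the wifi device's address was inherited rather than burned in. A userspace model of the helper (the struct and field here are stand-ins, not the kernel's types):

    #include <string.h>

    struct fake_netdev {
            unsigned char dev_addr[6];
            unsigned char addr_assign_type;
    };

    /* Copy both the address bytes and how the address was assigned,
     * instead of memcpy()ing the bytes alone. */
    static void hw_addr_inherit(struct fake_netdev *dst,
                                const struct fake_netdev *src)
    {
            dst->addr_assign_type = src->addr_assign_type;
            memcpy(dst->dev_addr, src->dev_addr, sizeof(dst->dev_addr));
    }
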
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index daeafeff186b..e0ba7cd14252 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -159,7 +159,7 @@ struct ath_common {

 	bool btcoex_enabled;
 	bool disable_ani;
-	bool antenna_diversity;
+	bool bt_ant_diversity;
 };

 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 1a2ef51b69d9..744da6d1c405 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -20,6 +20,12 @@
 #include "debug.h"
 #include "htc.h"

+void ath10k_bmi_start(struct ath10k *ar)
+{
+	ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+	ar->bmi.done_sent = false;
+}
+
 int ath10k_bmi_done(struct ath10k *ar)
 {
 	struct bmi_cmd cmd;
@@ -105,7 +111,8 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
 	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
 					  &resp, &rxlen);
 	if (ret) {
-		ath10k_warn("unable to read from the device\n");
+		ath10k_warn("unable to read from the device (%d)\n",
+			    ret);
 		return ret;
 	}

@@ -149,7 +156,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
 	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
 					  NULL, NULL);
 	if (ret) {
-		ath10k_warn("unable to write to the device\n");
+		ath10k_warn("unable to write to the device (%d)\n",
+			    ret);
 		return ret;
 	}

diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 32c56aa33a5e..8d81ce1cec21 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -184,6 +184,7 @@ struct bmi_target_info {
 #define BMI_CE_NUM_TO_TARG 0
 #define BMI_CE_NUM_TO_HOST 1

+void ath10k_bmi_start(struct ath10k *ar);
 int ath10k_bmi_done(struct ath10k *ar);
 int ath10k_bmi_get_target_info(struct ath10k *ar,
 			       struct bmi_target_info *target_info);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 61a8ac70d3ca..f8b969f518f8 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -79,7 +79,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	void __iomem *indicator_addr;

-	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
 		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
 		return;
 	}
@@ -637,6 +637,7 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
 		ath10k_pci_wake(ar);
 		src_ring->hw_index =
 			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+		src_ring->hw_index &= nentries_mask;
 		ath10k_pci_sleep(ar);
 	}
 	read_index = src_ring->hw_index;
@@ -950,10 +951,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,

 	ath10k_pci_wake(ar);
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+	src_ring->sw_index &= src_ring->nentries_mask;
 	src_ring->hw_index = src_ring->sw_index;

 	src_ring->write_index =
 		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+	src_ring->write_index &= src_ring->nentries_mask;
 	ath10k_pci_sleep(ar);

 	src_ring->per_transfer_context = (void **)ptr;
@@ -1035,8 +1038,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,

 	ath10k_pci_wake(ar);
 	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+	dest_ring->sw_index &= dest_ring->nentries_mask;
 	dest_ring->write_index =
 		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+	dest_ring->write_index &= dest_ring->nentries_mask;
 	ath10k_pci_sleep(ar);

 	dest_ring->per_transfer_context = (void **)ptr;
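
The ce.c hunks mask every ring index read back from hardware with the ring's nentries_mask. Copy-engine rings are power-of-two sized, so index & (nentries - 1) both performs the normal wrap-around and clamps a glitched register value into [0, nentries), which is what the added lines guard against. A small standalone sketch of the pattern (the ring size and the fake register value are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define NENTRIES      512u           /* must be a power of two */
    #define NENTRIES_MASK (NENTRIES - 1u)

    int main(void)
    {
            uint32_t hw_value = 0xdead0203u;  /* bogus out-of-range read */
            uint32_t index = hw_value & NENTRIES_MASK;

            /* The mask keeps the index inside the ring no matter what
             * the register returned, and doubles as the wrap operation
             * for ordinary increments. */
            printf("index = %u (< %u)\n", index, NENTRIES);
            return 0;
    }
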
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 2b3426b1ff3f..7226c23b9569 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -100,7 +100,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
100 goto conn_fail; 100 goto conn_fail;
101 101
102 /* Start HTC */ 102 /* Start HTC */
103 status = ath10k_htc_start(ar->htc); 103 status = ath10k_htc_start(&ar->htc);
104 if (status) 104 if (status)
105 goto conn_fail; 105 goto conn_fail;
106 106
@@ -116,7 +116,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
116 return 0; 116 return 0;
117 117
118timeout: 118timeout:
119 ath10k_htc_stop(ar->htc); 119 ath10k_htc_stop(&ar->htc);
120conn_fail: 120conn_fail:
121 return status; 121 return status;
122} 122}
@@ -247,19 +247,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
247 247
248static int ath10k_download_board_data(struct ath10k *ar) 248static int ath10k_download_board_data(struct ath10k *ar)
249{ 249{
250 const struct firmware *fw = ar->board_data;
250 u32 board_data_size = QCA988X_BOARD_DATA_SZ; 251 u32 board_data_size = QCA988X_BOARD_DATA_SZ;
251 u32 address; 252 u32 address;
252 const struct firmware *fw;
253 int ret; 253 int ret;
254 254
255 fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
256 ar->hw_params.fw.board);
257 if (IS_ERR(fw)) {
258 ath10k_err("could not fetch board data fw file (%ld)\n",
259 PTR_ERR(fw));
260 return PTR_ERR(fw);
261 }
262
263 ret = ath10k_push_board_ext_data(ar, fw); 255 ret = ath10k_push_board_ext_data(ar, fw);
264 if (ret) { 256 if (ret) {
265 ath10k_err("could not push board ext data (%d)\n", ret); 257 ath10k_err("could not push board ext data (%d)\n", ret);
@@ -286,32 +278,20 @@ static int ath10k_download_board_data(struct ath10k *ar)
286 } 278 }
287 279
288exit: 280exit:
289 release_firmware(fw);
290 return ret; 281 return ret;
291} 282}
292 283
293static int ath10k_download_and_run_otp(struct ath10k *ar) 284static int ath10k_download_and_run_otp(struct ath10k *ar)
294{ 285{
295 const struct firmware *fw; 286 const struct firmware *fw = ar->otp;
296 u32 address; 287 u32 address = ar->hw_params.patch_load_addr;
297 u32 exec_param; 288 u32 exec_param;
298 int ret; 289 int ret;
299 290
300 /* OTP is optional */ 291 /* OTP is optional */
301 292
302 if (ar->hw_params.fw.otp == NULL) { 293 if (!ar->otp)
303 ath10k_info("otp file not defined\n");
304 return 0;
305 }
306
307 address = ar->hw_params.patch_load_addr;
308
309 fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
310 ar->hw_params.fw.otp);
311 if (IS_ERR(fw)) {
312 ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
313 return 0; 294 return 0;
314 }
315 295
316 ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); 296 ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
317 if (ret) { 297 if (ret) {
@@ -327,28 +307,17 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
327 } 307 }
328 308
329exit: 309exit:
330 release_firmware(fw);
331 return ret; 310 return ret;
332} 311}
333 312
334static int ath10k_download_fw(struct ath10k *ar) 313static int ath10k_download_fw(struct ath10k *ar)
335{ 314{
336 const struct firmware *fw; 315 const struct firmware *fw = ar->firmware;
337 u32 address; 316 u32 address;
338 int ret; 317 int ret;
339 318
340 if (ar->hw_params.fw.fw == NULL)
341 return -EINVAL;
342
343 address = ar->hw_params.patch_load_addr; 319 address = ar->hw_params.patch_load_addr;
344 320
345 fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
346 ar->hw_params.fw.fw);
347 if (IS_ERR(fw)) {
348 ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
349 return PTR_ERR(fw);
350 }
351
352 ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); 321 ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
353 if (ret) { 322 if (ret) {
354 ath10k_err("could not write fw (%d)\n", ret); 323 ath10k_err("could not write fw (%d)\n", ret);
@@ -356,7 +325,74 @@ static int ath10k_download_fw(struct ath10k *ar)
356 } 325 }
357 326
358exit: 327exit:
359 release_firmware(fw); 328 return ret;
329}
330
331static void ath10k_core_free_firmware_files(struct ath10k *ar)
332{
333 if (ar->board_data && !IS_ERR(ar->board_data))
334 release_firmware(ar->board_data);
335
336 if (ar->otp && !IS_ERR(ar->otp))
337 release_firmware(ar->otp);
338
339 if (ar->firmware && !IS_ERR(ar->firmware))
340 release_firmware(ar->firmware);
341
342 ar->board_data = NULL;
343 ar->otp = NULL;
344 ar->firmware = NULL;
345}
346
347static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
348{
349 int ret = 0;
350
351 if (ar->hw_params.fw.fw == NULL) {
352 ath10k_err("firmware file not defined\n");
353 return -EINVAL;
354 }
355
356 if (ar->hw_params.fw.board == NULL) {
357 ath10k_err("board data file not defined");
358 return -EINVAL;
359 }
360
361 ar->board_data = ath10k_fetch_fw_file(ar,
362 ar->hw_params.fw.dir,
363 ar->hw_params.fw.board);
364 if (IS_ERR(ar->board_data)) {
365 ret = PTR_ERR(ar->board_data);
366 ath10k_err("could not fetch board data (%d)\n", ret);
367 goto err;
368 }
369
370 ar->firmware = ath10k_fetch_fw_file(ar,
371 ar->hw_params.fw.dir,
372 ar->hw_params.fw.fw);
373 if (IS_ERR(ar->firmware)) {
374 ret = PTR_ERR(ar->firmware);
375 ath10k_err("could not fetch firmware (%d)\n", ret);
376 goto err;
377 }
378
379 /* OTP may be undefined. If so, don't fetch it at all */
380 if (ar->hw_params.fw.otp == NULL)
381 return 0;
382
383 ar->otp = ath10k_fetch_fw_file(ar,
384 ar->hw_params.fw.dir,
385 ar->hw_params.fw.otp);
386 if (IS_ERR(ar->otp)) {
387 ret = PTR_ERR(ar->otp);
388 ath10k_err("could not fetch otp (%d)\n", ret);
389 goto err;
390 }
391
392 return 0;
393
394err:
395 ath10k_core_free_firmware_files(ar);
360 return ret; 396 return ret;
361} 397}
362 398
@@ -440,8 +476,35 @@ static int ath10k_init_hw_params(struct ath10k *ar)
440 return 0; 476 return 0;
441} 477}
442 478
479static void ath10k_core_restart(struct work_struct *work)
480{
481 struct ath10k *ar = container_of(work, struct ath10k, restart_work);
482
483 mutex_lock(&ar->conf_mutex);
484
485 switch (ar->state) {
486 case ATH10K_STATE_ON:
487 ath10k_halt(ar);
488 ar->state = ATH10K_STATE_RESTARTING;
489 ieee80211_restart_hw(ar->hw);
490 break;
491 case ATH10K_STATE_OFF:
492 /* this can happen if driver is being unloaded */
493 ath10k_warn("cannot restart a device that hasn't been started\n");
494 break;
495 case ATH10K_STATE_RESTARTING:
496 case ATH10K_STATE_RESTARTED:
497 ar->state = ATH10K_STATE_WEDGED;
498 /* fall through */
499 case ATH10K_STATE_WEDGED:
500 ath10k_warn("device is wedged, will not restart\n");
501 break;
502 }
503
504 mutex_unlock(&ar->conf_mutex);
505}
506
443struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, 507struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
444 enum ath10k_bus bus,
445 const struct ath10k_hif_ops *hif_ops) 508 const struct ath10k_hif_ops *hif_ops)
446{ 509{
447 struct ath10k *ar; 510 struct ath10k *ar;
@@ -458,9 +521,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
458 521
459 ar->hif.priv = hif_priv; 522 ar->hif.priv = hif_priv;
460 ar->hif.ops = hif_ops; 523 ar->hif.ops = hif_ops;
461 ar->hif.bus = bus;
462
463 ar->free_vdev_map = 0xFF; /* 8 vdevs */
464 524
465 init_completion(&ar->scan.started); 525 init_completion(&ar->scan.started);
466 init_completion(&ar->scan.completed); 526 init_completion(&ar->scan.completed);
@@ -487,6 +547,8 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
487 547
488 init_waitqueue_head(&ar->event_queue); 548 init_waitqueue_head(&ar->event_queue);
489 549
550 INIT_WORK(&ar->restart_work, ath10k_core_restart);
551
490 return ar; 552 return ar;
491 553
492err_wq: 554err_wq:
@@ -504,24 +566,11 @@ void ath10k_core_destroy(struct ath10k *ar)
504} 566}
505EXPORT_SYMBOL(ath10k_core_destroy); 567EXPORT_SYMBOL(ath10k_core_destroy);
506 568
507 569int ath10k_core_start(struct ath10k *ar)
508int ath10k_core_register(struct ath10k *ar)
509{ 570{
510 struct ath10k_htc_ops htc_ops;
511 struct bmi_target_info target_info;
512 int status; 571 int status;
513 572
514 memset(&target_info, 0, sizeof(target_info)); 573 ath10k_bmi_start(ar);
515 status = ath10k_bmi_get_target_info(ar, &target_info);
516 if (status)
517 goto err;
518
519 ar->target_version = target_info.version;
520 ar->hw->wiphy->hw_version = target_info.version;
521
522 status = ath10k_init_hw_params(ar);
523 if (status)
524 goto err;
525 574
526 if (ath10k_init_configure_target(ar)) { 575 if (ath10k_init_configure_target(ar)) {
527 status = -EINVAL; 576 status = -EINVAL;
@@ -536,32 +585,32 @@ int ath10k_core_register(struct ath10k *ar)
536 if (status) 585 if (status)
537 goto err; 586 goto err;
538 587
539 htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; 588 ar->htc.htc_ops.target_send_suspend_complete =
589 ath10k_send_suspend_complete;
540 590
541 ar->htc = ath10k_htc_create(ar, &htc_ops); 591 status = ath10k_htc_init(ar);
542 if (IS_ERR(ar->htc)) { 592 if (status) {
543 status = PTR_ERR(ar->htc); 593 ath10k_err("could not init HTC (%d)\n", status);
544 ath10k_err("could not create HTC (%d)\n", status);
545 goto err; 594 goto err;
546 } 595 }
547 596
548 status = ath10k_bmi_done(ar); 597 status = ath10k_bmi_done(ar);
549 if (status) 598 if (status)
550 goto err_htc_destroy; 599 goto err;
551 600
552 status = ath10k_wmi_attach(ar); 601 status = ath10k_wmi_attach(ar);
553 if (status) { 602 if (status) {
554 ath10k_err("WMI attach failed: %d\n", status); 603 ath10k_err("WMI attach failed: %d\n", status);
555 goto err_htc_destroy; 604 goto err;
556 } 605 }
557 606
558 status = ath10k_htc_wait_target(ar->htc); 607 status = ath10k_htc_wait_target(&ar->htc);
559 if (status) 608 if (status)
560 goto err_wmi_detach; 609 goto err_wmi_detach;
561 610
562 ar->htt = ath10k_htt_attach(ar); 611 status = ath10k_htt_attach(ar);
563 if (!ar->htt) { 612 if (status) {
564 status = -ENOMEM; 613 ath10k_err("could not attach htt (%d)\n", status);
565 goto err_wmi_detach; 614 goto err_wmi_detach;
566 } 615 }
567 616
@@ -588,77 +637,127 @@ int ath10k_core_register(struct ath10k *ar)
588 goto err_disconnect_htc; 637 goto err_disconnect_htc;
589 } 638 }
590 639
591 status = ath10k_htt_attach_target(ar->htt); 640 status = ath10k_htt_attach_target(&ar->htt);
592 if (status)
593 goto err_disconnect_htc;
594
595 status = ath10k_mac_register(ar);
596 if (status) 641 if (status)
597 goto err_disconnect_htc; 642 goto err_disconnect_htc;
598 643
599 status = ath10k_debug_create(ar); 644 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
600 if (status) {
601 ath10k_err("unable to initialize debugfs\n");
602 goto err_unregister_mac;
603 }
604 645
605 return 0; 646 return 0;
606 647
607err_unregister_mac:
608 ath10k_mac_unregister(ar);
609err_disconnect_htc: 648err_disconnect_htc:
610 ath10k_htc_stop(ar->htc); 649 ath10k_htc_stop(&ar->htc);
611err_htt_detach: 650err_htt_detach:
612 ath10k_htt_detach(ar->htt); 651 ath10k_htt_detach(&ar->htt);
613err_wmi_detach: 652err_wmi_detach:
614 ath10k_wmi_detach(ar); 653 ath10k_wmi_detach(ar);
615err_htc_destroy:
616 ath10k_htc_destroy(ar->htc);
617err: 654err:
 		return status;
 }
-EXPORT_SYMBOL(ath10k_core_register);
+EXPORT_SYMBOL(ath10k_core_start);
 
-void ath10k_core_unregister(struct ath10k *ar)
+void ath10k_core_stop(struct ath10k *ar)
 {
-	/* We must unregister from mac80211 before we stop HTC and HIF.
-	 * Otherwise we will fail to submit commands to FW and mac80211 will be
-	 * unhappy about callback failures. */
-	ath10k_mac_unregister(ar);
-	ath10k_htc_stop(ar->htc);
-	ath10k_htt_detach(ar->htt);
+	ath10k_htc_stop(&ar->htc);
+	ath10k_htt_detach(&ar->htt);
 	ath10k_wmi_detach(ar);
-	ath10k_htc_destroy(ar->htc);
 }
-EXPORT_SYMBOL(ath10k_core_unregister);
+EXPORT_SYMBOL(ath10k_core_stop);
 
-int ath10k_core_target_suspend(struct ath10k *ar)
+/* mac80211 manages fw/hw initialization through start/stop hooks. However in
+ * order to know what hw capabilities should be advertised to mac80211 it is
+ * necessary to load the firmware (and tear it down immediately since start
+ * hook will try to init it again) before registering */
+static int ath10k_core_probe_fw(struct ath10k *ar)
 {
-	int ret;
+	struct bmi_target_info target_info;
+	int ret = 0;
+
+	ret = ath10k_hif_power_up(ar);
+	if (ret) {
+		ath10k_err("could not start pci hif (%d)\n", ret);
+		return ret;
+	}
 
-	ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+	memset(&target_info, 0, sizeof(target_info));
+	ret = ath10k_bmi_get_target_info(ar, &target_info);
+	if (ret) {
+		ath10k_err("could not get target info (%d)\n", ret);
+		ath10k_hif_power_down(ar);
+		return ret;
+	}
 
-	ret = ath10k_wmi_pdev_suspend_target(ar);
-	if (ret)
-		ath10k_warn("could not suspend target (%d)\n", ret);
+	ar->target_version = target_info.version;
+	ar->hw->wiphy->hw_version = target_info.version;
 
-	return ret;
+	ret = ath10k_init_hw_params(ar);
+	if (ret) {
+		ath10k_err("could not get hw params (%d)\n", ret);
+		ath10k_hif_power_down(ar);
+		return ret;
+	}
+
+	ret = ath10k_core_fetch_firmware_files(ar);
+	if (ret) {
+		ath10k_err("could not fetch firmware files (%d)\n", ret);
+		ath10k_hif_power_down(ar);
+		return ret;
+	}
+
+	ret = ath10k_core_start(ar);
+	if (ret) {
+		ath10k_err("could not init core (%d)\n", ret);
+		ath10k_core_free_firmware_files(ar);
+		ath10k_hif_power_down(ar);
+		return ret;
+	}
+
+	ath10k_core_stop(ar);
+	ath10k_hif_power_down(ar);
+	return 0;
 }
-EXPORT_SYMBOL(ath10k_core_target_suspend);
 
-int ath10k_core_target_resume(struct ath10k *ar)
+int ath10k_core_register(struct ath10k *ar)
 {
-	int ret;
+	int status;
 
-	ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+	status = ath10k_core_probe_fw(ar);
+	if (status) {
+		ath10k_err("could not probe fw (%d)\n", status);
+		return status;
+	}
 
-	ret = ath10k_wmi_pdev_resume_target(ar);
-	if (ret)
-		ath10k_warn("could not resume target (%d)\n", ret);
+	status = ath10k_mac_register(ar);
+	if (status) {
+		ath10k_err("could not register to mac80211 (%d)\n", status);
+		goto err_release_fw;
+	}
 
-	return ret;
+	status = ath10k_debug_create(ar);
+	if (status) {
+		ath10k_err("unable to initialize debugfs\n");
+		goto err_unregister_mac;
+	}
+
+	return 0;
+
+err_unregister_mac:
+	ath10k_mac_unregister(ar);
+err_release_fw:
+	ath10k_core_free_firmware_files(ar);
+	return status;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+	/* We must unregister from mac80211 before we stop HTC and HIF.
+	 * Otherwise we will fail to submit commands to FW and mac80211 will be
+	 * unhappy about callback failures. */
+	ath10k_mac_unregister(ar);
+	ath10k_core_free_firmware_files(ar);
 }
-EXPORT_SYMBOL(ath10k_core_target_resume);
+EXPORT_SYMBOL(ath10k_core_unregister);
 
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
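The hunk above gives ath10k_core_probe_fw() a strict bring-up/tear-down shape: power the target up only long enough to read its BMI target info, hw params and firmware files, boot it once through ath10k_core_start(), then stop and power down again, since mac80211's start() hook will redo the whole bring-up later. A standalone sketch of that unwind-on-error pattern (step names are illustrative, not driver API):

    #include <stdio.h>

    /* Toy model of the probe-firmware sequence: each step must be undone
     * in reverse order when a later step fails. */
    static int power_up(void)    { puts("power up");   return 0; }
    static void power_down(void) { puts("power down"); }
    static int boot_once(void)   { puts("boot");       return 0; }
    static void stop_once(void)  { puts("stop"); }

    static int probe_fw(void)
    {
        int ret = power_up();
        if (ret)
            return ret;

        ret = boot_once();
        if (ret) {
            power_down();   /* unwind the earlier step */
            return ret;
        }

        /* capabilities would be read here, then the device is torn down
         * again so the later start() hook can bring it up from scratch */
        stop_once();
        power_down();
        return 0;
    }

    int main(void) { return probe_fw(); }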
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 539336d1be4b..e4bba563ed42 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 
+#include "htt.h"
 #include "htc.h"
 #include "hw.h"
 #include "targaddrs.h"
@@ -37,16 +38,13 @@
 #define ATH10K_SCAN_ID 0
 #define WMI_READY_TIMEOUT (5 * HZ)
 #define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
+#define ATH10K_NUM_CHANS 38
 
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
 struct ath10k;
 
-enum ath10k_bus {
-	ATH10K_BUS_PCI,
-};
-
 struct ath10k_skb_cb {
 	dma_addr_t paddr;
 	bool is_mapped;
@@ -250,6 +248,28 @@ struct ath10k_debug {
 	struct completion event_stats_compl;
 };
 
+enum ath10k_state {
+	ATH10K_STATE_OFF = 0,
+	ATH10K_STATE_ON,
+
+	/* When doing firmware recovery the device is first powered down.
+	 * mac80211 is supposed to call in to start() hook later on. It is
+	 * however possible that driver unloading and firmware crash overlap.
+	 * mac80211 can wait on conf_mutex in stop() while the device is
+	 * stopped in ath10k_core_restart() work holding conf_mutex. The state
+	 * RESTARTED means that the device is up and mac80211 has started hw
+	 * reconfiguration. Once mac80211 is done with the reconfiguration we
+	 * set the state to STATE_ON in restart_complete(). */
+	ATH10K_STATE_RESTARTING,
+	ATH10K_STATE_RESTARTED,
+
+	/* The device has crashed while restarting hw. This state is like ON
+	 * but commands are blocked in HTC and -ECOMM response is given. This
+	 * prevents completion timeouts and makes the driver more responsive to
+	 * userspace commands. This also prevents recursive recovery. */
+	ATH10K_STATE_WEDGED,
+};
+
 struct ath10k {
 	struct ath_common ath_common;
 	struct ieee80211_hw *hw;
@@ -266,6 +286,7 @@ struct ath10k {
 	u32 hw_max_tx_power;
 	u32 ht_cap_info;
 	u32 vht_cap_info;
+	u32 num_rf_chains;
 
 	struct targetdef *targetdef;
 	struct hostdef *hostdef;
@@ -274,19 +295,16 @@ struct ath10k {
 
 	struct {
 		void *priv;
-		enum ath10k_bus bus;
 		const struct ath10k_hif_ops *ops;
 	} hif;
 
-	struct ath10k_wmi wmi;
-
 	wait_queue_head_t event_queue;
 	bool is_target_paused;
 
 	struct ath10k_bmi bmi;
-
-	struct ath10k_htc *htc;
-	struct ath10k_htt *htt;
+	struct ath10k_wmi wmi;
+	struct ath10k_htc htc;
+	struct ath10k_htt htt;
 
 	struct ath10k_hw_params {
 		u32 id;
@@ -301,6 +319,10 @@ struct ath10k {
 		} fw;
 	} hw_params;
 
+	const struct firmware *board_data;
+	const struct firmware *otp;
+	const struct firmware *firmware;
+
 	struct {
 		struct completion started;
 		struct completion completed;
@@ -350,20 +372,28 @@ struct ath10k {
 	struct completion offchan_tx_completed;
 	struct sk_buff *offchan_tx_skb;
 
+	enum ath10k_state state;
+
+	struct work_struct restart_work;
+
+	/* cycle count is reported twice for each visited channel during scan.
+	 * access protected by data_lock */
+	u32 survey_last_rx_clear_count;
+	u32 survey_last_cycle_count;
+	struct survey_info survey[ATH10K_NUM_CHANS];
+
 #ifdef CONFIG_ATH10K_DEBUGFS
 	struct ath10k_debug debug;
 #endif
 };
 
 struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
-				  enum ath10k_bus bus,
 				  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
 
+int ath10k_core_start(struct ath10k *ar);
+void ath10k_core_stop(struct ath10k *ar);
 int ath10k_core_register(struct ath10k *ar);
 void ath10k_core_unregister(struct ath10k *ar);
 
-int ath10k_core_target_suspend(struct ath10k *ar);
-int ath10k_core_target_resume(struct ath10k *ar);
-
 #endif /* _CORE_H_ */
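The new enum is a small recovery state machine: OFF/ON for the normal start()/stop() path, RESTARTING/RESTARTED while mac80211 reconfigures the hw after a firmware crash, and WEDGED when a second crash lands mid-recovery. A standalone model of those documented transitions (helper names are invented; only the states and the rules come from the comments above):

    #include <assert.h>

    enum state { OFF, ON, RESTARTING, RESTARTED, WEDGED };

    static enum state on_start(enum state s)
    {
        assert(s == OFF || s == RESTARTING);
        return s == OFF ? ON : RESTARTED;
    }

    static enum state on_fw_crash(enum state s)
    {
        /* a crash while already recovering wedges the device */
        return (s == RESTARTING || s == RESTARTED) ? WEDGED : RESTARTING;
    }

    static enum state on_restart_complete(enum state s)
    {
        assert(s == RESTARTED);
        return ON;
    }

    int main(void)
    {
        enum state s = OFF;
        s = on_start(s);            /* OFF -> ON */
        s = on_fw_crash(s);         /* ON -> RESTARTING */
        s = on_start(s);            /* RESTARTING -> RESTARTED */
        s = on_restart_complete(s); /* RESTARTED -> ON */
        return s == ON ? 0 : 1;
    }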
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 499034b873d1..3d65594fa098 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
 	struct wmi_pdev_stats *ps;
 	int i;
 
-	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->data_lock);
 
 	stats = &ar->debug.target_stats;
 
@@ -259,6 +259,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
 		}
 	}
 
+	spin_unlock_bh(&ar->data_lock);
 	mutex_unlock(&ar->conf_mutex);
 	complete(&ar->debug.event_stats_compl);
 }
@@ -268,35 +269,35 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
 {
 	struct ath10k *ar = file->private_data;
 	struct ath10k_target_stats *fw_stats;
-	char *buf;
+	char *buf = NULL;
 	unsigned int len = 0, buf_len = 2500;
-	ssize_t ret_cnt;
+	ssize_t ret_cnt = 0;
 	long left;
 	int i;
 	int ret;
 
 	fw_stats = &ar->debug.target_stats;
 
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON)
+		goto exit;
+
 	buf = kzalloc(buf_len, GFP_KERNEL);
 	if (!buf)
-		return -ENOMEM;
+		goto exit;
 
 	ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
 	if (ret) {
 		ath10k_warn("could not request stats (%d)\n", ret);
-		kfree(buf);
-		return -EIO;
+		goto exit;
 	}
 
 	left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
+	if (left <= 0)
+		goto exit;
 
-	if (left <= 0) {
-		kfree(buf);
-		return -ETIMEDOUT;
-	}
-
-	mutex_lock(&ar->conf_mutex);
-
+	spin_lock_bh(&ar->data_lock);
 	len += scnprintf(buf + len, buf_len - len, "\n");
 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
 			 "ath10k PDEV stats");
@@ -424,14 +425,15 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
 			 fw_stats->peer_stat[i].peer_tx_rate);
 		len += scnprintf(buf + len, buf_len - len, "\n");
 	}
+	spin_unlock_bh(&ar->data_lock);
 
 	if (len > buf_len)
 		len = buf_len;
 
 	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 
+exit:
 	mutex_unlock(&ar->conf_mutex);
-
 	kfree(buf);
 	return ret_cnt;
 }
@@ -443,6 +445,60 @@ static const struct file_operations fops_fw_stats = {
 	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	const char buf[] = "To simulate firmware crash write the keyword"
+			   " `crash` to this file.\nThis will force firmware"
+			   " to report a crash to the host system.\n";
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32] = {};
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+	if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	ath10k_info("simulating firmware crash\n");
+
+	ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+	if (ret)
+		ath10k_warn("failed to force fw hang (%d)\n", ret);
+
+	if (ret == 0)
+		ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+	.read = ath10k_read_simulate_fw_crash,
+	.write = ath10k_write_simulate_fw_crash,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -459,6 +515,9 @@ int ath10k_debug_create(struct ath10k *ar)
 	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
 			    &fops_wmi_services);
 
+	debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_simulate_fw_crash);
+
 	return 0;
 }
 #endif /* CONFIG_ATH10K_DEBUGFS */
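The write handler accepts the literal keyword `crash` (with or without a trailing newline) and only while the device is ON or RESTARTED. A minimal userspace trigger; the debugfs path below is an example, the phy index varies per system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* hypothetical path; debugfs must be mounted and phy0 present */
        const char *p =
            "/sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash";
        int fd = open(p, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "crash", strlen("crash")) < 0)
            perror("write");
        close(fd);
        return 0;
    }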
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 73a24d44d1b4..dcdea68bcc0a 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -46,8 +46,11 @@ struct ath10k_hif_ops {
 			     void *request, u32 request_len,
 			     void *response, u32 *response_len);
 
+	/* Post BMI phase, after FW is loaded. Starts regular operation */
 	int (*start)(struct ath10k *ar);
 
+	/* Clean up what start() did. This does not revert to BMI phase. If
+	 * so desired, call power_down() and power_up() */
 	void (*stop)(struct ath10k *ar);
 
 	int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
@@ -66,10 +69,20 @@ struct ath10k_hif_ops {
 	 */
 	void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
 
-	void (*init)(struct ath10k *ar,
-		     struct ath10k_hif_cb *callbacks);
+	void (*set_callbacks)(struct ath10k *ar,
+			      struct ath10k_hif_cb *callbacks);
 
 	u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+	/* Power up the device and enter BMI transfer mode for FW download */
+	int (*power_up)(struct ath10k *ar);
+
+	/* Power down the device and free up resources. stop() must be called
+	 * before this if start() was called earlier */
+	void (*power_down)(struct ath10k *ar);
+
+	int (*suspend)(struct ath10k *ar);
+	int (*resume)(struct ath10k *ar);
 };
 
 
@@ -122,10 +135,10 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
 	ar->hif.ops->send_complete_check(ar, pipe_id, force);
 }
 
-static inline void ath10k_hif_init(struct ath10k *ar,
-				   struct ath10k_hif_cb *callbacks)
+static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
+					    struct ath10k_hif_cb *callbacks)
 {
-	ar->hif.ops->init(ar, callbacks);
+	ar->hif.ops->set_callbacks(ar, callbacks);
 }
 
 static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
@@ -134,4 +147,30 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
 	return ar->hif.ops->get_free_queue_number(ar, pipe_id);
 }
 
+static inline int ath10k_hif_power_up(struct ath10k *ar)
+{
+	return ar->hif.ops->power_up(ar);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+	ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+	if (!ar->hif.ops->suspend)
+		return -EOPNOTSUPP;
+
+	return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+	if (!ar->hif.ops->resume)
+		return -EOPNOTSUPP;
+
+	return ar->hif.ops->resume(ar);
+}
+
 #endif /* _HIF_H_ */
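For orientation, a bus backend would fill the reworked op table roughly as follows; the my_pci_* callbacks are placeholders, only the field names come from this header. Leaving .suspend/.resume unset is legal because the inline wrappers above return -EOPNOTSUPP instead of calling through a NULL pointer:

    /* Hypothetical backend; field names from this header, rest invented. */
    static const struct ath10k_hif_ops my_pci_hif_ops = {
    	.start         = my_pci_hif_start,
    	.stop          = my_pci_hif_stop,
    	.set_callbacks = my_pci_hif_set_callbacks,
    	.power_up      = my_pci_hif_power_up,   /* BMI/fw-download mode */
    	.power_down    = my_pci_hif_power_down, /* stop() first if started */
    	/* .suspend / .resume omitted: wrappers return -EOPNOTSUPP */
    };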
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 74363c949392..ef3329ef52f3 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -246,15 +246,22 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 {
 	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 
+	if (htc->ar->state == ATH10K_STATE_WEDGED)
+		return -ECOMM;
+
 	if (eid >= ATH10K_HTC_EP_COUNT) {
 		ath10k_warn("Invalid endpoint id: %d\n", eid);
 		return -ENOENT;
 	}
 
-	skb_push(skb, sizeof(struct ath10k_htc_hdr));
-
 	spin_lock_bh(&htc->tx_lock);
+	if (htc->stopped) {
+		spin_unlock_bh(&htc->tx_lock);
+		return -ESHUTDOWN;
+	}
+
 	__skb_queue_tail(&ep->tx_queue, skb);
+	skb_push(skb, sizeof(struct ath10k_htc_hdr));
 	spin_unlock_bh(&htc->tx_lock);
 
 	queue_work(htc->ar->workqueue, &ep->send_work);
@@ -265,25 +272,19 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
 					    struct sk_buff *skb,
 					    unsigned int eid)
 {
-	struct ath10k_htc *htc = ar->htc;
+	struct ath10k_htc *htc = &ar->htc;
 	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
-	bool stopping;
 
 	ath10k_htc_notify_tx_completion(ep, skb);
 	/* the skb now belongs to the completion handler */
 
+	/* note: when using TX credit flow, the re-checking of queues happens
+	 * when credits flow back from the target. in the non-TX credit case,
+	 * we recheck after the packet completes */
 	spin_lock_bh(&htc->tx_lock);
-	stopping = htc->stopping;
-	spin_unlock_bh(&htc->tx_lock);
-
-	if (!ep->tx_credit_flow_enabled && !stopping)
-		/*
-		 * note: when using TX credit flow, the re-checking of
-		 * queues happens when credits flow back from the target.
-		 * in the non-TX credit case, we recheck after the packet
-		 * completes
-		 */
+	if (!ep->tx_credit_flow_enabled && !htc->stopped)
 		queue_work(ar->workqueue, &ep->send_work);
+	spin_unlock_bh(&htc->tx_lock);
 
 	return 0;
 }
@@ -414,7 +415,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
 					    u8 pipe_id)
 {
 	int status = 0;
-	struct ath10k_htc *htc = ar->htc;
+	struct ath10k_htc *htc = &ar->htc;
 	struct ath10k_htc_hdr *hdr;
 	struct ath10k_htc_ep *ep;
 	u16 payload_len;
@@ -751,8 +752,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 	tx_alloc = ath10k_htc_get_credit_allocation(htc,
 						    conn_req->service_id);
 	if (!tx_alloc)
-		ath10k_warn("HTC Service %s does not allocate target credits\n",
-			    htc_service_name(conn_req->service_id));
+		ath10k_dbg(ATH10K_DBG_HTC,
+			   "HTC Service %s does not allocate target credits\n",
+			   htc_service_name(conn_req->service_id));
 
 	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
 	if (!skb) {
@@ -947,7 +949,7 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
 	struct ath10k_htc_ep *ep;
 
 	spin_lock_bh(&htc->tx_lock);
-	htc->stopping = true;
+	htc->stopped = true;
 	spin_unlock_bh(&htc->tx_lock);
 
 	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
@@ -956,26 +958,18 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
 	}
 
 	ath10k_hif_stop(htc->ar);
-	ath10k_htc_reset_endpoint_states(htc);
 }
 
 /* registered target arrival callback from the HIF layer */
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
-				     struct ath10k_htc_ops *htc_ops)
+int ath10k_htc_init(struct ath10k *ar)
 {
 	struct ath10k_hif_cb htc_callbacks;
 	struct ath10k_htc_ep *ep = NULL;
-	struct ath10k_htc *htc = NULL;
-
-	/* FIXME: use struct ath10k instead */
-	htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
-	if (!htc)
-		return ERR_PTR(-ENOMEM);
+	struct ath10k_htc *htc = &ar->htc;
 
 	spin_lock_init(&htc->tx_lock);
 
-	memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
-
+	htc->stopped = false;
 	ath10k_htc_reset_endpoint_states(htc);
 
 	/* setup HIF layer callbacks */
@@ -986,15 +980,10 @@ struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
 	/* Get HIF default pipe for HTC message exchange */
 	ep = &htc->endpoint[ATH10K_HTC_EP_0];
 
-	ath10k_hif_init(ar, &htc_callbacks);
+	ath10k_hif_set_callbacks(ar, &htc_callbacks);
 	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
 
 	init_completion(&htc->ctl_resp);
 
-	return htc;
-}
-
-void ath10k_htc_destroy(struct ath10k_htc *htc)
-{
-	kfree(htc);
+	return 0;
 }
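The renamed stopped flag only works because it is tested and set under the same tx_lock that guards the endpoint queues, so a sender either enqueues before stop() or observes the flag and bails out. A userspace model of that check-under-lock pattern (all names invented):

    #include <errno.h>
    #include <pthread.h>

    /* Toy model: flag and queue share one lock, so no packet can slip
     * in between "check stopped" and "enqueue". */
    struct toy_htc {
        pthread_mutex_t lock;
        int stopped;
        int queued;
    };

    static int toy_send(struct toy_htc *h)
    {
        int ret = 0;

        pthread_mutex_lock(&h->lock);
        if (h->stopped)
            ret = -ESHUTDOWN;
        else
            h->queued++;
        pthread_mutex_unlock(&h->lock);
        return ret;
    }

    static void toy_stop(struct toy_htc *h)
    {
        pthread_mutex_lock(&h->lock);
        h->stopped = 1;
        pthread_mutex_unlock(&h->lock);
    }

    int main(void)
    {
        struct toy_htc h = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        toy_send(&h);                               /* queued */
        toy_stop(&h);
        return toy_send(&h) == -ESHUTDOWN ? 0 : 1;  /* rejected */
    }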
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index fa45844b59fb..e1dd8c761853 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -335,7 +335,7 @@ struct ath10k_htc {
 	struct ath10k *ar;
 	struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
 
-	/* protects endpoint and stopping fields */
+	/* protects endpoint and stopped fields */
 	spinlock_t tx_lock;
 
 	struct ath10k_htc_ops htc_ops;
@@ -349,11 +349,10 @@ struct ath10k_htc {
 	struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
 	int target_credit_size;
 
-	bool stopping;
+	bool stopped;
 };
 
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
-				     struct ath10k_htc_ops *htc_ops);
+int ath10k_htc_init(struct ath10k *ar);
 int ath10k_htc_wait_target(struct ath10k_htc *htc);
 int ath10k_htc_start(struct ath10k_htc *htc);
 int ath10k_htc_connect_service(struct ath10k_htc *htc,
@@ -362,7 +361,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
 		    struct sk_buff *packet);
 void ath10k_htc_stop(struct ath10k_htc *htc);
-void ath10k_htc_destroy(struct ath10k_htc *htc);
 struct sk_buff *ath10k_htc_alloc_skb(int size);
 
 #endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 185a5468a2f2..39342c5cfcb2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/if_ether.h>
 
 #include "htt.h"
 #include "core.h"
@@ -36,7 +37,7 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
 	/* connect to control service */
 	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
 
-	status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
+	status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
 					    &conn_resp);
 
 	if (status)
@@ -47,15 +48,11 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
 	return 0;
 }
 
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
+int ath10k_htt_attach(struct ath10k *ar)
 {
-	struct ath10k_htt *htt;
+	struct ath10k_htt *htt = &ar->htt;
 	int ret;
 
-	htt = kzalloc(sizeof(*htt), GFP_KERNEL);
-	if (!htt)
-		return NULL;
-
 	htt->ar = ar;
 	htt->max_throughput_mbps = 800;
 
@@ -65,8 +62,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
 	 * since ath10k_htt_rx_attach involves sending a rx ring configure
 	 * message to the target.
 	 */
-	if (ath10k_htt_htc_attach(htt))
+	ret = ath10k_htt_htc_attach(htt);
+	if (ret) {
+		ath10k_err("could not attach htt htc (%d)\n", ret);
 		goto err_htc_attach;
+	}
 
 	ret = ath10k_htt_tx_attach(htt);
 	if (ret) {
@@ -74,8 +74,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
 		goto err_htc_attach;
 	}
 
-	if (ath10k_htt_rx_attach(htt))
+	ret = ath10k_htt_rx_attach(htt);
+	if (ret) {
+		ath10k_err("could not attach htt rx (%d)\n", ret);
 		goto err_rx_attach;
+	}
 
 	/*
 	 * Prefetch enough data to satisfy target
@@ -89,13 +92,12 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
 	       8 + /* llc snap */
 	       2; /* ip4 dscp or ip6 priority */
 
-	return htt;
+	return 0;
 
 err_rx_attach:
 	ath10k_htt_tx_detach(htt);
 err_htc_attach:
-	kfree(htt);
-	return NULL;
+	return ret;
 }
 
 #define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -148,5 +150,4 @@ void ath10k_htt_detach(struct ath10k_htt *htt)
 {
 	ath10k_htt_rx_detach(htt);
 	ath10k_htt_tx_detach(htt);
-	kfree(htt);
 }
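Embedding struct ath10k_htt (and ath10k_htc) in struct ath10k is what lets the kzalloc/kfree pairs and NULL returns above disappear: the sub-object now lives and dies with its parent, so attach can only fail for real reasons. A toy contrast (names illustrative):

    #include <stdlib.h>

    struct sub { int ready; };

    /* pointer style: allocation can fail and must be freed on every path */
    struct dev_ptr { struct sub *sub; };

    static int attach_ptr(struct dev_ptr *d)
    {
        d->sub = calloc(1, sizeof(*d->sub));
        if (!d->sub)
            return -1;      /* extra failure mode, extra cleanup */
        d->sub->ready = 1;
        return 0;
    }

    /* embedded style: no allocation, no leak, no NULL check */
    struct dev_emb { struct sub sub; };

    static int attach_emb(struct dev_emb *d)
    {
        d->sub.ready = 1;
        return 0;
    }

    int main(void)
    {
        struct dev_ptr p = { 0 };
        struct dev_emb e = { { 0 } };

        if (attach_ptr(&p) == 0)
            free(p.sub);
        return attach_emb(&e);
    }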
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index a7a7aa040536..318be4629cde 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -20,7 +20,6 @@
 
 #include <linux/bug.h>
 
-#include "core.h"
 #include "htc.h"
 #include "rx_desc.h"
 
@@ -1317,7 +1316,7 @@ struct htt_rx_desc {
 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
 
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach(struct ath10k *ar);
 int ath10k_htt_attach_target(struct ath10k_htt *htt);
 void ath10k_htt_detach(struct ath10k_htt *htt);
 
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index de058d7adca8..e784c40b904b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -15,6 +15,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include "core.h"
 #include "htc.h"
 #include "htt.h"
 #include "txrx.h"
@@ -803,6 +804,37 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
 	return false;
 }
 
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+{
+	struct htt_rx_desc *rxd;
+	u32 flags, info;
+	bool is_ip4, is_ip6;
+	bool is_tcp, is_udp;
+	bool ip_csum_ok, tcpudp_csum_ok;
+
+	rxd = (void *)skb->data - sizeof(*rxd);
+	flags = __le32_to_cpu(rxd->attention.flags);
+	info = __le32_to_cpu(rxd->msdu_start.info1);
+
+	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
+	if (!is_ip4 && !is_ip6)
+		return CHECKSUM_NONE;
+	if (!is_tcp && !is_udp)
+		return CHECKSUM_NONE;
+	if (!ip_csum_ok)
+		return CHECKSUM_NONE;
+	if (!tcpudp_csum_ok)
+		return CHECKSUM_NONE;
+
+	return CHECKSUM_UNNECESSARY;
+}
+
 static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 				  struct htt_rx_indication *rx)
 {
@@ -814,6 +846,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 	u8 *fw_desc;
 	int i, j;
 	int ret;
+	int ip_summed;
 
 	memset(&info, 0, sizeof(info));
 
@@ -888,6 +921,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 				continue;
 			}
 
+			/* The skb is not yet processed and it may be
+			 * reallocated. Since the offload is in the original
+			 * skb extract the checksum now and assign it later */
+			ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
+
 			info.skb = msdu_head;
 			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
 			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -913,6 +951,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
 				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
 
+			info.skb->ip_summed = ip_summed;
+
 			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
 					info.skb->data, info.skb->len);
 			ath10k_process_rx(htt->ar, &info);
@@ -979,6 +1019,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
 	info.status = HTT_RX_IND_MPDU_STATUS_OK;
 	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
 				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
 
 	if (tkip_mic_err) {
 		ath10k_warn("tkip mic error\n");
@@ -1036,7 +1077,7 @@ end:
 
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct ath10k_htt *htt = ar->htt;
+	struct ath10k_htt *htt = &ar->htt;
 	struct htt_resp *resp = (struct htt_resp *)skb->data;
 
 	/* confirm alignment */
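The checksum helper reduces to a four-way AND: the frame must be IPv4/IPv6, it must be TCP/UDP, and both hardware checks must have passed; otherwise the stack is told to verify the packet itself. A compact standalone model of the same decision:

    #include <stdio.h>

    /* Mirrors CHECKSUM_NONE / CHECKSUM_UNNECESSARY semantics. */
    enum csum { CSUM_NONE, CSUM_UNNECESSARY };

    static enum csum csum_state(int is_ip, int is_tcpudp,
                                int ip_ok, int l4_ok)
    {
        if (!is_ip || !is_tcpudp || !ip_ok || !l4_ok)
            return CSUM_NONE;   /* let the network stack verify it */
        return CSUM_UNNECESSARY;
    }

    int main(void)
    {
        printf("%d\n", csum_state(1, 1, 1, 1)); /* 1: stack skips checking */
        printf("%d\n", csum_state(1, 1, 1, 0)); /* 0: failed L4 checksum */
        return 0;
    }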
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index ef79106db247..656c2546b294 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -92,7 +92,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
 
 	/* At the beginning free queue number should hint us the maximum
 	 * queue length */
-	pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+	pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
 	htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
 								   pipe);
 
@@ -153,7 +153,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
-	struct ath10k_htt *htt = ar->htt;
+	struct ath10k_htt *htt = &ar->htt;
 
 	if (skb_cb->htt.is_conf) {
 		dev_kfree_skb_any(skb);
@@ -194,7 +194,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
 
 	ATH10K_SKB_CB(skb)->htt.is_conf = true;
 
-	ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
 		dev_kfree_skb_any(skb);
 		return ret;
@@ -281,7 +281,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 
 	ATH10K_SKB_CB(skb)->htt.is_conf = true;
 
-	ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
 		dev_kfree_skb_any(skb);
 		return ret;
@@ -346,7 +346,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	skb_cb->htt.refcount = 2;
 	skb_cb->htt.msdu = msdu;
 
-	res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
 		goto err;
 
@@ -465,6 +465,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	flags1 = 0;
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
 	frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
 
@@ -486,7 +488,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	skb_cb->htt.txfrag = txfrag;
 	skb_cb->htt.msdu = msdu;
 
-	res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
 		goto err;
 
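On the tx side, the two new flags1 bits ask the hardware to fill in the L3/L4 checksums for every data frame. A standalone illustration of composing such a descriptor word; the shift/mask values are invented, only the idea mirrors the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define VDEV_ID_LSB   0
    #define VDEV_ID_MASK  0x003f
    #define EXT_TID_LSB   6
    #define EXT_TID_MASK  0x07c0
    #define CKSUM_L3      (1 << 11)
    #define CKSUM_L4      (1 << 12)

    /* shift a value into its field, like the driver's SM() helper */
    #define SM(v, f) (((v) << f##_LSB) & f##_MASK)

    int main(void)
    {
        uint16_t flags1 = 0;

        flags1 |= SM(2, VDEV_ID);   /* vdev id 2 */
        flags1 |= SM(5, EXT_TID);   /* tid 5 */
        flags1 |= CKSUM_L3;         /* hw fills the IP checksum */
        flags1 |= CKSUM_L4;         /* hw fills the TCP/UDP checksum */
        printf("0x%04x\n", flags1);
        return 0;
    }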
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index da5c333d0d4b..cf2ba4d850c9 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -20,6 +20,7 @@
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 
+#include "hif.h"
 #include "core.h"
 #include "debug.h"
 #include "wmi.h"
@@ -43,6 +44,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
 		.macaddr = macaddr,
 	};
 
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
 		arg.key_flags = WMI_KEY_PAIRWISE;
 	else
@@ -87,6 +90,8 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
 	struct ath10k *ar = arvif->ar;
 	int ret;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	INIT_COMPLETION(ar->install_key_done);
 
 	ret = ath10k_send_key(arvif, key, cmd, macaddr);
@@ -327,6 +332,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 	return 0;
 }
 
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+	if (value != 0xFFFFFFFF)
+		value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
+			      ATH10K_RTS_MAX);
+
+	return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+					 WMI_VDEV_PARAM_RTS_THRESHOLD,
+					 value);
+}
+
+static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
+{
+	if (value != 0xFFFFFFFF)
+		value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
+				ATH10K_FRAGMT_THRESHOLD_MIN,
+				ATH10K_FRAGMT_THRESHOLD_MAX);
+
+	return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+					 WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+					 value);
+}
+
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 {
 	int ret;
@@ -364,6 +392,20 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
 	spin_unlock_bh(&ar->data_lock);
 }
 
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+	struct ath10k_peer *peer, *tmp;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+		list_del(&peer->list);
+		kfree(peer);
+	}
+	spin_unlock_bh(&ar->data_lock);
+}
+
 /************************/
 /* Interface management */
 /************************/
@@ -372,6 +414,8 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 {
 	int ret;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	ret = wait_for_completion_timeout(&ar->vdev_setup_done,
 					  ATH10K_VDEV_SETUP_TIMEOUT_HZ);
 	if (ret == 0)
@@ -605,6 +649,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 {
 	int ret = 0;
 
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
 	if (!info->enable_beacon) {
 		ath10k_vdev_stop(arvif);
 		return;
@@ -631,6 +677,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
 {
 	int ret = 0;
 
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
 	if (!info->ibss_joined) {
 		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
 		if (ret)
@@ -680,6 +728,8 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 	enum wmi_sta_ps_mode psmode;
 	int ret;
 
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
 	if (vif->type != NL80211_IFTYPE_STATION)
 		return;
 
@@ -722,6 +772,8 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
 				      struct ieee80211_bss_conf *bss_conf,
 				      struct wmi_peer_assoc_complete_arg *arg)
 {
+	lockdep_assert_held(&ar->conf_mutex);
+
 	memcpy(arg->addr, sta->addr, ETH_ALEN);
 	arg->vdev_id = arvif->vdev_id;
 	arg->peer_aid = sta->aid;
@@ -764,6 +816,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
 	const u8 *rsnie = NULL;
 	const u8 *wpaie = NULL;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
 			       info->bssid, NULL, 0, 0, 0);
 	if (bss) {
@@ -804,6 +858,8 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
 	u32 ratemask;
 	int i;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
 	ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
 	rates = sband->bitrates;
@@ -827,6 +883,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
 	int smps;
 	int i, n;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	if (!ht_cap->ht_supported)
 		return;
 
@@ -905,6 +963,8 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
 	u32 uapsd = 0;
 	u32 max_sp = 0;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	if (sta->wme)
 		arg->peer_flags |= WMI_PEER_QOS;
 
@@ -1056,6 +1116,8 @@ static int ath10k_peer_assoc(struct ath10k *ar,
 {
 	struct wmi_peer_assoc_complete_arg arg;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
 
 	ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
@@ -1079,6 +1141,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 	struct ieee80211_sta *ap_sta;
 	int ret;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	rcu_read_lock();
 
 	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -1119,6 +1183,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	int ret;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	/*
 	 * For some reason, calling VDEV-DOWN before VDEV-STOP
 	 * makes the FW send frames via HTT after disassociation.
@@ -1152,6 +1218,8 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
 {
 	int ret = 0;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
 	if (ret) {
 		ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
@@ -1172,6 +1240,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
 {
 	int ret = 0;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	ret = ath10k_clear_peer_keys(arvif, sta->addr);
 	if (ret) {
 		ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
@@ -1198,6 +1268,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
 	int ret;
 	int i;
 
+	lockdep_assert_held(&ar->conf_mutex);
+
 	bands = hw->wiphy->bands;
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 		if (!bands[band])
@@ -1276,21 +1348,19 @@ static int ath10k_update_channel_list(struct ath10k *ar)
 	return ret;
 }
 
-static void ath10k_reg_notifier(struct wiphy *wiphy,
-				struct regulatory_request *request)
+static void ath10k_regd_update(struct ath10k *ar)
 {
-	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 	struct reg_dmn_pair_mapping *regpair;
-	struct ath10k *ar = hw->priv;
 	int ret;
 
-	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+	lockdep_assert_held(&ar->conf_mutex);
 
 	ret = ath10k_update_channel_list(ar);
 	if (ret)
 		ath10k_warn("could not update channel list (%d)\n", ret);
 
 	regpair = ar->ath_common.regulatory.regpair;
+
 	/* Target allows setting up per-band regdomain but ath_common provides
 	 * a combined one only */
 	ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1303,6 +1373,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
 		ath10k_warn("could not set pdev regdomain (%d)\n", ret);
 }
 
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+				struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct ath10k *ar = hw->priv;
+
+	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state == ATH10K_STATE_ON)
+		ath10k_regd_update(ar);
+	mutex_unlock(&ar->conf_mutex);
+}
+
 /***************/
 /* TX handlers */
 /***************/
@@ -1322,9 +1406,9 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
1322 return; 1406 return;
1323 1407
1324 qos_ctl = ieee80211_get_qos_ctl(hdr); 1408 qos_ctl = ieee80211_get_qos_ctl(hdr);
1325 memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN, 1409 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
1326 skb->len - ieee80211_hdrlen(hdr->frame_control)); 1410 skb->data, (void *)qos_ctl - (void *)skb->data);
1327 skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN); 1411 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
1328} 1412}
1329 1413
1330static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) 1414static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1397,15 +1481,15 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1397 int ret; 1481 int ret;
1398 1482
1399 if (ieee80211_is_mgmt(hdr->frame_control)) 1483 if (ieee80211_is_mgmt(hdr->frame_control))
1400 ret = ath10k_htt_mgmt_tx(ar->htt, skb); 1484 ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
1401 else if (ieee80211_is_nullfunc(hdr->frame_control)) 1485 else if (ieee80211_is_nullfunc(hdr->frame_control))
1402 /* FW does not report tx status properly for NullFunc frames 1486 /* FW does not report tx status properly for NullFunc frames
1403 * unless they are sent through mgmt tx path. mac80211 sends 1487 * unless they are sent through mgmt tx path. mac80211 sends
1404 * those frames when it detects link/beacon loss and depends on 1488 * those frames when it detects link/beacon loss and depends on
1405 * the tx status to be correct. */ 1489 * the tx status to be correct. */
1406 ret = ath10k_htt_mgmt_tx(ar->htt, skb); 1490 ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
1407 else 1491 else
1408 ret = ath10k_htt_tx(ar->htt, skb); 1492 ret = ath10k_htt_tx(&ar->htt, skb);
1409 1493
1410 if (ret) { 1494 if (ret) {
1411 ath10k_warn("tx failed (%d). dropping packet.\n", ret); 1495 ath10k_warn("tx failed (%d). dropping packet.\n", ret);
@@ -1552,6 +1636,10 @@ static int ath10k_abort_scan(struct ath10k *ar)
1552 ret = ath10k_wmi_stop_scan(ar, &arg); 1636 ret = ath10k_wmi_stop_scan(ar, &arg);
1553 if (ret) { 1637 if (ret) {
1554 ath10k_warn("could not submit wmi stop scan (%d)\n", ret); 1638 ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
1639 spin_lock_bh(&ar->data_lock);
1640 ar->scan.in_progress = false;
1641 ath10k_offchan_tx_purge(ar);
1642 spin_unlock_bh(&ar->data_lock);
1555 return -EIO; 1643 return -EIO;
1556 } 1644 }
1557 1645
@@ -1645,10 +1733,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
1645 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 1733 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1646 } 1734 }
1647 1735
1648 ath10k_tx_h_qos_workaround(hw, control, skb); 1736 /* it makes no sense to process injected frames like that */
1649 ath10k_tx_h_update_wep_key(skb); 1737 if (info->control.vif &&
1650 ath10k_tx_h_add_p2p_noa_ie(ar, skb); 1738 info->control.vif->type != NL80211_IFTYPE_MONITOR) {
1651 ath10k_tx_h_seq_no(skb); 1739 ath10k_tx_h_qos_workaround(hw, control, skb);
1740 ath10k_tx_h_update_wep_key(skb);
1741 ath10k_tx_h_add_p2p_noa_ie(ar, skb);
1742 ath10k_tx_h_seq_no(skb);
1743 }
1652 1744
1653 memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); 1745 memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
1654 ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id; 1746 ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
@@ -1673,10 +1765,57 @@ static void ath10k_tx(struct ieee80211_hw *hw,
1673/* 1765/*
1674 * Initialize various parameters with default vaules. 1766 * Initialize various parameters with default vaules.
1675 */ 1767 */
1768void ath10k_halt(struct ath10k *ar)
1769{
1770 lockdep_assert_held(&ar->conf_mutex);
1771
1772 del_timer_sync(&ar->scan.timeout);
1773 ath10k_offchan_tx_purge(ar);
1774 ath10k_peer_cleanup_all(ar);
1775 ath10k_core_stop(ar);
1776 ath10k_hif_power_down(ar);
1777
1778 spin_lock_bh(&ar->data_lock);
1779 if (ar->scan.in_progress) {
1780 del_timer(&ar->scan.timeout);
1781 ar->scan.in_progress = false;
1782 ieee80211_scan_completed(ar->hw, true);
1783 }
1784 spin_unlock_bh(&ar->data_lock);
1785}
1786
1676static int ath10k_start(struct ieee80211_hw *hw) 1787static int ath10k_start(struct ieee80211_hw *hw)
1677{ 1788{
1678 struct ath10k *ar = hw->priv; 1789 struct ath10k *ar = hw->priv;
1679 int ret; 1790 int ret = 0;
1791
1792 mutex_lock(&ar->conf_mutex);
1793
1794 if (ar->state != ATH10K_STATE_OFF &&
1795 ar->state != ATH10K_STATE_RESTARTING) {
1796 ret = -EINVAL;
1797 goto exit;
1798 }
1799
1800 ret = ath10k_hif_power_up(ar);
1801 if (ret) {
1802 ath10k_err("could not init hif (%d)\n", ret);
1803 ar->state = ATH10K_STATE_OFF;
1804 goto exit;
1805 }
1806
1807 ret = ath10k_core_start(ar);
1808 if (ret) {
1809 ath10k_err("could not init core (%d)\n", ret);
1810 ath10k_hif_power_down(ar);
1811 ar->state = ATH10K_STATE_OFF;
1812 goto exit;
1813 }
1814
1815 if (ar->state == ATH10K_STATE_OFF)
1816 ar->state = ATH10K_STATE_ON;
1817 else if (ar->state == ATH10K_STATE_RESTARTING)
1818 ar->state = ATH10K_STATE_RESTARTED;
1680 1819
1681 ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); 1820 ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
1682 if (ret) 1821 if (ret)
@@ -1688,6 +1827,10 @@ static int ath10k_start(struct ieee80211_hw *hw)
1688 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", 1827 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
1689 ret); 1828 ret);
1690 1829
1830 ath10k_regd_update(ar);
1831
1832exit:
1833 mutex_unlock(&ar->conf_mutex);
1691 return 0; 1834 return 0;
1692} 1835}
1693 1836
@@ -1695,18 +1838,48 @@ static void ath10k_stop(struct ieee80211_hw *hw)
1695{ 1838{
1696 struct ath10k *ar = hw->priv; 1839 struct ath10k *ar = hw->priv;
1697 1840
1698 /* avoid leaks in case FW never confirms scan for offchannel */ 1841 mutex_lock(&ar->conf_mutex);
1842 if (ar->state == ATH10K_STATE_ON ||
1843 ar->state == ATH10K_STATE_RESTARTED ||
1844 ar->state == ATH10K_STATE_WEDGED)
1845 ath10k_halt(ar);
1846
1847 ar->state = ATH10K_STATE_OFF;
1848 mutex_unlock(&ar->conf_mutex);
1849
1699 cancel_work_sync(&ar->offchan_tx_work); 1850 cancel_work_sync(&ar->offchan_tx_work);
1700 ath10k_offchan_tx_purge(ar); 1851 cancel_work_sync(&ar->restart_work);
1701} 1852}
1702 1853
1703static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 1854static void ath10k_config_ps(struct ath10k *ar)
1704{ 1855{
1705 struct ath10k_generic_iter ar_iter; 1856 struct ath10k_generic_iter ar_iter;
1857
1858 lockdep_assert_held(&ar->conf_mutex);
1859
1860 /* During HW reconfiguration mac80211 reports all interfaces that were
1861 * running until reconfiguration was started. Since FW doesn't have any
1862 * vdevs at this point we must not iterate over this interface list.
1863 * This setting will be updated upon add_interface(). */
1864 if (ar->state == ATH10K_STATE_RESTARTED)
1865 return;
1866
1867 memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
1868 ar_iter.ar = ar;
1869
1870 ieee80211_iterate_active_interfaces_atomic(
1871 ar->hw, IEEE80211_IFACE_ITER_NORMAL,
1872 ath10k_ps_iter, &ar_iter);
1873
1874 if (ar_iter.ret)
1875 ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
1876}
1877
1878static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
1879{
1706 struct ath10k *ar = hw->priv; 1880 struct ath10k *ar = hw->priv;
1707 struct ieee80211_conf *conf = &hw->conf; 1881 struct ieee80211_conf *conf = &hw->conf;
1708 int ret = 0; 1882 int ret = 0;
1709 u32 flags;
1710 1883
1711 mutex_lock(&ar->conf_mutex); 1884 mutex_lock(&ar->conf_mutex);
1712 1885
@@ -1718,18 +1891,8 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
1718 spin_unlock_bh(&ar->data_lock); 1891 spin_unlock_bh(&ar->data_lock);
1719 } 1892 }
1720 1893
1721 if (changed & IEEE80211_CONF_CHANGE_PS) { 1894 if (changed & IEEE80211_CONF_CHANGE_PS)
1722 memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); 1895 ath10k_config_ps(ar);
1723 ar_iter.ar = ar;
1724 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
1725
1726 ieee80211_iterate_active_interfaces_atomic(hw,
1727 flags,
1728 ath10k_ps_iter,
1729 &ar_iter);
1730
1731 ret = ar_iter.ret;
1732 }
1733 1896
1734 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1897 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1735 if (conf->flags & IEEE80211_CONF_MONITOR) 1898 if (conf->flags & IEEE80211_CONF_MONITOR)
@@ -1738,6 +1901,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
1738 ret = ath10k_monitor_destroy(ar); 1901 ret = ath10k_monitor_destroy(ar);
1739 } 1902 }
1740 1903
1904 ath10k_wmi_flush_tx(ar);
1741 mutex_unlock(&ar->conf_mutex); 1905 mutex_unlock(&ar->conf_mutex);
1742 return ret; 1906 return ret;
1743} 1907}
@@ -1761,6 +1925,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
1761 1925
1762 mutex_lock(&ar->conf_mutex); 1926 mutex_lock(&ar->conf_mutex);
1763 1927
1928 memset(arvif, 0, sizeof(*arvif));
1929
1764 arvif->ar = ar; 1930 arvif->ar = ar;
1765 arvif->vif = vif; 1931 arvif->vif = vif;
1766 1932
@@ -1859,6 +2025,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
1859 ath10k_warn("Failed to set PSPOLL count: %d\n", ret); 2025 ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
1860 } 2026 }
1861 2027
2028 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
2029 if (ret)
2030 ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
2031 arvif->vdev_id, ret);
2032
2033 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
2034 if (ret)
2035 ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
2036 arvif->vdev_id, ret);
2037
1862 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) 2038 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
1863 ar->monitor_present = true; 2039 ar->monitor_present = true;
1864 2040
@@ -2164,6 +2340,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
2164 arg.ssids[i].len = req->ssids[i].ssid_len; 2340 arg.ssids[i].len = req->ssids[i].ssid_len;
2165 arg.ssids[i].ssid = req->ssids[i].ssid; 2341 arg.ssids[i].ssid = req->ssids[i].ssid;
2166 } 2342 }
2343 } else {
2344 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
2167 } 2345 }
2168 2346
2169 if (req->n_channels) { 2347 if (req->n_channels) {
@@ -2363,6 +2541,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
2363 u32 value = 0; 2541 u32 value = 0;
2364 int ret = 0; 2542 int ret = 0;
2365 2543
2544 lockdep_assert_held(&ar->conf_mutex);
2545
2366 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 2546 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
2367 return 0; 2547 return 0;
2368 2548
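The lockdep_assert_held() added above turns the locking contract ("caller holds conf_mutex") into something CONFIG_LOCKDEP can verify at runtime rather than a comment. A runnable userspace analogy of the same idea, tracking ownership beside a pthread mutex (lockdep does this generically for every lock class in the kernel); build with -pthread:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t conf_owner;
static int conf_held;

static void conf_lock(void)
{
    pthread_mutex_lock(&conf_mutex);
    conf_owner = pthread_self();
    conf_held = 1;
}

static void conf_unlock(void)
{
    conf_held = 0;
    pthread_mutex_unlock(&conf_mutex);
}

static void assert_conf_held(void)
{
    /* analogue of lockdep_assert_held(&ar->conf_mutex) */
    assert(conf_held && pthread_equal(conf_owner, pthread_self()));
}

static void conf_tx_uapsd(void)
{
    assert_conf_held();     /* caller must hold conf_mutex */
    puts("configuring uapsd");
}

int main(void)
{
    conf_lock();
    conf_tx_uapsd();
    conf_unlock();
    return 0;
}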
@@ -2558,11 +2738,16 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2558 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2738 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2559 u32 rts = ar_iter->ar->hw->wiphy->rts_threshold; 2739 u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
2560 2740
2561 rts = min_t(u32, rts, ATH10K_RTS_MAX); 2741 lockdep_assert_held(&arvif->ar->conf_mutex);
2562 2742
2563 ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, 2743 /* During HW reconfiguration mac80211 reports all interfaces that were
2564 WMI_VDEV_PARAM_RTS_THRESHOLD, 2744 * running until reconfiguration was started. Since FW doesn't have any
2565 rts); 2745 * vdevs at this point we must not iterate over this interface list.
2746 * This setting will be updated upon add_interface(). */
2747 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
2748 return;
2749
2750 ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
2566 if (ar_iter->ret) 2751 if (ar_iter->ret)
2567 ath10k_warn("Failed to set RTS threshold for VDEV: %d\n", 2752 ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
2568 arvif->vdev_id); 2753 arvif->vdev_id);
@@ -2581,8 +2766,9 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2581 ar_iter.ar = ar; 2766 ar_iter.ar = ar;
2582 2767
2583 mutex_lock(&ar->conf_mutex); 2768 mutex_lock(&ar->conf_mutex);
2584 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, 2769 ieee80211_iterate_active_interfaces_atomic(
2585 ath10k_set_rts_iter, &ar_iter); 2770 hw, IEEE80211_IFACE_ITER_NORMAL,
2771 ath10k_set_rts_iter, &ar_iter);
2586 mutex_unlock(&ar->conf_mutex); 2772 mutex_unlock(&ar->conf_mutex);
2587 2773
2588 return ar_iter.ret; 2774 return ar_iter.ret;
@@ -2593,17 +2779,17 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2593 struct ath10k_generic_iter *ar_iter = data; 2779 struct ath10k_generic_iter *ar_iter = data;
2594 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2780 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2595 u32 frag = ar_iter->ar->hw->wiphy->frag_threshold; 2781 u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
2596 int ret;
2597 2782
2598 frag = clamp_t(u32, frag, 2783 lockdep_assert_held(&arvif->ar->conf_mutex);
2599 ATH10K_FRAGMT_THRESHOLD_MIN,
2600 ATH10K_FRAGMT_THRESHOLD_MAX);
2601 2784
2602 ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, 2785 /* During HW reconfiguration mac80211 reports all interfaces that were
2603 WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, 2786 * running until reconfiguration was started. Since FW doesn't have any
2604 frag); 2787 * vdevs at this point we must not iterate over this interface list.
2788 * This setting will be updated upon add_interface(). */
2789 if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
2790 return;
2605 2791
2606 ar_iter->ret = ret; 2792 ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
2607 if (ar_iter->ret) 2793 if (ar_iter->ret)
2608 ath10k_warn("Failed to set frag threshold for VDEV: %d\n", 2794 ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
2609 arvif->vdev_id); 2795 arvif->vdev_id);
@@ -2622,8 +2808,9 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
2622 ar_iter.ar = ar; 2808 ar_iter.ar = ar;
2623 2809
2624 mutex_lock(&ar->conf_mutex); 2810 mutex_lock(&ar->conf_mutex);
2625 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, 2811 ieee80211_iterate_active_interfaces_atomic(
2626 ath10k_set_frag_iter, &ar_iter); 2812 hw, IEEE80211_IFACE_ITER_NORMAL,
2813 ath10k_set_frag_iter, &ar_iter);
2627 mutex_unlock(&ar->conf_mutex); 2814 mutex_unlock(&ar->conf_mutex);
2628 2815
2629 return ar_iter.ret; 2816 return ar_iter.ret;
@@ -2632,6 +2819,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
2632static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 2819static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
2633{ 2820{
2634 struct ath10k *ar = hw->priv; 2821 struct ath10k *ar = hw->priv;
2822 bool skip;
2635 int ret; 2823 int ret;
2636 2824
2637 /* mac80211 doesn't care if we really xmit queued frames or not 2825 /* mac80211 doesn't care if we really xmit queued frames or not
@@ -2639,16 +2827,29 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
2639 if (drop) 2827 if (drop)
2640 return; 2828 return;
2641 2829
2642 ret = wait_event_timeout(ar->htt->empty_tx_wq, ({ 2830 mutex_lock(&ar->conf_mutex);
2831
2832 if (ar->state == ATH10K_STATE_WEDGED)
2833 goto skip;
2834
2835 ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
2643 bool empty; 2836 bool empty;
2644 spin_lock_bh(&ar->htt->tx_lock); 2837
2645 empty = bitmap_empty(ar->htt->used_msdu_ids, 2838 spin_lock_bh(&ar->htt.tx_lock);
2646 ar->htt->max_num_pending_tx); 2839 empty = bitmap_empty(ar->htt.used_msdu_ids,
2647 spin_unlock_bh(&ar->htt->tx_lock); 2840 ar->htt.max_num_pending_tx);
2648 (empty); 2841 spin_unlock_bh(&ar->htt.tx_lock);
2842
2843 skip = (ar->state == ATH10K_STATE_WEDGED);
2844
2845 (empty || skip);
2649 }), ATH10K_FLUSH_TIMEOUT_HZ); 2846 }), ATH10K_FLUSH_TIMEOUT_HZ);
2650 if (ret <= 0) 2847
2848 if (ret <= 0 || skip)
2651 ath10k_warn("tx not flushed\n"); 2849 ath10k_warn("tx not flushed\n");
2850
2851skip:
2852 mutex_unlock(&ar->conf_mutex);
2652} 2853}
2653 2854
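The reworked flush condition above treats tx as drained when either no MSDU ids remain allocated or the device has meanwhile been declared wedged, re-checking ar->state on every wakeup. A tiny standalone model of the bitmap half of that test (what bitmap_empty() computes over used_msdu_ids):

#include <stdbool.h>
#include <stdio.h>

static unsigned long used_msdu_ids;     /* bit n set => msdu id n in flight */

static bool tx_empty(void)
{
    return used_msdu_ids == 0;          /* bitmap_empty() for <= 64 ids */
}

int main(void)
{
    bool skip = false;                  /* would track ATH10K_STATE_WEDGED */

    used_msdu_ids = 1UL << 3;           /* one frame still pending */
    printf("flushed: %d\n", tx_empty() || skip);    /* 0: keep waiting */

    used_msdu_ids = 0;                  /* tx completion cleared the bit */
    printf("flushed: %d\n", tx_empty() || skip);    /* 1: done */
    return 0;
}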
2654/* TODO: Implement this function properly 2855/* TODO: Implement this function properly
@@ -2660,6 +2861,118 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
2660 return 1; 2861 return 1;
2661} 2862}
2662 2863
2864#ifdef CONFIG_PM
2865static int ath10k_suspend(struct ieee80211_hw *hw,
2866 struct cfg80211_wowlan *wowlan)
2867{
2868 struct ath10k *ar = hw->priv;
2869 int ret;
2870
2871 ar->is_target_paused = false;
2872
2873 ret = ath10k_wmi_pdev_suspend_target(ar);
2874 if (ret) {
2875 ath10k_warn("could not suspend target (%d)\n", ret);
2876 return 1;
2877 }
2878
2879 ret = wait_event_interruptible_timeout(ar->event_queue,
2880 ar->is_target_paused == true,
2881 1 * HZ);
2882 if (ret < 0) {
2883 ath10k_warn("suspend interrupted (%d)\n", ret);
2884 goto resume;
2885 } else if (ret == 0) {
2886 ath10k_warn("suspend timed out - target pause event never came\n");
2887 goto resume;
2888 }
2889
2890 ret = ath10k_hif_suspend(ar);
2891 if (ret) {
2892 ath10k_warn("could not suspend hif (%d)\n", ret);
2893 goto resume;
2894 }
2895
2896 return 0;
2897resume:
2898 ret = ath10k_wmi_pdev_resume_target(ar);
2899 if (ret)
2900 ath10k_warn("could not resume target (%d)\n", ret);
2901 return 1;
2902}
2903
2904static int ath10k_resume(struct ieee80211_hw *hw)
2905{
2906 struct ath10k *ar = hw->priv;
2907 int ret;
2908
2909 ret = ath10k_hif_resume(ar);
2910 if (ret) {
2911 ath10k_warn("could not resume hif (%d)\n", ret);
2912 return 1;
2913 }
2914
2915 ret = ath10k_wmi_pdev_resume_target(ar);
2916 if (ret) {
2917 ath10k_warn("could not resume target (%d)\n", ret);
2918 return 1;
2919 }
2920
2921 return 0;
2922}
2923#endif
2924
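The suspend path above is strictly ordered: pause the target over WMI, wait for the firmware's pause event, then suspend the HIF; any failure unwinds by resuming the target. The wait uses wait_event_interruptible_timeout(), which folds three outcomes into one return value - negative for a signal, zero for timeout, positive for success with that much timeout remaining. A standalone decoder of that convention:

#include <stdio.h>

static const char *decode_wait(long ret)
{
    if (ret < 0)
        return "suspend interrupted";
    if (ret == 0)
        return "suspend timed out - target pause event never came";
    return "target paused";
}

int main(void)
{
    long samples[] = { -512, 0, 42 };   /* -ERESTARTSYS, timeout, success */
    unsigned i;

    for (i = 0; i < 3; i++)
        printf("%ld => %s\n", samples[i], decode_wait(samples[i]));
    return 0;
}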
2925static void ath10k_restart_complete(struct ieee80211_hw *hw)
2926{
2927 struct ath10k *ar = hw->priv;
2928
2929 mutex_lock(&ar->conf_mutex);
2930
2931 /* If device failed to restart it will be in a different state, e.g.
2932 * ATH10K_STATE_WEDGED */
2933 if (ar->state == ATH10K_STATE_RESTARTED) {
2934 ath10k_info("device successfully recovered\n");
2935 ar->state = ATH10K_STATE_ON;
2936 }
2937
2938 mutex_unlock(&ar->conf_mutex);
2939}
2940
2941static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
2942 struct survey_info *survey)
2943{
2944 struct ath10k *ar = hw->priv;
2945 struct ieee80211_supported_band *sband;
2946 struct survey_info *ar_survey = &ar->survey[idx];
2947 int ret = 0;
2948
2949 mutex_lock(&ar->conf_mutex);
2950
2951 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
2952 if (sband && idx >= sband->n_channels) {
2953 idx -= sband->n_channels;
2954 sband = NULL;
2955 }
2956
2957 if (!sband)
2958 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
2959
2960 if (!sband || idx >= sband->n_channels) {
2961 ret = -ENOENT;
2962 goto exit;
2963 }
2964
2965 spin_lock_bh(&ar->data_lock);
2966 memcpy(survey, ar_survey, sizeof(*survey));
2967 spin_unlock_bh(&ar->data_lock);
2968
2969 survey->channel = &sband->channels[idx];
2970
2971exit:
2972 mutex_unlock(&ar->conf_mutex);
2973 return ret;
2974}
2975
2663static const struct ieee80211_ops ath10k_ops = { 2976static const struct ieee80211_ops ath10k_ops = {
2664 .tx = ath10k_tx, 2977 .tx = ath10k_tx,
2665 .start = ath10k_start, 2978 .start = ath10k_start,
@@ -2680,6 +2993,12 @@ static const struct ieee80211_ops ath10k_ops = {
2680 .set_frag_threshold = ath10k_set_frag_threshold, 2993 .set_frag_threshold = ath10k_set_frag_threshold,
2681 .flush = ath10k_flush, 2994 .flush = ath10k_flush,
2682 .tx_last_beacon = ath10k_tx_last_beacon, 2995 .tx_last_beacon = ath10k_tx_last_beacon,
2996 .restart_complete = ath10k_restart_complete,
2997 .get_survey = ath10k_get_survey,
2998#ifdef CONFIG_PM
2999 .suspend = ath10k_suspend,
3000 .resume = ath10k_resume,
3001#endif
2683}; 3002};
2684 3003
2685#define RATETAB_ENT(_rate, _rateid, _flags) { \ 3004#define RATETAB_ENT(_rate, _rateid, _flags) { \
@@ -2797,9 +3116,15 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
2797 .max = 8, 3116 .max = 8,
2798 .types = BIT(NL80211_IFTYPE_STATION) 3117 .types = BIT(NL80211_IFTYPE_STATION)
2799 | BIT(NL80211_IFTYPE_P2P_CLIENT) 3118 | BIT(NL80211_IFTYPE_P2P_CLIENT)
2800 | BIT(NL80211_IFTYPE_P2P_GO) 3119 },
2801 | BIT(NL80211_IFTYPE_AP) 3120 {
2802 } 3121 .max = 3,
3122 .types = BIT(NL80211_IFTYPE_P2P_GO)
3123 },
3124 {
3125 .max = 7,
3126 .types = BIT(NL80211_IFTYPE_AP)
3127 },
2803}; 3128};
2804 3129
2805static const struct ieee80211_iface_combination ath10k_if_comb = { 3130static const struct ieee80211_iface_combination ath10k_if_comb = {
@@ -2814,19 +3139,18 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
2814{ 3139{
2815 struct ieee80211_sta_vht_cap vht_cap = {0}; 3140 struct ieee80211_sta_vht_cap vht_cap = {0};
2816 u16 mcs_map; 3141 u16 mcs_map;
3142 int i;
2817 3143
2818 vht_cap.vht_supported = 1; 3144 vht_cap.vht_supported = 1;
2819 vht_cap.cap = ar->vht_cap_info; 3145 vht_cap.cap = ar->vht_cap_info;
2820 3146
2821 /* FIXME: check dynamically how many streams board supports */ 3147 mcs_map = 0;
2822 mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | 3148 for (i = 0; i < 8; i++) {
2823 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | 3149 if (i < ar->num_rf_chains)
2824 IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | 3150 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
2825 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | 3151 else
2826 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | 3152 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
2827 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | 3153 }
2828 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
2829 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
2830 3154
2831 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 3155 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
2832 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 3156 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
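The loop above replaces the hardcoded three-stream table with one built from num_rf_chains: the VHT MCS map is a 16-bit field carrying two bits per spatial stream (8 streams), 0x2 meaning MCS 0-9 supported and 0x3 meaning the stream is absent. The same packing, runnable standalone:

#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9     2       /* IEEE80211_VHT_MCS_SUPPORT_0_9 */
#define VHT_MCS_NOT_SUPPORTED   3       /* IEEE80211_VHT_MCS_NOT_SUPPORTED */

static uint16_t build_mcs_map(int num_rf_chains)
{
    uint16_t mcs_map = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if (i < num_rf_chains)
            mcs_map |= VHT_MCS_SUPPORT_0_9 << (i * 2);
        else
            mcs_map |= VHT_MCS_NOT_SUPPORTED << (i * 2);
    }
    return mcs_map;
}

int main(void)
{
    /* 3 chains: 0xffea = streams 1-3 carry MCS 0-9, streams 4-8 absent */
    printf("0x%04x\n", build_mcs_map(3));
    return 0;
}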
@@ -2889,7 +3213,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
2889 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 3213 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
2890 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 3214 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
2891 3215
2892 for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++) 3216 for (i = 0; i < ar->num_rf_chains; i++)
2893 ht_cap.mcs.rx_mask[i] = 0xFF; 3217 ht_cap.mcs.rx_mask[i] = 0xFF;
2894 3218
2895 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 3219 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
@@ -2948,8 +3272,10 @@ int ath10k_mac_register(struct ath10k *ar)
2948 channels = kmemdup(ath10k_2ghz_channels, 3272 channels = kmemdup(ath10k_2ghz_channels,
2949 sizeof(ath10k_2ghz_channels), 3273 sizeof(ath10k_2ghz_channels),
2950 GFP_KERNEL); 3274 GFP_KERNEL);
2951 if (!channels) 3275 if (!channels) {
2952 return -ENOMEM; 3276 ret = -ENOMEM;
3277 goto err_free;
3278 }
2953 3279
2954 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; 3280 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
2955 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 3281 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
@@ -2968,11 +3294,8 @@ int ath10k_mac_register(struct ath10k *ar)
2968 sizeof(ath10k_5ghz_channels), 3294 sizeof(ath10k_5ghz_channels),
2969 GFP_KERNEL); 3295 GFP_KERNEL);
2970 if (!channels) { 3296 if (!channels) {
2971 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 3297 ret = -ENOMEM;
2972 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; 3298 goto err_free;
2973 kfree(band->channels);
2974 }
2975 return -ENOMEM;
2976 } 3299 }
2977 3300
2978 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; 3301 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
@@ -3032,29 +3355,36 @@ int ath10k_mac_register(struct ath10k *ar)
3032 ar->hw->wiphy->iface_combinations = &ath10k_if_comb; 3355 ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
3033 ar->hw->wiphy->n_iface_combinations = 1; 3356 ar->hw->wiphy->n_iface_combinations = 1;
3034 3357
3358 ar->hw->netdev_features = NETIF_F_HW_CSUM;
3359
3035 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, 3360 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
3036 ath10k_reg_notifier); 3361 ath10k_reg_notifier);
3037 if (ret) { 3362 if (ret) {
3038 ath10k_err("Regulatory initialization failed\n"); 3363 ath10k_err("Regulatory initialization failed\n");
3039 return ret; 3364 goto err_free;
3040 } 3365 }
3041 3366
3042 ret = ieee80211_register_hw(ar->hw); 3367 ret = ieee80211_register_hw(ar->hw);
3043 if (ret) { 3368 if (ret) {
3044 ath10k_err("ieee80211 registration failed: %d\n", ret); 3369 ath10k_err("ieee80211 registration failed: %d\n", ret);
3045 return ret; 3370 goto err_free;
3046 } 3371 }
3047 3372
3048 if (!ath_is_world_regd(&ar->ath_common.regulatory)) { 3373 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
3049 ret = regulatory_hint(ar->hw->wiphy, 3374 ret = regulatory_hint(ar->hw->wiphy,
3050 ar->ath_common.regulatory.alpha2); 3375 ar->ath_common.regulatory.alpha2);
3051 if (ret) 3376 if (ret)
3052 goto exit; 3377 goto err_unregister;
3053 } 3378 }
3054 3379
3055 return 0; 3380 return 0;
3056exit: 3381
3382err_unregister:
3057 ieee80211_unregister_hw(ar->hw); 3383 ieee80211_unregister_hw(ar->hw);
3384err_free:
3385 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
3386 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
3387
3058 return ret; 3388 return ret;
3059} 3389}
3060 3390
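The error-path rework above converts early returns into the kernel's goto-unwind idiom: every failure jumps to a label that releases everything allocated so far, and kfree(NULL) being a no-op lets a single err_free label cover each failure point regardless of how far setup got. A stripped-down runnable version of that shape:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int register_mac(int fail_register)
{
    char *ch2 = NULL, *ch5 = NULL;
    int ret;

    ch2 = malloc(32);                   /* 2 GHz channel table */
    if (!ch2) {
        ret = -ENOMEM;
        goto err_free;
    }

    ch5 = malloc(32);                   /* 5 GHz channel table */
    if (!ch5) {
        ret = -ENOMEM;
        goto err_free;
    }

    if (fail_register) {                /* stands in for ieee80211_register_hw() */
        ret = -EIO;
        goto err_free;
    }

    /* demo only: the driver keeps its tables; free so the example is leak-free */
    free(ch2);
    free(ch5);
    return 0;

err_free:
    free(ch2);                          /* free(NULL) is a no-op, like kfree(NULL) */
    free(ch5);
    return ret;
}

int main(void)
{
    printf("ok: %d, failing: %d\n", register_mac(0), register_mac(1));
    return 0;
}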
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 27fc92e58829..6fce9bfb19a5 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
34void ath10k_reset_scan(unsigned long ptr); 34void ath10k_reset_scan(unsigned long ptr);
35void ath10k_offchan_tx_purge(struct ath10k *ar); 35void ath10k_offchan_tx_purge(struct ath10k *ar);
36void ath10k_offchan_tx_work(struct work_struct *work); 36void ath10k_offchan_tx_work(struct work_struct *work);
37void ath10k_halt(struct ath10k *ar);
37 38
38static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) 39static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
39{ 40{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 33af4672c909..e2f9ef50b1bd 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -32,7 +32,7 @@
32#include "ce.h" 32#include "ce.h"
33#include "pci.h" 33#include "pci.h"
34 34
35unsigned int ath10k_target_ps; 35static unsigned int ath10k_target_ps;
36module_param(ath10k_target_ps, uint, 0644); 36module_param(ath10k_target_ps, uint, 0644);
37MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); 37MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
38 38
@@ -54,6 +54,10 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
54 int num); 54 int num);
55static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); 55static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
56static void ath10k_pci_stop_ce(struct ath10k *ar); 56static void ath10k_pci_stop_ce(struct ath10k *ar);
57static void ath10k_pci_device_reset(struct ath10k *ar);
58static int ath10k_pci_reset_target(struct ath10k *ar);
59static int ath10k_pci_start_intr(struct ath10k *ar);
60static void ath10k_pci_stop_intr(struct ath10k *ar);
57 61
58static const struct ce_attr host_ce_config_wlan[] = { 62static const struct ce_attr host_ce_config_wlan[] = {
59 /* host->target HTC control and raw streams */ 63 /* host->target HTC control and raw streams */
@@ -718,6 +722,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
718 reg_dump_values[i + 1], 722 reg_dump_values[i + 1],
719 reg_dump_values[i + 2], 723 reg_dump_values[i + 2],
720 reg_dump_values[i + 3]); 724 reg_dump_values[i + 3]);
725
726 ieee80211_queue_work(ar->hw, &ar->restart_work);
721} 727}
722 728
723static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 729static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -744,8 +750,8 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
744 ath10k_ce_per_engine_service(ar, pipe); 750 ath10k_ce_per_engine_service(ar, pipe);
745} 751}
746 752
747static void ath10k_pci_hif_post_init(struct ath10k *ar, 753static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
748 struct ath10k_hif_cb *callbacks) 754 struct ath10k_hif_cb *callbacks)
749{ 755{
750 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 756 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
751 757
@@ -1250,10 +1256,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
1250 } 1256 }
1251} 1257}
1252 1258
1259static void ath10k_pci_disable_irqs(struct ath10k *ar)
1260{
1261 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1262 int i;
1263
1264 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1265 disable_irq(ar_pci->pdev->irq + i);
1266}
1267
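ath10k_pci_disable_irqs() walks max(1, num_msi_intrs) lines: a count of zero presumably denotes legacy/shared-IRQ mode in this driver, where there is still exactly one line to disable. The max() in isolation:

#include <stdio.h>

static int irqs_to_disable(int num_msi_intrs)
{
    return num_msi_intrs > 1 ? num_msi_intrs : 1;   /* max(1, n) */
}

int main(void)
{
    printf("%d %d %d\n", irqs_to_disable(0), irqs_to_disable(1),
           irqs_to_disable(8));                     /* 1 1 8 */
    return 0;
}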
1253static void ath10k_pci_hif_stop(struct ath10k *ar) 1268static void ath10k_pci_hif_stop(struct ath10k *ar)
1254{ 1269{
1270 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1271
1255 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 1272 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1256 1273
1274 /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
1275 * by ath10k_pci_start_intr(). */
1276 ath10k_pci_disable_irqs(ar);
1277
1257 ath10k_pci_stop_ce(ar); 1278 ath10k_pci_stop_ce(ar);
1258 1279
1259 /* At this point, asynchronous threads are stopped, the target should 1280 /* At this point, asynchronous threads are stopped, the target should
@@ -1263,7 +1284,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1263 ath10k_pci_process_ce(ar); 1284 ath10k_pci_process_ce(ar);
1264 ath10k_pci_cleanup_ce(ar); 1285 ath10k_pci_cleanup_ce(ar);
1265 ath10k_pci_buffer_cleanup(ar); 1286 ath10k_pci_buffer_cleanup(ar);
1266 ath10k_pci_ce_deinit(ar); 1287
1288 ar_pci->started = 0;
1267} 1289}
1268 1290
1269static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, 1291static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1735,6 +1757,124 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1735 ath10k_pci_sleep(ar); 1757 ath10k_pci_sleep(ar);
1736} 1758}
1737 1759
1760static int ath10k_pci_hif_power_up(struct ath10k *ar)
1761{
1762 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1763 int ret;
1764
1765 ret = ath10k_pci_start_intr(ar);
1766 if (ret) {
1767 ath10k_err("could not start interrupt handling (%d)\n", ret);
1768 goto err;
1769 }
1770
1771 /*
1772 * Bring the target up cleanly.
1773 *
1774 * The target may be in an undefined state with an AUX-powered Target
1775 * and a Host in WoW mode. If the Host crashes, loses power, or is
1776 * restarted (without unloading the driver) then the Target is left
1777 * (aux) powered and running. On a subsequent driver load, the Target
1778 * is in an unexpected state. We try to catch that here in order to
1779 * reset the Target and retry the probe.
1780 */
1781 ath10k_pci_device_reset(ar);
1782
1783 ret = ath10k_pci_reset_target(ar);
1784 if (ret)
1785 goto err_irq;
1786
1787 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1788 /* Force AWAKE forever */
1789 ath10k_do_pci_wake(ar);
1790
1791 ret = ath10k_pci_ce_init(ar);
1792 if (ret)
1793 goto err_ps;
1794
1795 ret = ath10k_pci_init_config(ar);
1796 if (ret)
1797 goto err_ce;
1798
1799 ret = ath10k_pci_wake_target_cpu(ar);
1800 if (ret) {
1801 ath10k_err("could not wake up target CPU (%d)\n", ret);
1802 goto err_ce;
1803 }
1804
1805 return 0;
1806
1807err_ce:
1808 ath10k_pci_ce_deinit(ar);
1809err_ps:
1810 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1811 ath10k_do_pci_sleep(ar);
1812err_irq:
1813 ath10k_pci_stop_intr(ar);
1814err:
1815 return ret;
1816}
1817
1818static void ath10k_pci_hif_power_down(struct ath10k *ar)
1819{
1820 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1821
1822 ath10k_pci_stop_intr(ar);
1823
1824 ath10k_pci_ce_deinit(ar);
1825 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1826 ath10k_do_pci_sleep(ar);
1827}
1828
1829#ifdef CONFIG_PM
1830
1831#define ATH10K_PCI_PM_CONTROL 0x44
1832
1833static int ath10k_pci_hif_suspend(struct ath10k *ar)
1834{
1835 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1836 struct pci_dev *pdev = ar_pci->pdev;
1837 u32 val;
1838
1839 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1840
1841 if ((val & 0x000000ff) != 0x3) {
1842 pci_save_state(pdev);
1843 pci_disable_device(pdev);
1844 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1845 (val & 0xffffff00) | 0x03);
1846 }
1847
1848 return 0;
1849}
1850
1851static int ath10k_pci_hif_resume(struct ath10k *ar)
1852{
1853 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1854 struct pci_dev *pdev = ar_pci->pdev;
1855 u32 val;
1856
1857 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1858
1859 if ((val & 0x000000ff) != 0) {
1860 pci_restore_state(pdev);
1861 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1862 val & 0xffffff00);
1863 /*
1864 * Suspend/Resume resets the PCI configuration space,
1865 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1866 * to keep PCI Tx retries from interfering with C3 CPU state
1867 */
1868 pci_read_config_dword(pdev, 0x40, &val);
1869
1870 if ((val & 0x0000ff00) != 0)
1871 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1872 }
1873
1874 return 0;
1875}
1876#endif
1877
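The HIF-level suspend/resume above drives the PCI power-management control word directly: the low byte holds the power-state field (0x0 for D0, 0x3 for D3hot, at the device-specific offset 0x44 here), and resume additionally clears the RETRY_TIMEOUT byte at config offset 0x41 so PCI retries don't disturb the C3 CPU state. The masking arithmetic on its own:

#include <stdio.h>

int main(void)
{
    unsigned int pm = 0x00000000;               /* currently D0 */

    pm = (pm & 0xffffff00) | 0x03;              /* enter D3hot */
    printf("suspend: pm=0x%08x (D%u)\n", pm, pm & 0xff);

    pm &= 0xffffff00;                           /* back to D0 */
    printf("resume:  pm=0x%08x (D%u)\n", pm, pm & 0xff);

    unsigned int v40 = 0x00004100;              /* dword at 0x40; byte 1 is 0x41 */
    if (v40 & 0x0000ff00)
        v40 &= 0xffff00ff;                      /* zero RETRY_TIMEOUT */
    printf("0x40:    0x%08x\n", v40);
    return 0;
}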
1738static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 1878static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1739 .send_head = ath10k_pci_hif_send_head, 1879 .send_head = ath10k_pci_hif_send_head,
1740 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 1880 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
@@ -1743,8 +1883,14 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1743 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, 1883 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
1744 .get_default_pipe = ath10k_pci_hif_get_default_pipe, 1884 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
1745 .send_complete_check = ath10k_pci_hif_send_complete_check, 1885 .send_complete_check = ath10k_pci_hif_send_complete_check,
1746 .init = ath10k_pci_hif_post_init, 1886 .set_callbacks = ath10k_pci_hif_set_callbacks,
1747 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, 1887 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
1888 .power_up = ath10k_pci_hif_power_up,
1889 .power_down = ath10k_pci_hif_power_down,
1890#ifdef CONFIG_PM
1891 .suspend = ath10k_pci_hif_suspend,
1892 .resume = ath10k_pci_hif_resume,
1893#endif
1748}; 1894};
1749 1895
1750static void ath10k_pci_ce_tasklet(unsigned long ptr) 1896static void ath10k_pci_ce_tasklet(unsigned long ptr)
@@ -1872,8 +2018,13 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
1872 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, 2018 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
1873 ath10k_pci_msi_fw_handler, 2019 ath10k_pci_msi_fw_handler,
1874 IRQF_SHARED, "ath10k_pci", ar); 2020 IRQF_SHARED, "ath10k_pci", ar);
1875 if (ret) 2021 if (ret) {
2022 ath10k_warn("request_irq(%d) failed %d\n",
2023 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2024
2025 pci_disable_msi(ar_pci->pdev);
1876 return ret; 2026 return ret;
2027 }
1877 2028
1878 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { 2029 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
1879 ret = request_irq(ar_pci->pdev->irq + i, 2030 ret = request_irq(ar_pci->pdev->irq + i,
@@ -2059,9 +2210,9 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
2059 return 0; 2210 return 0;
2060} 2211}
2061 2212
2062static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci) 2213static void ath10k_pci_device_reset(struct ath10k *ar)
2063{ 2214{
2064 struct ath10k *ar = ar_pci->ar; 2215 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2065 void __iomem *mem = ar_pci->mem; 2216 void __iomem *mem = ar_pci->mem;
2066 int i; 2217 int i;
2067 u32 val; 2218 u32 val;
@@ -2118,9 +2269,12 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2118 case ATH10K_PCI_FEATURE_MSI_X: 2269 case ATH10K_PCI_FEATURE_MSI_X:
2119 ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); 2270 ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2120 break; 2271 break;
2121 case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND: 2272 case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
2122 ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n"); 2273 ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2123 break; 2274 break;
2275 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2276 ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
2277 break;
2124 } 2278 }
2125 } 2279 }
2126} 2280}
@@ -2145,7 +2299,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2145 2299
2146 switch (pci_dev->device) { 2300 switch (pci_dev->device) {
2147 case QCA988X_1_0_DEVICE_ID: 2301 case QCA988X_1_0_DEVICE_ID:
2148 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features); 2302 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
2149 break; 2303 break;
2150 case QCA988X_2_0_DEVICE_ID: 2304 case QCA988X_2_0_DEVICE_ID:
2151 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); 2305 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
@@ -2156,10 +2310,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2156 goto err_ar_pci; 2310 goto err_ar_pci;
2157 } 2311 }
2158 2312
2313 if (ath10k_target_ps)
2314 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2315
2159 ath10k_pci_dump_features(ar_pci); 2316 ath10k_pci_dump_features(ar_pci);
2160 2317
2161 ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI, 2318 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2162 &ath10k_pci_hif_ops);
2163 if (!ar) { 2319 if (!ar) {
2164 ath10k_err("ath10k_core_create failed!\n"); 2320 ath10k_err("ath10k_core_create failed!\n");
2165 ret = -EINVAL; 2321 ret = -EINVAL;
@@ -2167,7 +2323,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2167 } 2323 }
2168 2324
2169 /* Enable QCA988X_1.0 HW workarounds */ 2325 /* Enable QCA988X_1.0 HW workarounds */
2170 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) 2326 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
2171 spin_lock_init(&ar_pci->hw_v1_workaround_lock); 2327 spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2172 2328
2173 ar_pci->ar = ar; 2329 ar_pci->ar = ar;
@@ -2241,62 +2397,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2241 2397
2242 ar_pci->cacheline_sz = dma_get_cache_alignment(); 2398 ar_pci->cacheline_sz = dma_get_cache_alignment();
2243 2399
2244 ret = ath10k_pci_start_intr(ar);
2245 if (ret) {
2246 ath10k_err("could not start interrupt handling (%d)\n", ret);
2247 goto err_iomap;
2248 }
2249
2250 /*
2251 * Bring the target up cleanly.
2252 *
2253 * The target may be in an undefined state with an AUX-powered Target
2254 * and a Host in WoW mode. If the Host crashes, loses power, or is
2255 * restarted (without unloading the driver) then the Target is left
2256 * (aux) powered and running. On a subsequent driver load, the Target
2257 * is in an unexpected state. We try to catch that here in order to
2258 * reset the Target and retry the probe.
2259 */
2260 ath10k_pci_device_reset(ar_pci);
2261
2262 ret = ath10k_pci_reset_target(ar);
2263 if (ret)
2264 goto err_intr;
2265
2266 if (ath10k_target_ps) {
2267 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
2268 } else {
2269 /* Force AWAKE forever */
2270 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
2271 ath10k_do_pci_wake(ar);
2272 }
2273
2274 ret = ath10k_pci_ce_init(ar);
2275 if (ret)
2276 goto err_intr;
2277
2278 ret = ath10k_pci_init_config(ar);
2279 if (ret)
2280 goto err_ce;
2281
2282 ret = ath10k_pci_wake_target_cpu(ar);
2283 if (ret) {
2284 ath10k_err("could not wake up target CPU (%d)\n", ret);
2285 goto err_ce;
2286 }
2287
2288 ret = ath10k_core_register(ar); 2400 ret = ath10k_core_register(ar);
2289 if (ret) { 2401 if (ret) {
2290 ath10k_err("could not register driver core (%d)\n", ret); 2402 ath10k_err("could not register driver core (%d)\n", ret);
2291 goto err_ce; 2403 goto err_iomap;
2292 } 2404 }
2293 2405
2294 return 0; 2406 return 0;
2295 2407
2296err_ce:
2297 ath10k_pci_ce_deinit(ar);
2298err_intr:
2299 ath10k_pci_stop_intr(ar);
2300err_iomap: 2408err_iomap:
2301 pci_iounmap(pdev, mem); 2409 pci_iounmap(pdev, mem);
2302err_master: 2410err_master:
@@ -2333,7 +2441,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
2333 tasklet_kill(&ar_pci->msi_fw_err); 2441 tasklet_kill(&ar_pci->msi_fw_err);
2334 2442
2335 ath10k_core_unregister(ar); 2443 ath10k_core_unregister(ar);
2336 ath10k_pci_stop_intr(ar);
2337 2444
2338 pci_set_drvdata(pdev, NULL); 2445 pci_set_drvdata(pdev, NULL);
2339 pci_iounmap(pdev, ar_pci->mem); 2446 pci_iounmap(pdev, ar_pci->mem);
@@ -2345,128 +2452,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
2345 kfree(ar_pci); 2452 kfree(ar_pci);
2346} 2453}
2347 2454
2348#if defined(CONFIG_PM_SLEEP)
2349
2350#define ATH10K_PCI_PM_CONTROL 0x44
2351
2352static int ath10k_pci_suspend(struct device *device)
2353{
2354 struct pci_dev *pdev = to_pci_dev(device);
2355 struct ath10k *ar = pci_get_drvdata(pdev);
2356 struct ath10k_pci *ar_pci;
2357 u32 val;
2358 int ret, retval;
2359
2360 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2361
2362 if (!ar)
2363 return -ENODEV;
2364
2365 ar_pci = ath10k_pci_priv(ar);
2366 if (!ar_pci)
2367 return -ENODEV;
2368
2369 if (ath10k_core_target_suspend(ar))
2370 return -EBUSY;
2371
2372 ret = wait_event_interruptible_timeout(ar->event_queue,
2373 ar->is_target_paused == true,
2374 1 * HZ);
2375 if (ret < 0) {
2376 ath10k_warn("suspend interrupted (%d)\n", ret);
2377 retval = ret;
2378 goto resume;
2379 } else if (ret == 0) {
2380 ath10k_warn("suspend timed out - target pause event never came\n");
2381 retval = EIO;
2382 goto resume;
2383 }
2384
2385 /*
2386 * reset is_target_paused and host can check that in next time,
2387 * or it will always be TRUE and host just skip the waiting
2388 * condition, it causes target assert due to host already
2389 * suspend
2390 */
2391 ar->is_target_paused = false;
2392
2393 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2394
2395 if ((val & 0x000000ff) != 0x3) {
2396 pci_save_state(pdev);
2397 pci_disable_device(pdev);
2398 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2399 (val & 0xffffff00) | 0x03);
2400 }
2401
2402 return 0;
2403resume:
2404 ret = ath10k_core_target_resume(ar);
2405 if (ret)
2406 ath10k_warn("could not resume (%d)\n", ret);
2407
2408 return retval;
2409}
2410
2411static int ath10k_pci_resume(struct device *device)
2412{
2413 struct pci_dev *pdev = to_pci_dev(device);
2414 struct ath10k *ar = pci_get_drvdata(pdev);
2415 struct ath10k_pci *ar_pci;
2416 int ret;
2417 u32 val;
2418
2419 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2420
2421 if (!ar)
2422 return -ENODEV;
2423 ar_pci = ath10k_pci_priv(ar);
2424
2425 if (!ar_pci)
2426 return -ENODEV;
2427
2428 ret = pci_enable_device(pdev);
2429 if (ret) {
2430 ath10k_warn("cannot enable PCI device: %d\n", ret);
2431 return ret;
2432 }
2433
2434 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2435
2436 if ((val & 0x000000ff) != 0) {
2437 pci_restore_state(pdev);
2438 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2439 val & 0xffffff00);
2440 /*
2441 * Suspend/Resume resets the PCI configuration space,
2442 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2443 * to keep PCI Tx retries from interfering with C3 CPU state
2444 */
2445 pci_read_config_dword(pdev, 0x40, &val);
2446
2447 if ((val & 0x0000ff00) != 0)
2448 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2449 }
2450
2451 ret = ath10k_core_target_resume(ar);
2452 if (ret)
2453 ath10k_warn("target resume failed: %d\n", ret);
2454
2455 return ret;
2456}
2457
2458static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
2459 ath10k_pci_suspend,
2460 ath10k_pci_resume);
2461
2462#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
2463
2464#else
2465
2466#define ATH10K_PCI_PM_OPS NULL
2467
2468#endif /* CONFIG_PM_SLEEP */
2469
2470MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); 2455MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2471 2456
2472static struct pci_driver ath10k_pci_driver = { 2457static struct pci_driver ath10k_pci_driver = {
@@ -2474,7 +2459,6 @@ static struct pci_driver ath10k_pci_driver = {
2474 .id_table = ath10k_pci_id_table, 2459 .id_table = ath10k_pci_id_table,
2475 .probe = ath10k_pci_probe, 2460 .probe = ath10k_pci_probe,
2476 .remove = ath10k_pci_remove, 2461 .remove = ath10k_pci_remove,
2477 .driver.pm = ATH10K_PCI_PM_OPS,
2478}; 2462};
2479 2463
2480static int __init ath10k_pci_init(void) 2464static int __init ath10k_pci_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index d2a055a07dc6..871bb339d56d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -152,7 +152,8 @@ struct service_to_pipe {
152 152
153enum ath10k_pci_features { 153enum ath10k_pci_features {
154 ATH10K_PCI_FEATURE_MSI_X = 0, 154 ATH10K_PCI_FEATURE_MSI_X = 0,
155 ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1, 155 ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
156 ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
156 157
157 /* keep last */ 158 /* keep last */
158 ATH10K_PCI_FEATURE_COUNT 159 ATH10K_PCI_FEATURE_COUNT
@@ -311,7 +312,7 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
311 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 312 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
312 void __iomem *addr = ar_pci->mem; 313 void __iomem *addr = ar_pci->mem;
313 314
314 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { 315 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
315 unsigned long irq_flags; 316 unsigned long irq_flags;
316 317
317 spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); 318 spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
@@ -335,20 +336,22 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
335 return ioread32(ar_pci->mem + offset); 336 return ioread32(ar_pci->mem + offset);
336} 337}
337 338
338extern unsigned int ath10k_target_ps;
339
340void ath10k_do_pci_wake(struct ath10k *ar); 339void ath10k_do_pci_wake(struct ath10k *ar);
341void ath10k_do_pci_sleep(struct ath10k *ar); 340void ath10k_do_pci_sleep(struct ath10k *ar);
342 341
343static inline void ath10k_pci_wake(struct ath10k *ar) 342static inline void ath10k_pci_wake(struct ath10k *ar)
344{ 343{
345 if (ath10k_target_ps) 344 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
345
346 if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
346 ath10k_do_pci_wake(ar); 347 ath10k_do_pci_wake(ar);
347} 348}
348 349
349static inline void ath10k_pci_sleep(struct ath10k *ar) 350static inline void ath10k_pci_sleep(struct ath10k *ar)
350{ 351{
351 if (ath10k_target_ps) 352 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
353
354 if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
352 ath10k_do_pci_sleep(ar); 355 ath10k_do_pci_sleep(ar);
353} 356}
354 357
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7d4b7987422d..55f90c761868 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -27,6 +27,13 @@ void ath10k_wmi_flush_tx(struct ath10k *ar)
27{ 27{
28 int ret; 28 int ret;
29 29
30 lockdep_assert_held(&ar->conf_mutex);
31
32 if (ar->state == ATH10K_STATE_WEDGED) {
33 ath10k_warn("wmi flush skipped - device is wedged anyway\n");
34 return;
35 }
36
30 ret = wait_event_timeout(ar->wmi.wq, 37 ret = wait_event_timeout(ar->wmi.wq,
31 atomic_read(&ar->wmi.pending_tx_count) == 0, 38 atomic_read(&ar->wmi.pending_tx_count) == 0,
32 5*HZ); 39 5*HZ);
@@ -111,7 +118,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
111 118
112 trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); 119 trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
113 120
114 status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb); 121 status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
115 if (status) { 122 if (status) {
116 dev_kfree_skb_any(skb); 123 dev_kfree_skb_any(skb);
117 atomic_dec(&ar->wmi.pending_tx_count); 124 atomic_dec(&ar->wmi.pending_tx_count);
@@ -383,9 +390,82 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
383 return 0; 390 return 0;
384} 391}
385 392
393static int freq_to_idx(struct ath10k *ar, int freq)
394{
395 struct ieee80211_supported_band *sband;
396 int band, ch, idx = 0;
397
398 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
399 sband = ar->hw->wiphy->bands[band];
400 if (!sband)
401 continue;
402
403 for (ch = 0; ch < sband->n_channels; ch++, idx++)
404 if (sband->channels[ch].center_freq == freq)
405 goto exit;
406 }
407
408exit:
409 return idx;
410}
411
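freq_to_idx() above flattens (band, channel) into the single index that ar->survey[] is keyed by: channels are numbered consecutively with the 2 GHz band first, and a miss falls through to one-past-the-end, which the caller range-checks against ARRAY_SIZE(ar->survey). The same computation over made-up channel lists:

#include <stdio.h>

static const int chans_2ghz[] = { 2412, 2437, 2462 };
static const int chans_5ghz[] = { 5180, 5200 };

static int freq_to_idx(int freq)
{
    const int *bands[] = { chans_2ghz, chans_5ghz };
    const int lens[] = { 3, 2 };
    int idx = 0;

    for (int b = 0; b < 2; b++)
        for (int ch = 0; ch < lens[b]; ch++, idx++)
            if (bands[b][ch] == freq)
                return idx;
    return idx;     /* one past the end == not found, as above */
}

int main(void)
{
    printf("2437 -> %d\n", freq_to_idx(2437));      /* 1 */
    printf("5200 -> %d\n", freq_to_idx(5200));      /* 4 */
    return 0;
}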
386static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) 412static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
387{ 413{
388 ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n"); 414 struct wmi_chan_info_event *ev;
415 struct survey_info *survey;
416 u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
417 int idx;
418
419 ev = (struct wmi_chan_info_event *)skb->data;
420
421 err_code = __le32_to_cpu(ev->err_code);
422 freq = __le32_to_cpu(ev->freq);
423 cmd_flags = __le32_to_cpu(ev->cmd_flags);
424 noise_floor = __le32_to_cpu(ev->noise_floor);
425 rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
426 cycle_count = __le32_to_cpu(ev->cycle_count);
427
428 ath10k_dbg(ATH10K_DBG_WMI,
429 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
430 err_code, freq, cmd_flags, noise_floor, rx_clear_count,
431 cycle_count);
432
433 spin_lock_bh(&ar->data_lock);
434
435 if (!ar->scan.in_progress) {
436 ath10k_warn("chan info event without a scan request?\n");
437 goto exit;
438 }
439
440 idx = freq_to_idx(ar, freq);
441 if (idx >= ARRAY_SIZE(ar->survey)) {
442 ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
443 freq, idx);
444 goto exit;
445 }
446
447 if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
448 /* During scanning chan info is reported twice for each
449 * visited channel. The reported cycle count is global
450 * and per-channel cycle count must be calculated */
451
452 cycle_count -= ar->survey_last_cycle_count;
453 rx_clear_count -= ar->survey_last_rx_clear_count;
454
455 survey = &ar->survey[idx];
456 survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
457 survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
458 survey->noise = noise_floor;
459 survey->filled = SURVEY_INFO_CHANNEL_TIME |
460 SURVEY_INFO_CHANNEL_TIME_RX |
461 SURVEY_INFO_NOISE_DBM;
462 }
463
464 ar->survey_last_rx_clear_count = rx_clear_count;
465 ar->survey_last_cycle_count = cycle_count;
466
467exit:
468 spin_unlock_bh(&ar->data_lock);
389} 469}
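The counters in a chan info event are cumulative, so the handler above subtracts the values remembered from the previous event to get per-channel figures, then scales them with WMI_CHAN_INFO_MSEC() - whose 76595 divisor is, per the FIXME in wmi.h, empirically extrapolated. The delta-and-scale step by itself:

#include <stdio.h>

#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)     /* empirical, per wmi.h */

int main(void)
{
    unsigned last_cycles = 1000000, last_rx_clear = 400000;
    unsigned cycles = 8659500 + last_cycles;    /* counters in this event */
    unsigned rx_clear = 1531900 + last_rx_clear;

    unsigned dc = cycles - last_cycles;         /* per-channel deltas */
    unsigned dr = rx_clear - last_rx_clear;

    printf("channel_time    = %u ms\n", WMI_CHAN_INFO_MSEC(dc));    /* 113 */
    printf("channel_time_rx = %u ms\n", WMI_CHAN_INFO_MSEC(dr));    /* 20 */
    return 0;
}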
390 470
391static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) 471static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
@@ -501,8 +581,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
501 ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies, 581 ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
502 (u8 *)skb_tail_pointer(bcn) - ies); 582 (u8 *)skb_tail_pointer(bcn) - ies);
503 if (!ie) { 583 if (!ie) {
504 /* highly unlikely for mac80211 */ 584 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
505 ath10k_warn("no tim ie found;\n"); 585 ath10k_warn("no tim ie found;\n");
506 return; 586 return;
507 } 587 }
508 588
@@ -861,6 +941,13 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
861 (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; 941 (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
862 ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); 942 ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
863 ar->phy_capability = __le32_to_cpu(ev->phy_capability); 943 ar->phy_capability = __le32_to_cpu(ev->phy_capability);
944 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
945
946 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
947 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
948 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
949 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
950 }
864 951
865 ar->ath_common.regulatory.current_rd = 952 ar->ath_common.regulatory.current_rd =
866 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); 953 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
@@ -885,7 +972,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
885 } 972 }
886 973
887 ath10k_dbg(ATH10K_DBG_WMI, 974 ath10k_dbg(ATH10K_DBG_WMI,
888 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n", 975 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
889 __le32_to_cpu(ev->sw_version), 976 __le32_to_cpu(ev->sw_version),
890 __le32_to_cpu(ev->sw_version_1), 977 __le32_to_cpu(ev->sw_version_1),
891 __le32_to_cpu(ev->abi_version), 978 __le32_to_cpu(ev->abi_version),
@@ -894,7 +981,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
894 __le32_to_cpu(ev->vht_cap_info), 981 __le32_to_cpu(ev->vht_cap_info),
895 __le32_to_cpu(ev->vht_supp_mcs), 982 __le32_to_cpu(ev->vht_supp_mcs),
896 __le32_to_cpu(ev->sys_cap_info), 983 __le32_to_cpu(ev->sys_cap_info),
897 __le32_to_cpu(ev->num_mem_reqs)); 984 __le32_to_cpu(ev->num_mem_reqs),
985 __le32_to_cpu(ev->num_rf_chains));
898 986
899 complete(&ar->wmi.service_ready); 987 complete(&ar->wmi.service_ready);
900} 988}
@@ -1114,7 +1202,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
1114 /* connect to control service */ 1202 /* connect to control service */
1115 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; 1203 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
1116 1204
1117 status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp); 1205 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
1118 if (status) { 1206 if (status) {
1119 ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", 1207 ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
1120 status); 1208 status);
@@ -1748,6 +1836,9 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
1748 if (arg->key_data) 1836 if (arg->key_data)
1749 memcpy(cmd->key_data, arg->key_data, arg->key_len); 1837 memcpy(cmd->key_data, arg->key_data, arg->key_len);
1750 1838
1839 ath10k_dbg(ATH10K_DBG_WMI,
1840 "wmi vdev install key idx %d cipher %d len %d\n",
1841 arg->key_idx, arg->key_cipher, arg->key_len);
1751 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); 1842 return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1752} 1843}
1753 1844
@@ -2011,6 +2102,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
2011 cmd->peer_vht_rates.tx_mcs_set = 2102 cmd->peer_vht_rates.tx_mcs_set =
2012 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 2103 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
2013 2104
2105 ath10k_dbg(ATH10K_DBG_WMI,
2106 "wmi peer assoc vdev %d addr %pM\n",
2107 arg->vdev_id, arg->addr);
2014 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); 2108 return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
2015} 2109}
2016 2110
@@ -2079,3 +2173,22 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
2079 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); 2173 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
2080 return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); 2174 return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
2081} 2175}
2176
2177int ath10k_wmi_force_fw_hang(struct ath10k *ar,
2178 enum wmi_force_fw_hang_type type, u32 delay_ms)
2179{
2180 struct wmi_force_fw_hang_cmd *cmd;
2181 struct sk_buff *skb;
2182
2183 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2184 if (!skb)
2185 return -ENOMEM;
2186
2187 cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
2188 cmd->type = __cpu_to_le32(type);
2189 cmd->delay_ms = __cpu_to_le32(delay_ms);
2190
2191 ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
2192 type, delay_ms);
2193 return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
2194}
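A plausible call site for the new command - hypothetical here, since none is shown in this diff - would look like:

ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
if (ret)
    ath10k_warn("could not force fw hang (%d)\n", ret);

Deliberately crashing the firmware like this gives the recovery path (restart_work, ath10k_restart_complete() and the WEDGED state above) something to exercise without waiting for a real crash.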
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9555f5a0e041..2c5a4f8daf2e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -416,6 +416,7 @@ enum wmi_cmd_id {
416 WMI_PDEV_FTM_INTG_CMDID, 416 WMI_PDEV_FTM_INTG_CMDID,
417 WMI_VDEV_SET_KEEPALIVE_CMDID, 417 WMI_VDEV_SET_KEEPALIVE_CMDID,
418 WMI_VDEV_GET_KEEPALIVE_CMDID, 418 WMI_VDEV_GET_KEEPALIVE_CMDID,
419 WMI_FORCE_FW_HANG_CMDID,
419 420
420 /* GPIO Configuration */ 421 /* GPIO Configuration */
421 WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO), 422 WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
@@ -2930,6 +2931,11 @@ struct wmi_chan_info_event {
2930 __le32 cycle_count; 2931 __le32 cycle_count;
2931} __packed; 2932} __packed;
2932 2933
2934#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
2935
2936/* FIXME: empirically extrapolated */
2937#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
2938
2933/* Beacon filter wmi command info */ 2939/* Beacon filter wmi command info */
2934#define BCN_FLT_MAX_SUPPORTED_IES 256 2940#define BCN_FLT_MAX_SUPPORTED_IES 256
2935#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32) 2941#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
@@ -2972,6 +2978,22 @@ struct wmi_sta_keepalive_cmd {
2972 struct wmi_sta_keepalive_arp_resp arp_resp; 2978 struct wmi_sta_keepalive_arp_resp arp_resp;
2973} __packed; 2979} __packed;
2974 2980
2981enum wmi_force_fw_hang_type {
2982 WMI_FORCE_FW_HANG_ASSERT = 1,
2983 WMI_FORCE_FW_HANG_NO_DETECT,
2984 WMI_FORCE_FW_HANG_CTRL_EP_FULL,
2985 WMI_FORCE_FW_HANG_EMPTY_POINT,
2986 WMI_FORCE_FW_HANG_STACK_OVERFLOW,
2987 WMI_FORCE_FW_HANG_INFINITE_LOOP,
2988};
2989
2990#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
2991
2992struct wmi_force_fw_hang_cmd {
2993 __le32 type;
2994 __le32 delay_ms;
2995} __packed;
2996
2975#define ATH10K_RTS_MAX 2347 2997#define ATH10K_RTS_MAX 2347
2976#define ATH10K_FRAGMT_THRESHOLD_MIN 540 2998#define ATH10K_FRAGMT_THRESHOLD_MIN 540
2977#define ATH10K_FRAGMT_THRESHOLD_MAX 2346 2999#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -3048,5 +3070,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
3048int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, 3070int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
3049 const struct wmi_pdev_set_wmm_params_arg *arg); 3071 const struct wmi_pdev_set_wmm_params_arg *arg);
3050int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); 3072int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
3073int ath10k_wmi_force_fw_hang(struct ath10k *ar,
3074 enum wmi_force_fw_hang_type type, u32 delay_ms);
3051 3075
3052#endif /* _WMI_H_ */ 3076#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 2d691b8b95b9..74bd54d6aceb 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -29,6 +29,7 @@
29#include <linux/average.h> 29#include <linux/average.h>
30#include <linux/leds.h> 30#include <linux/leds.h>
31#include <net/mac80211.h> 31#include <net/mac80211.h>
32#include <net/cfg80211.h>
32 33
33/* RX/TX descriptor hw structs 34/* RX/TX descriptor hw structs
34 * TODO: Driver part should only see sw structs */ 35 * TODO: Driver part should only see sw structs */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ce67ab791eae..48161edec8de 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -56,6 +56,7 @@
56#include <linux/etherdevice.h> 56#include <linux/etherdevice.h>
57#include <linux/nl80211.h> 57#include <linux/nl80211.h>
58 58
59#include <net/cfg80211.h>
59#include <net/ieee80211_radiotap.h> 60#include <net/ieee80211_radiotap.h>
60 61
61#include <asm/unaligned.h> 62#include <asm/unaligned.h>
@@ -165,28 +166,36 @@ static const struct ieee80211_rate ath5k_rates[] = {
165 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 166 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
166 { .bitrate = 60, 167 { .bitrate = 60,
167 .hw_value = ATH5K_RATE_CODE_6M, 168 .hw_value = ATH5K_RATE_CODE_6M,
168 .flags = 0 }, 169 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
170 IEEE80211_RATE_SUPPORTS_10MHZ },
169 { .bitrate = 90, 171 { .bitrate = 90,
170 .hw_value = ATH5K_RATE_CODE_9M, 172 .hw_value = ATH5K_RATE_CODE_9M,
171 .flags = 0 }, 173 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
174 IEEE80211_RATE_SUPPORTS_10MHZ },
172 { .bitrate = 120, 175 { .bitrate = 120,
173 .hw_value = ATH5K_RATE_CODE_12M, 176 .hw_value = ATH5K_RATE_CODE_12M,
174 .flags = 0 }, 177 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
178 IEEE80211_RATE_SUPPORTS_10MHZ },
175 { .bitrate = 180, 179 { .bitrate = 180,
176 .hw_value = ATH5K_RATE_CODE_18M, 180 .hw_value = ATH5K_RATE_CODE_18M,
177 .flags = 0 }, 181 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
182 IEEE80211_RATE_SUPPORTS_10MHZ },
178 { .bitrate = 240, 183 { .bitrate = 240,
179 .hw_value = ATH5K_RATE_CODE_24M, 184 .hw_value = ATH5K_RATE_CODE_24M,
180 .flags = 0 }, 185 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
186 IEEE80211_RATE_SUPPORTS_10MHZ },
181 { .bitrate = 360, 187 { .bitrate = 360,
182 .hw_value = ATH5K_RATE_CODE_36M, 188 .hw_value = ATH5K_RATE_CODE_36M,
183 .flags = 0 }, 189 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
190 IEEE80211_RATE_SUPPORTS_10MHZ },
184 { .bitrate = 480, 191 { .bitrate = 480,
185 .hw_value = ATH5K_RATE_CODE_48M, 192 .hw_value = ATH5K_RATE_CODE_48M,
186 .flags = 0 }, 193 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
194 IEEE80211_RATE_SUPPORTS_10MHZ },
187 { .bitrate = 540, 195 { .bitrate = 540,
188 .hw_value = ATH5K_RATE_CODE_54M, 196 .hw_value = ATH5K_RATE_CODE_54M,
189 .flags = 0 }, 197 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
198 IEEE80211_RATE_SUPPORTS_10MHZ },
190}; 199};
191 200
192static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 201static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -435,11 +444,27 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
435 * Called with ah->lock. 444 * Called with ah->lock.
436 */ 445 */
437int 446int
438ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan) 447ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
439{ 448{
440 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 449 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
441 "channel set, resetting (%u -> %u MHz)\n", 450 "channel set, resetting (%u -> %u MHz)\n",
442 ah->curchan->center_freq, chan->center_freq); 451 ah->curchan->center_freq, chandef->chan->center_freq);
452
453 switch (chandef->width) {
454 case NL80211_CHAN_WIDTH_20:
455 case NL80211_CHAN_WIDTH_20_NOHT:
456 ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
457 break;
458 case NL80211_CHAN_WIDTH_5:
459 ah->ah_bwmode = AR5K_BWMODE_5MHZ;
460 break;
461 case NL80211_CHAN_WIDTH_10:
462 ah->ah_bwmode = AR5K_BWMODE_10MHZ;
463 break;
464 default:
465 WARN_ON(1);
466 return -EINVAL;
467 }
443 468
444 /* 469 /*
445 * To switch channels clear any pending DMA operations; 470 * To switch channels clear any pending DMA operations;
@@ -447,7 +472,7 @@ ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
447 * hardware at the new frequency, and then re-enable 472 * hardware at the new frequency, and then re-enable
448 * the relevant bits of the h/w. 473 * the relevant bits of the h/w.
449 */ 474 */
450 return ath5k_reset(ah, chan, true); 475 return ath5k_reset(ah, chandef->chan, true);
451} 476}
452 477
453void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 478void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -1400,6 +1425,16 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
1400 1425
1401 rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate); 1426 rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
1402 rxs->flag |= ath5k_rx_decrypted(ah, skb, rs); 1427 rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
1428 switch (ah->ah_bwmode) {
1429 case AR5K_BWMODE_5MHZ:
1430 rxs->flag |= RX_FLAG_5MHZ;
1431 break;
1432 case AR5K_BWMODE_10MHZ:
1433 rxs->flag |= RX_FLAG_10MHZ;
1434 break;
1435 default:
1436 break;
1437 }
1403 1438
1404 if (rxs->rate_idx >= 0 && rs->rs_rate == 1439 if (rxs->rate_idx >= 0 && rs->rs_rate ==
1405 ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short) 1440 ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
@@ -2507,6 +2542,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2507 /* SW support for IBSS_RSN is provided by mac80211 */ 2542 /* SW support for IBSS_RSN is provided by mac80211 */
2508 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 2543 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
2509 2544
2545 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
2546
2510 /* both antennas can be configured as RX or TX */ 2547 /* both antennas can be configured as RX or TX */
2511 hw->wiphy->available_antennas_tx = 0x3; 2548 hw->wiphy->available_antennas_tx = 0x3;
2512 hw->wiphy->available_antennas_rx = 0x3; 2549 hw->wiphy->available_antennas_rx = 0x3;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index ca9a83ceeee1..97469d0fbad7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -101,7 +101,7 @@ void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
101 101
102void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah, 102void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
103 struct ieee80211_vif *vif); 103 struct ieee80211_vif *vif);
104int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan); 104int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef);
105void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); 105void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
106void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); 106void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
107void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 107void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 9d00dab666a8..b8d031ae63c2 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -245,9 +245,11 @@ static ssize_t write_file_beacon(struct file *file,
245 struct ath5k_hw *ah = file->private_data; 245 struct ath5k_hw *ah = file->private_data;
246 char buf[20]; 246 char buf[20];
247 247
248 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) 248 count = min_t(size_t, count, sizeof(buf) - 1);
249 if (copy_from_user(buf, userbuf, count))
249 return -EFAULT; 250 return -EFAULT;
250 251
252 buf[count] = '\0';
251 if (strncmp(buf, "disable", 7) == 0) { 253 if (strncmp(buf, "disable", 7) == 0) {
252 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); 254 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
253 pr_info("debugfs disable beacons\n"); 255 pr_info("debugfs disable beacons\n");
@@ -345,9 +347,11 @@ static ssize_t write_file_debug(struct file *file,
345 unsigned int i; 347 unsigned int i;
346 char buf[20]; 348 char buf[20];
347 349
348 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) 350 count = min_t(size_t, count, sizeof(buf) - 1);
351 if (copy_from_user(buf, userbuf, count))
349 return -EFAULT; 352 return -EFAULT;
350 353
354 buf[count] = '\0';
351 for (i = 0; i < ARRAY_SIZE(dbg_info); i++) { 355 for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
352 if (strncmp(buf, dbg_info[i].name, 356 if (strncmp(buf, dbg_info[i].name,
353 strlen(dbg_info[i].name)) == 0) { 357 strlen(dbg_info[i].name)) == 0) {
@@ -448,9 +452,11 @@ static ssize_t write_file_antenna(struct file *file,
448 unsigned int i; 452 unsigned int i;
449 char buf[20]; 453 char buf[20];
450 454
451 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) 455 count = min_t(size_t, count, sizeof(buf) - 1);
456 if (copy_from_user(buf, userbuf, count))
452 return -EFAULT; 457 return -EFAULT;
453 458
459 buf[count] = '\0';
454 if (strncmp(buf, "diversity", 9) == 0) { 460 if (strncmp(buf, "diversity", 9) == 0) {
455 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); 461 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
456 pr_info("debug: enable diversity\n"); 462 pr_info("debug: enable diversity\n");
@@ -619,9 +625,11 @@ static ssize_t write_file_frameerrors(struct file *file,
619 struct ath5k_statistics *st = &ah->stats; 625 struct ath5k_statistics *st = &ah->stats;
620 char buf[20]; 626 char buf[20];
621 627
622 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) 628 count = min_t(size_t, count, sizeof(buf) - 1);
629 if (copy_from_user(buf, userbuf, count))
623 return -EFAULT; 630 return -EFAULT;
624 631
632 buf[count] = '\0';
625 if (strncmp(buf, "clear", 5) == 0) { 633 if (strncmp(buf, "clear", 5) == 0) {
626 st->rxerr_crc = 0; 634 st->rxerr_crc = 0;
627 st->rxerr_phy = 0; 635 st->rxerr_phy = 0;
@@ -766,9 +774,11 @@ static ssize_t write_file_ani(struct file *file,
766 struct ath5k_hw *ah = file->private_data; 774 struct ath5k_hw *ah = file->private_data;
767 char buf[20]; 775 char buf[20];
768 776
769 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) 777 count = min_t(size_t, count, sizeof(buf) - 1);
778 if (copy_from_user(buf, userbuf, count))
770 return -EFAULT; 779 return -EFAULT;
771 780
781 buf[count] = '\0';
772 if (strncmp(buf, "sens-low", 8) == 0) { 782 if (strncmp(buf, "sens-low", 8) == 0) {
773 ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH); 783 ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
774 } else if (strncmp(buf, "sens-high", 9) == 0) { 784 } else if (strncmp(buf, "sens-high", 9) == 0) {
@@ -862,9 +872,11 @@ static ssize_t write_file_queue(struct file *file,
862 struct ath5k_hw *ah = file->private_data; 872 struct ath5k_hw *ah = file->private_data;
863 char buf[20]; 873 char buf[20];
864 874
865 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) 875 count = min_t(size_t, count, sizeof(buf) - 1);
876 if (copy_from_user(buf, userbuf, count))
866 return -EFAULT; 877 return -EFAULT;
867 878
879 buf[count] = '\0';
868 if (strncmp(buf, "start", 5) == 0) 880 if (strncmp(buf, "start", 5) == 0)
869 ieee80211_wake_queues(ah->hw); 881 ieee80211_wake_queues(ah->hw);
870 else if (strncmp(buf, "stop", 4) == 0) 882 else if (strncmp(buf, "stop", 4) == 0)
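
All of the debugfs write handlers in this file get the same treatment: the old code copied up to sizeof(buf) bytes and then ran string comparisons on a buffer that was never guaranteed to be NUL-terminated. The fix bounds the copy so one byte is always left for a terminator, then NUL-terminates explicitly. A hypothetical helper capturing the repeated pattern (the name is illustrative; the patch open-codes it in each handler):

static ssize_t ath5k_debugfs_read_cmd(char *buf, size_t bufsz,
				      const char __user *userbuf,
				      size_t count)
{
	/* Reserve one byte for the terminator, then bound the copy. */
	count = min_t(size_t, count, bufsz - 1);
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';
	return count;
}
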
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 40825d43322e..4ee01f654235 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -202,7 +202,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
202 mutex_lock(&ah->lock); 202 mutex_lock(&ah->lock);
203 203
204 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 204 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
205 ret = ath5k_chan_set(ah, conf->chandef.chan); 205 ret = ath5k_chan_set(ah, &conf->chandef);
206 if (ret < 0) 206 if (ret < 0)
207 goto unlock; 207 goto unlock;
208 } 208 }
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 1f16b4227d8f..c60d36aa13e2 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -144,11 +144,13 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
144 sifs = AR5K_INIT_SIFS_HALF_RATE; 144 sifs = AR5K_INIT_SIFS_HALF_RATE;
145 preamble *= 2; 145 preamble *= 2;
146 sym_time *= 2; 146 sym_time *= 2;
147 bitrate = DIV_ROUND_UP(bitrate, 2);
147 break; 148 break;
148 case AR5K_BWMODE_5MHZ: 149 case AR5K_BWMODE_5MHZ:
149 sifs = AR5K_INIT_SIFS_QUARTER_RATE; 150 sifs = AR5K_INIT_SIFS_QUARTER_RATE;
150 preamble *= 4; 151 preamble *= 4;
151 sym_time *= 4; 152 sym_time *= 4;
153 bitrate = DIV_ROUND_UP(bitrate, 4);
152 break; 154 break;
153 default: 155 default:
154 sifs = AR5K_INIT_SIFS_DEFAULT_BG; 156 sifs = AR5K_INIT_SIFS_DEFAULT_BG;
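
The DIV_ROUND_UP() lines added above keep the frame-duration math consistent with the slower PHY clock: on a 10 MHz channel the baseband runs at half rate, so preamble and symbol times double and the effective bitrate halves; on a 5 MHz channel everything scales by four. A worked example, assuming mac80211's convention of storing bitrates in 100 kb/s units:

/* Nominal 24 Mb/s OFDM rate, stored as bitrate = 240. */
preamble *= 2;                    /* half-rate: preamble time doubles */
sym_time *= 2;                    /* OFDM symbol 4 us -> 8 us         */
bitrate = DIV_ROUND_UP(240, 2);   /* -> 120, i.e. 12 Mb/s effective   */
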
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 65fe929529a8..0583c69d26db 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -566,9 +566,11 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
566{ 566{
567 struct ieee80211_channel *channel = ah->ah_current_channel; 567 struct ieee80211_channel *channel = ah->ah_current_channel;
568 enum ieee80211_band band; 568 enum ieee80211_band band;
569 struct ieee80211_supported_band *sband;
569 struct ieee80211_rate *rate; 570 struct ieee80211_rate *rate;
570 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; 571 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
571 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); 572 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
573 u32 rate_flags, i;
572 574
573 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) 575 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
574 return -EINVAL; 576 return -EINVAL;
@@ -605,7 +607,28 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
605 else 607 else
606 band = IEEE80211_BAND_2GHZ; 608 band = IEEE80211_BAND_2GHZ;
607 609
608 rate = &ah->sbands[band].bitrates[0]; 610 switch (ah->ah_bwmode) {
611 case AR5K_BWMODE_5MHZ:
612 rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
613 break;
614 case AR5K_BWMODE_10MHZ:
615 rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
616 break;
617 default:
618 rate_flags = 0;
619 break;
620 }
621 sband = &ah->sbands[band];
622 rate = NULL;
623 for (i = 0; i < sband->n_bitrates; i++) {
624 if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
625 continue;
626 rate = &sband->bitrates[i];
627 break;
628 }
629 if (WARN_ON(!rate))
630 return -EINVAL;
631
609 ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false); 632 ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
610 633
611 /* ack_tx_time includes an SIFS already */ 634 /* ack_tx_time includes an SIFS already */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 6a67881f94d6..4f316bdcbab5 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1836,6 +1836,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
1836 1836
1837 clear_bit(WMI_READY, &ar->flag); 1837 clear_bit(WMI_READY, &ar->flag);
1838 1838
1839 if (ar->fw_recovery.enable)
1840 del_timer_sync(&ar->fw_recovery.hb_timer);
1841
1839 /* 1842 /*
1840 * After wmi_shudown all WMI events will be dropped. We 1843 * After wmi_shudown all WMI events will be dropped. We
1840 * After wmi_shutdown all WMI events will be dropped. We 1843 * After wmi_shutdown all WMI events will be dropped. We
1841 * need to cleanup the buffers allocated in AP mode and 1844 * need to cleanup the buffers allocated in AP mode and
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index d4fcfcad57d0..5839fc23bdc7 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -29,6 +29,9 @@ struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
29 struct ath6kl_sta *conn = NULL; 29 struct ath6kl_sta *conn = NULL;
30 u8 i, max_conn; 30 u8 i, max_conn;
31 31
32 if (is_zero_ether_addr(node_addr))
33 return NULL;
34
32 max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; 35 max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
33 36
34 for (i = 0; i < max_conn; i++) { 37 for (i = 0; i < max_conn; i++) {
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index acc9aa832f76..d67170ea1038 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -66,7 +66,8 @@ nla_put_failure:
66 ath6kl_warn("nla_put failed on testmode rx skb!\n"); 66 ath6kl_warn("nla_put failed on testmode rx skb!\n");
67} 67}
68 68
69int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len) 69int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
70 void *data, int len)
70{ 71{
71 struct ath6kl *ar = wiphy_priv(wiphy); 72 struct ath6kl *ar = wiphy_priv(wiphy);
72 struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1]; 73 struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h
index fe651d6707df..9fbcdec3e208 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.h
+++ b/drivers/net/wireless/ath/ath6kl/testmode.h
@@ -20,7 +20,8 @@
20#ifdef CONFIG_NL80211_TESTMODE 20#ifdef CONFIG_NL80211_TESTMODE
21 21
22void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len); 22void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
23int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len); 23int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
24 void *data, int len);
24 25
25#else 26#else
26 27
@@ -29,7 +30,9 @@ static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
29{ 30{
30} 31}
31 32
32static inline int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len) 33static inline int ath6kl_tm_cmd(struct wiphy *wiphy,
34 struct wireless_dev *wdev,
35 void *data, int len)
33{ 36{
34 return 0; 37 return 0;
35} 38}
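
This signature change tracks the cfg80211 testmode hook, which now passes the target interface down to the driver. The assumed contemporary shape of the op (the wdev may be NULL for commands that are not bound to a specific interface):

int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev,
		    void *data, int len);
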
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 87aefb4c4c23..546d5da0b894 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -568,8 +568,8 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
568 dlen, freq, vif->probe_req_report); 568 dlen, freq, vif->probe_req_report);
569 569
570 if (vif->probe_req_report || vif->nw_type == AP_NETWORK) 570 if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
571 cfg80211_rx_mgmt(&vif->wdev, freq, 0, 571 cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0,
572 ev->data, dlen, GFP_ATOMIC); 572 GFP_ATOMIC);
573 573
574 return 0; 574 return 0;
575} 575}
@@ -608,8 +608,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
608 return -EINVAL; 608 return -EINVAL;
609 } 609 }
610 ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); 610 ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
611 cfg80211_rx_mgmt(&vif->wdev, freq, 0, 611 cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, GFP_ATOMIC);
612 ev->data, dlen, GFP_ATOMIC);
613 612
614 return 0; 613 return 0;
615} 614}
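
Both call sites above are adapting to cfg80211_rx_mgmt() growing a flags argument; ath6kl passes 0, requesting no special handling. The assumed prototype at this point in the series, with the new u32 flags slotted in just before the allocation flags:

bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
		      const u8 *buf, size_t len, u32 flags, gfp_t gfp);
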
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d491a3178986..7944c25c9a43 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -56,7 +56,7 @@ config ATH9K_AHB
56 56
57config ATH9K_DEBUGFS 57config ATH9K_DEBUGFS
58 bool "Atheros ath9k debugging" 58 bool "Atheros ath9k debugging"
59 depends on ATH9K 59 depends on ATH9K && DEBUG_FS
60 select MAC80211_DEBUGFS 60 select MAC80211_DEBUGFS
61 select RELAY 61 select RELAY
62 ---help--- 62 ---help---
@@ -96,6 +96,16 @@ config ATH9K_LEGACY_RATE_CONTROL
96 has to be passed to mac80211 using the module parameter, 96 has to be passed to mac80211 using the module parameter,
97 ieee80211_default_rc_algo. 97 ieee80211_default_rc_algo.
98 98
99config ATH9K_RFKILL
100 bool "Atheros ath9k rfkill support" if EXPERT
101 depends on ATH9K
102 depends on RFKILL=y || RFKILL=ATH9K
103 default y
104 help
105 Say Y to have ath9k poll the RF-Kill GPIO every couple of
106 seconds. Turn off to save power, but enable it if you have
107 a platform that can toggle the RF-Kill GPIO.
108
99config ATH9K_HTC 109config ATH9K_HTC
100 tristate "Atheros HTC based wireless cards support" 110 tristate "Atheros HTC based wireless cards support"
101 depends on USB && MAC80211 111 depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 4994bea809eb..be466b0ef7a7 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -319,9 +319,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
319 ah->ani_function = 0; 319 ah->ani_function = 0;
320 } 320 }
321 321
322 /* always allow mode (on/off) to be controlled */
323 ah->ani_function |= ATH9K_ANI_MODE;
324
325 ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL, 322 ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
326 aniState->ofdmNoiseImmunityLevel); 323 aniState->ofdmNoiseImmunityLevel);
327 cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL, 324 cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index b54a3fb01883..21e7b83c3f6a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -48,15 +48,10 @@
48/* values here are relative to the INI */ 48/* values here are relative to the INI */
49 49
50enum ath9k_ani_cmd { 50enum ath9k_ani_cmd {
51 ATH9K_ANI_PRESENT = 0x1, 51 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x1,
52 ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2, 52 ATH9K_ANI_FIRSTEP_LEVEL = 0x2,
53 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4, 53 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x4,
54 ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8, 54 ATH9K_ANI_MRC_CCK = 0x8,
55 ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
56 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
57 ATH9K_ANI_MODE = 0x40,
58 ATH9K_ANI_PHYERR_RESET = 0x80,
59 ATH9K_ANI_MRC_CCK = 0x100,
60 ATH9K_ANI_ALL = 0xfff 55 ATH9K_ANI_ALL = 0xfff
61}; 56};
62 57
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 664844c5d3d5..dd1cc73d7946 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -16,37 +16,119 @@
16 16
17#include "ath9k.h" 17#include "ath9k.h"
18 18
19static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, 19/*
20 * AR9285
21 * ======
22 *
23 * EEPROM has 2 4-bit fields containing the card configuration.
24 *
25 * antdiv_ctl1:
26 * ------------
27 * bb_enable_ant_div_lnadiv : 1
28 * bb_ant_div_alt_gaintb : 1
29 * bb_ant_div_main_gaintb : 1
30 * bb_enable_ant_fast_div : 1
31 *
32 * antdiv_ctl2:
33 * -----------
34 * bb_ant_div_alt_lnaconf : 2
35 * bb_ant_div_main_lnaconf : 2
36 *
37 * The EEPROM bits are used as follows:
38 * ------------------------------------
39 *
40 * bb_enable_ant_div_lnadiv - Enable LNA path rx antenna diversity/combining.
41 * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
42 *
43 * bb_ant_div_[alt/main]_gaintb - 0 -> Antenna config Alt/Main uses gaintable 0
44 * 1 -> Antenna config Alt/Main uses gaintable 1
45 * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
46 *
47 * bb_enable_ant_fast_div - Enable fast antenna diversity.
48 * Set in AR_PHY_CCK_DETECT.
49 *
50 * bb_ant_div_[alt/main]_lnaconf - Alt/Main LNA diversity/combining input config.
51 * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
52 * 10=LNA1
53 * 01=LNA2
54 * 11=LNA1+LNA2
55 * 00=LNA1-LNA2
56 *
57 * AR9485 / AR9565 / AR9331
58 * ========================
59 *
60 * The same bits are present in the EEPROM, but the location in the
61 * EEPROM is different (ant_div_control in ar9300_BaseExtension_1).
62 *
63 * ant_div_alt_lnaconf ==> bit 0~1
64 * ant_div_main_lnaconf ==> bit 2~3
65 * ant_div_alt_gaintb ==> bit 4
66 * ant_div_main_gaintb ==> bit 5
67 * enable_ant_div_lnadiv ==> bit 6
68 * enable_ant_fast_div ==> bit 7
69 */
70
71static inline bool ath_is_alt_ant_ratio_better(struct ath_ant_comb *antcomb,
72 int alt_ratio, int maxdelta,
20 int mindelta, int main_rssi_avg, 73 int mindelta, int main_rssi_avg,
21 int alt_rssi_avg, int pkt_count) 74 int alt_rssi_avg, int pkt_count)
22{ 75{
23 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 76 if (pkt_count <= 50)
24 (alt_rssi_avg > main_rssi_avg + maxdelta)) || 77 return false;
25 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); 78
79 if (alt_rssi_avg > main_rssi_avg + mindelta)
80 return true;
81
82 if (alt_ratio >= antcomb->ant_ratio2 &&
83 alt_rssi_avg >= antcomb->low_rssi_thresh &&
84 (alt_rssi_avg > main_rssi_avg + maxdelta))
85 return true;
86
87 return false;
26} 88}
27 89
28static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio, 90static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf,
29 int curr_main_set, int curr_alt_set, 91 struct ath_ant_comb *antcomb,
30 int alt_rssi_avg, int main_rssi_avg) 92 int alt_ratio, int alt_rssi_avg,
93 int main_rssi_avg)
31{ 94{
32 bool result = false; 95 bool result, set1, set2;
33 switch (div_group) { 96
97 result = set1 = set2 = false;
98
99 if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2 &&
100 conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA1)
101 set1 = true;
102
103 if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA1 &&
104 conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA2)
105 set2 = true;
106
107 switch (conf->div_group) {
34 case 0: 108 case 0:
35 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) 109 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
36 result = true; 110 result = true;
37 break; 111 break;
38 case 1: 112 case 1:
39 case 2: 113 case 2:
40 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) && 114 if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
41 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) && 115 break;
42 (alt_rssi_avg >= (main_rssi_avg - 5))) || 116
43 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) && 117 if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) ||
44 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) && 118 (set2 && (alt_rssi_avg >= (main_rssi_avg - 2))) ||
45 (alt_rssi_avg >= (main_rssi_avg - 2)))) && 119 (alt_ratio > antcomb->ant_ratio))
46 (alt_rssi_avg >= 4))
47 result = true; 120 result = true;
48 else 121
49 result = false; 122 break;
123 case 3:
124 if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
125 break;
126
127 if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) ||
128 (set2 && (alt_rssi_avg >= (main_rssi_avg + 3))) ||
129 (alt_ratio > antcomb->ant_ratio))
130 result = true;
131
50 break; 132 break;
51 } 133 }
52 134
@@ -108,6 +190,74 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
108 } 190 }
109} 191}
110 192
193static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
194 struct ath_hw_antcomb_conf *conf)
195{
196 /* set alt to the conf with maximum ratio */
197 if (antcomb->first_ratio && antcomb->second_ratio) {
198 if (antcomb->rssi_second > antcomb->rssi_third) {
199 /* first alt*/
200 if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
201 (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
202 /* Set alt LNA1 or LNA2*/
203 if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
204 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
205 else
206 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
207 else
208 /* Set alt to A+B or A-B */
209 conf->alt_lna_conf =
210 antcomb->first_quick_scan_conf;
211 } else if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
212 (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) {
213 /* Set alt LNA1 or LNA2 */
214 if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
215 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
216 else
217 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
218 } else {
219 /* Set alt to A+B or A-B */
220 conf->alt_lna_conf = antcomb->second_quick_scan_conf;
221 }
222 } else if (antcomb->first_ratio) {
223 /* first alt */
224 if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
225 (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
226 /* Set alt LNA1 or LNA2 */
227 if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
228 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
229 else
230 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
231 else
232 /* Set alt to A+B or A-B */
233 conf->alt_lna_conf = antcomb->first_quick_scan_conf;
234 } else if (antcomb->second_ratio) {
235 /* second alt */
236 if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
237 (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
238 /* Set alt LNA1 or LNA2 */
239 if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
240 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
241 else
242 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
243 else
244 /* Set alt to A+B or A-B */
245 conf->alt_lna_conf = antcomb->second_quick_scan_conf;
246 } else {
247 /* main is largest */
248 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
249 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
250 /* Set alt LNA1 or LNA2 */
251 if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
252 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
253 else
254 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
255 else
256 /* Set alt to A+B or A-B */
257 conf->alt_lna_conf = antcomb->main_conf;
258 }
259}
260
111static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, 261static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
112 struct ath_hw_antcomb_conf *div_ant_conf, 262 struct ath_hw_antcomb_conf *div_ant_conf,
113 int main_rssi_avg, int alt_rssi_avg, 263 int main_rssi_avg, int alt_rssi_avg,
@@ -129,7 +279,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
129 279
130 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) { 280 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
131 /* main is LNA1 */ 281 /* main is LNA1 */
132 if (ath_is_alt_ant_ratio_better(alt_ratio, 282 if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
133 ATH_ANT_DIV_COMB_LNA1_DELTA_HI, 283 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
134 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 284 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
135 main_rssi_avg, alt_rssi_avg, 285 main_rssi_avg, alt_rssi_avg,
@@ -138,7 +288,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
138 else 288 else
139 antcomb->first_ratio = false; 289 antcomb->first_ratio = false;
140 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) { 290 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
141 if (ath_is_alt_ant_ratio_better(alt_ratio, 291 if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
142 ATH_ANT_DIV_COMB_LNA1_DELTA_MID, 292 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
143 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 293 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
144 main_rssi_avg, alt_rssi_avg, 294 main_rssi_avg, alt_rssi_avg,
@@ -147,11 +297,11 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
147 else 297 else
148 antcomb->first_ratio = false; 298 antcomb->first_ratio = false;
149 } else { 299 } else {
150 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 300 if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
151 (alt_rssi_avg > main_rssi_avg + 301 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
152 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) || 302 0,
153 (alt_rssi_avg > main_rssi_avg)) && 303 main_rssi_avg, alt_rssi_avg,
154 (antcomb->total_pkt_count > 50)) 304 antcomb->total_pkt_count))
155 antcomb->first_ratio = true; 305 antcomb->first_ratio = true;
156 else 306 else
157 antcomb->first_ratio = false; 307 antcomb->first_ratio = false;
@@ -164,17 +314,21 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
164 antcomb->rssi_first = main_rssi_avg; 314 antcomb->rssi_first = main_rssi_avg;
165 antcomb->rssi_third = alt_rssi_avg; 315 antcomb->rssi_third = alt_rssi_avg;
166 316
167 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) 317 switch (antcomb->second_quick_scan_conf) {
318 case ATH_ANT_DIV_COMB_LNA1:
168 antcomb->rssi_lna1 = alt_rssi_avg; 319 antcomb->rssi_lna1 = alt_rssi_avg;
169 else if (antcomb->second_quick_scan_conf == 320 break;
170 ATH_ANT_DIV_COMB_LNA2) 321 case ATH_ANT_DIV_COMB_LNA2:
171 antcomb->rssi_lna2 = alt_rssi_avg; 322 antcomb->rssi_lna2 = alt_rssi_avg;
172 else if (antcomb->second_quick_scan_conf == 323 break;
173 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) { 324 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
174 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) 325 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
175 antcomb->rssi_lna2 = main_rssi_avg; 326 antcomb->rssi_lna2 = main_rssi_avg;
176 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) 327 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
177 antcomb->rssi_lna1 = main_rssi_avg; 328 antcomb->rssi_lna1 = main_rssi_avg;
329 break;
330 default:
331 break;
178 } 332 }
179 333
180 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 + 334 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
@@ -184,7 +338,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
184 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; 338 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
185 339
186 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) { 340 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
187 if (ath_is_alt_ant_ratio_better(alt_ratio, 341 if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
188 ATH_ANT_DIV_COMB_LNA1_DELTA_HI, 342 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
189 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 343 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
190 main_rssi_avg, alt_rssi_avg, 344 main_rssi_avg, alt_rssi_avg,
@@ -193,7 +347,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
193 else 347 else
194 antcomb->second_ratio = false; 348 antcomb->second_ratio = false;
195 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) { 349 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
196 if (ath_is_alt_ant_ratio_better(alt_ratio, 350 if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
197 ATH_ANT_DIV_COMB_LNA1_DELTA_MID, 351 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
198 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 352 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
199 main_rssi_avg, alt_rssi_avg, 353 main_rssi_avg, alt_rssi_avg,
@@ -202,105 +356,18 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
202 else 356 else
203 antcomb->second_ratio = false; 357 antcomb->second_ratio = false;
204 } else { 358 } else {
205 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 359 if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
206 (alt_rssi_avg > main_rssi_avg + 360 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
207 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) || 361 0,
208 (alt_rssi_avg > main_rssi_avg)) && 362 main_rssi_avg, alt_rssi_avg,
209 (antcomb->total_pkt_count > 50)) 363 antcomb->total_pkt_count))
210 antcomb->second_ratio = true; 364 antcomb->second_ratio = true;
211 else 365 else
212 antcomb->second_ratio = false; 366 antcomb->second_ratio = false;
213 } 367 }
214 368
215 /* set alt to the conf with maximun ratio */ 369 ath_ant_set_alt_ratio(antcomb, div_ant_conf);
216 if (antcomb->first_ratio && antcomb->second_ratio) { 370
217 if (antcomb->rssi_second > antcomb->rssi_third) {
218 /* first alt*/
219 if ((antcomb->first_quick_scan_conf ==
220 ATH_ANT_DIV_COMB_LNA1) ||
221 (antcomb->first_quick_scan_conf ==
222 ATH_ANT_DIV_COMB_LNA2))
223 /* Set alt LNA1 or LNA2*/
224 if (div_ant_conf->main_lna_conf ==
225 ATH_ANT_DIV_COMB_LNA2)
226 div_ant_conf->alt_lna_conf =
227 ATH_ANT_DIV_COMB_LNA1;
228 else
229 div_ant_conf->alt_lna_conf =
230 ATH_ANT_DIV_COMB_LNA2;
231 else
232 /* Set alt to A+B or A-B */
233 div_ant_conf->alt_lna_conf =
234 antcomb->first_quick_scan_conf;
235 } else if ((antcomb->second_quick_scan_conf ==
236 ATH_ANT_DIV_COMB_LNA1) ||
237 (antcomb->second_quick_scan_conf ==
238 ATH_ANT_DIV_COMB_LNA2)) {
239 /* Set alt LNA1 or LNA2 */
240 if (div_ant_conf->main_lna_conf ==
241 ATH_ANT_DIV_COMB_LNA2)
242 div_ant_conf->alt_lna_conf =
243 ATH_ANT_DIV_COMB_LNA1;
244 else
245 div_ant_conf->alt_lna_conf =
246 ATH_ANT_DIV_COMB_LNA2;
247 } else {
248 /* Set alt to A+B or A-B */
249 div_ant_conf->alt_lna_conf =
250 antcomb->second_quick_scan_conf;
251 }
252 } else if (antcomb->first_ratio) {
253 /* first alt */
254 if ((antcomb->first_quick_scan_conf ==
255 ATH_ANT_DIV_COMB_LNA1) ||
256 (antcomb->first_quick_scan_conf ==
257 ATH_ANT_DIV_COMB_LNA2))
258 /* Set alt LNA1 or LNA2 */
259 if (div_ant_conf->main_lna_conf ==
260 ATH_ANT_DIV_COMB_LNA2)
261 div_ant_conf->alt_lna_conf =
262 ATH_ANT_DIV_COMB_LNA1;
263 else
264 div_ant_conf->alt_lna_conf =
265 ATH_ANT_DIV_COMB_LNA2;
266 else
267 /* Set alt to A+B or A-B */
268 div_ant_conf->alt_lna_conf =
269 antcomb->first_quick_scan_conf;
270 } else if (antcomb->second_ratio) {
271 /* second alt */
272 if ((antcomb->second_quick_scan_conf ==
273 ATH_ANT_DIV_COMB_LNA1) ||
274 (antcomb->second_quick_scan_conf ==
275 ATH_ANT_DIV_COMB_LNA2))
276 /* Set alt LNA1 or LNA2 */
277 if (div_ant_conf->main_lna_conf ==
278 ATH_ANT_DIV_COMB_LNA2)
279 div_ant_conf->alt_lna_conf =
280 ATH_ANT_DIV_COMB_LNA1;
281 else
282 div_ant_conf->alt_lna_conf =
283 ATH_ANT_DIV_COMB_LNA2;
284 else
285 /* Set alt to A+B or A-B */
286 div_ant_conf->alt_lna_conf =
287 antcomb->second_quick_scan_conf;
288 } else {
289 /* main is largest */
290 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
291 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
292 /* Set alt LNA1 or LNA2 */
293 if (div_ant_conf->main_lna_conf ==
294 ATH_ANT_DIV_COMB_LNA2)
295 div_ant_conf->alt_lna_conf =
296 ATH_ANT_DIV_COMB_LNA1;
297 else
298 div_ant_conf->alt_lna_conf =
299 ATH_ANT_DIV_COMB_LNA2;
300 else
301 /* Set alt to A+B or A-B */
302 div_ant_conf->alt_lna_conf = antcomb->main_conf;
303 }
304 break; 371 break;
305 default: 372 default:
306 break; 373 break;
@@ -430,8 +497,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
430 ant_conf->fast_div_bias = 0x1; 497 ant_conf->fast_div_bias = 0x1;
431 break; 498 break;
432 case 0x10: /* LNA2 A-B */ 499 case 0x10: /* LNA2 A-B */
433 if (!(antcomb->scan) && 500 if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
434 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
435 ant_conf->fast_div_bias = 0x1; 501 ant_conf->fast_div_bias = 0x1;
436 else 502 else
437 ant_conf->fast_div_bias = 0x2; 503 ant_conf->fast_div_bias = 0x2;
@@ -440,15 +506,13 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
440 ant_conf->fast_div_bias = 0x1; 506 ant_conf->fast_div_bias = 0x1;
441 break; 507 break;
442 case 0x13: /* LNA2 A+B */ 508 case 0x13: /* LNA2 A+B */
443 if (!(antcomb->scan) && 509 if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
444 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
445 ant_conf->fast_div_bias = 0x1; 510 ant_conf->fast_div_bias = 0x1;
446 else 511 else
447 ant_conf->fast_div_bias = 0x2; 512 ant_conf->fast_div_bias = 0x2;
448 break; 513 break;
449 case 0x20: /* LNA1 A-B */ 514 case 0x20: /* LNA1 A-B */
450 if (!(antcomb->scan) && 515 if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
451 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
452 ant_conf->fast_div_bias = 0x1; 516 ant_conf->fast_div_bias = 0x1;
453 else 517 else
454 ant_conf->fast_div_bias = 0x2; 518 ant_conf->fast_div_bias = 0x2;
@@ -457,8 +521,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
457 ant_conf->fast_div_bias = 0x1; 521 ant_conf->fast_div_bias = 0x1;
458 break; 522 break;
459 case 0x23: /* LNA1 A+B */ 523 case 0x23: /* LNA1 A+B */
460 if (!(antcomb->scan) && 524 if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
461 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
462 ant_conf->fast_div_bias = 0x1; 525 ant_conf->fast_div_bias = 0x1;
463 else 526 else
464 ant_conf->fast_div_bias = 0x2; 527 ant_conf->fast_div_bias = 0x2;
@@ -475,6 +538,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
475 default: 538 default:
476 break; 539 break;
477 } 540 }
541
542 if (antcomb->fast_div_bias)
543 ant_conf->fast_div_bias = antcomb->fast_div_bias;
478 } else if (ant_conf->div_group == 3) { 544 } else if (ant_conf->div_group == 3) {
479 switch ((ant_conf->main_lna_conf << 4) | 545 switch ((ant_conf->main_lna_conf << 4) |
480 ant_conf->alt_lna_conf) { 546 ant_conf->alt_lna_conf) {
@@ -540,6 +606,138 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
540 } 606 }
541} 607}
542 608
609static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
610 struct ath_hw_antcomb_conf *conf,
611 int curr_alt_set, int alt_rssi_avg,
612 int main_rssi_avg)
613{
614 switch (curr_alt_set) {
615 case ATH_ANT_DIV_COMB_LNA2:
616 antcomb->rssi_lna2 = alt_rssi_avg;
617 antcomb->rssi_lna1 = main_rssi_avg;
618 antcomb->scan = true;
619 /* set to A+B */
620 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
621 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
622 break;
623 case ATH_ANT_DIV_COMB_LNA1:
624 antcomb->rssi_lna1 = alt_rssi_avg;
625 antcomb->rssi_lna2 = main_rssi_avg;
626 antcomb->scan = true;
627 /* set to A+B */
628 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
629 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
630 break;
631 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
632 antcomb->rssi_add = alt_rssi_avg;
633 antcomb->scan = true;
634 /* set to A-B */
635 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
636 break;
637 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
638 antcomb->rssi_sub = alt_rssi_avg;
639 antcomb->scan = false;
640 if (antcomb->rssi_lna2 >
641 (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
642 /* use LNA2 as main LNA */
643 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
644 (antcomb->rssi_add > antcomb->rssi_sub)) {
645 /* set to A+B */
646 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
647 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
648 } else if (antcomb->rssi_sub >
649 antcomb->rssi_lna1) {
650 /* set to A-B */
651 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
652 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
653 } else {
654 /* set to LNA1 */
655 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
656 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
657 }
658 } else {
659 /* use LNA1 as main LNA */
660 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
661 (antcomb->rssi_add > antcomb->rssi_sub)) {
662 /* set to A+B */
663 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
664 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
665 } else if (antcomb->rssi_sub >
666 antcomb->rssi_lna1) {
667 /* set to A-B */
668 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
669 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
670 } else {
671 /* set to LNA2 */
672 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
673 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
674 }
675 }
676 break;
677 default:
678 break;
679 }
680}
681
682static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf,
683 struct ath_ant_comb *antcomb,
684 int alt_ratio, int alt_rssi_avg,
685 int main_rssi_avg, int curr_main_set,
686 int curr_alt_set)
687{
688 bool ret = false;
689
690 if (ath_ant_div_comb_alt_check(div_ant_conf, antcomb, alt_ratio,
691 alt_rssi_avg, main_rssi_avg)) {
692 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
693 /*
694 * Switch main and alt LNA.
695 */
696 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
697 div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
698 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
699 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
700 div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
701 }
702
703 ret = true;
704 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
705 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
706 /*
707 * Set alt to another LNA.
708 */
709 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
710 div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
711 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
712 div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
713
714 ret = true;
715 }
716
717 return ret;
718}
719
720static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb)
721{
722 int alt_ratio;
723
724 if (!antcomb->scan || !antcomb->alt_good)
725 return false;
726
727 if (time_after(jiffies, antcomb->scan_start_time +
728 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
729 return true;
730
731 if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
732 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
733 antcomb->total_pkt_count);
734 if (alt_ratio < antcomb->ant_ratio)
735 return true;
736 }
737
738 return false;
739}
740
543void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) 741void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
544{ 742{
545 struct ath_hw_antcomb_conf div_ant_conf; 743 struct ath_hw_antcomb_conf div_ant_conf;
@@ -549,41 +747,46 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
549 int main_rssi = rs->rs_rssi_ctl0; 747 int main_rssi = rs->rs_rssi_ctl0;
550 int alt_rssi = rs->rs_rssi_ctl1; 748 int alt_rssi = rs->rs_rssi_ctl1;
551 int rx_ant_conf, main_ant_conf; 749 int rx_ant_conf, main_ant_conf;
552 bool short_scan = false; 750 bool short_scan = false, ret;
553 751
554 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) & 752 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
555 ATH_ANT_RX_MASK; 753 ATH_ANT_RX_MASK;
556 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) & 754 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
557 ATH_ANT_RX_MASK; 755 ATH_ANT_RX_MASK;
558 756
757 if (alt_rssi >= antcomb->low_rssi_thresh) {
758 antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO;
759 antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2;
760 } else {
761 antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI;
762 antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI;
763 }
764
559 /* Record packet only when both main_rssi and alt_rssi are positive */ 765 /* Record packet only when both main_rssi and alt_rssi are positive */
560 if (main_rssi > 0 && alt_rssi > 0) { 766 if (main_rssi > 0 && alt_rssi > 0) {
561 antcomb->total_pkt_count++; 767 antcomb->total_pkt_count++;
562 antcomb->main_total_rssi += main_rssi; 768 antcomb->main_total_rssi += main_rssi;
563 antcomb->alt_total_rssi += alt_rssi; 769 antcomb->alt_total_rssi += alt_rssi;
770
564 if (main_ant_conf == rx_ant_conf) 771 if (main_ant_conf == rx_ant_conf)
565 antcomb->main_recv_cnt++; 772 antcomb->main_recv_cnt++;
566 else 773 else
567 antcomb->alt_recv_cnt++; 774 antcomb->alt_recv_cnt++;
568 } 775 }
569 776
570 /* Short scan check */ 777 if (main_ant_conf == rx_ant_conf) {
571 if (antcomb->scan && antcomb->alt_good) { 778 ANT_STAT_INC(ANT_MAIN, recv_cnt);
572 if (time_after(jiffies, antcomb->scan_start_time + 779 ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
573 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR))) 780 } else {
574 short_scan = true; 781 ANT_STAT_INC(ANT_ALT, recv_cnt);
575 else 782 ANT_LNA_INC(ANT_ALT, rx_ant_conf);
576 if (antcomb->total_pkt_count ==
577 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
578 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
579 antcomb->total_pkt_count);
580 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
581 short_scan = true;
582 }
583 } 783 }
584 784
785 /* Short scan check */
786 short_scan = ath_ant_short_scan_check(antcomb);
787
585 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) || 788 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
586 rs->rs_moreaggr) && !short_scan) 789 rs->rs_moreaggr) && !short_scan)
587 return; 790 return;
588 791
589 if (antcomb->total_pkt_count) { 792 if (antcomb->total_pkt_count) {
@@ -595,15 +798,13 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
595 antcomb->total_pkt_count); 798 antcomb->total_pkt_count);
596 } 799 }
597 800
598
599 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf); 801 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
600 curr_alt_set = div_ant_conf.alt_lna_conf; 802 curr_alt_set = div_ant_conf.alt_lna_conf;
601 curr_main_set = div_ant_conf.main_lna_conf; 803 curr_main_set = div_ant_conf.main_lna_conf;
602
603 antcomb->count++; 804 antcomb->count++;
604 805
605 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) { 806 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
606 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) { 807 if (alt_ratio > antcomb->ant_ratio) {
607 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf, 808 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
608 main_rssi_avg); 809 main_rssi_avg);
609 antcomb->alt_good = true; 810 antcomb->alt_good = true;
@@ -617,153 +818,47 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
617 } 818 }
618 819
619 if (!antcomb->scan) { 820 if (!antcomb->scan) {
620 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group, 821 ret = ath_ant_try_switch(&div_ant_conf, antcomb, alt_ratio,
621 alt_ratio, curr_main_set, curr_alt_set, 822 alt_rssi_avg, main_rssi_avg,
622 alt_rssi_avg, main_rssi_avg)) { 823 curr_main_set, curr_alt_set);
623 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { 824 if (ret)
624 /* Switch main and alt LNA */
625 div_ant_conf.main_lna_conf =
626 ATH_ANT_DIV_COMB_LNA2;
627 div_ant_conf.alt_lna_conf =
628 ATH_ANT_DIV_COMB_LNA1;
629 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
630 div_ant_conf.main_lna_conf =
631 ATH_ANT_DIV_COMB_LNA1;
632 div_ant_conf.alt_lna_conf =
633 ATH_ANT_DIV_COMB_LNA2;
634 }
635
636 goto div_comb_done;
637 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
638 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
639 /* Set alt to another LNA */
640 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
641 div_ant_conf.alt_lna_conf =
642 ATH_ANT_DIV_COMB_LNA1;
643 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
644 div_ant_conf.alt_lna_conf =
645 ATH_ANT_DIV_COMB_LNA2;
646
647 goto div_comb_done;
648 }
649
650 if ((alt_rssi_avg < (main_rssi_avg +
651 div_ant_conf.lna1_lna2_delta)))
652 goto div_comb_done; 825 goto div_comb_done;
653 } 826 }
654 827
828 if (!antcomb->scan &&
829 (alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta)))
830 goto div_comb_done;
831
655 if (!antcomb->scan_not_start) { 832 if (!antcomb->scan_not_start) {
656 switch (curr_alt_set) { 833 ath_ant_try_scan(antcomb, &div_ant_conf, curr_alt_set,
657 case ATH_ANT_DIV_COMB_LNA2: 834 alt_rssi_avg, main_rssi_avg);
658 antcomb->rssi_lna2 = alt_rssi_avg;
659 antcomb->rssi_lna1 = main_rssi_avg;
660 antcomb->scan = true;
661 /* set to A+B */
662 div_ant_conf.main_lna_conf =
663 ATH_ANT_DIV_COMB_LNA1;
664 div_ant_conf.alt_lna_conf =
665 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
666 break;
667 case ATH_ANT_DIV_COMB_LNA1:
668 antcomb->rssi_lna1 = alt_rssi_avg;
669 antcomb->rssi_lna2 = main_rssi_avg;
670 antcomb->scan = true;
671 /* set to A+B */
672 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
673 div_ant_conf.alt_lna_conf =
674 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
675 break;
676 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
677 antcomb->rssi_add = alt_rssi_avg;
678 antcomb->scan = true;
679 /* set to A-B */
680 div_ant_conf.alt_lna_conf =
681 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
682 break;
683 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
684 antcomb->rssi_sub = alt_rssi_avg;
685 antcomb->scan = false;
686 if (antcomb->rssi_lna2 >
687 (antcomb->rssi_lna1 +
688 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
689 /* use LNA2 as main LNA */
690 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
691 (antcomb->rssi_add > antcomb->rssi_sub)) {
692 /* set to A+B */
693 div_ant_conf.main_lna_conf =
694 ATH_ANT_DIV_COMB_LNA2;
695 div_ant_conf.alt_lna_conf =
696 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
697 } else if (antcomb->rssi_sub >
698 antcomb->rssi_lna1) {
699 /* set to A-B */
700 div_ant_conf.main_lna_conf =
701 ATH_ANT_DIV_COMB_LNA2;
702 div_ant_conf.alt_lna_conf =
703 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
704 } else {
705 /* set to LNA1 */
706 div_ant_conf.main_lna_conf =
707 ATH_ANT_DIV_COMB_LNA2;
708 div_ant_conf.alt_lna_conf =
709 ATH_ANT_DIV_COMB_LNA1;
710 }
711 } else {
712 /* use LNA1 as main LNA */
713 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
714 (antcomb->rssi_add > antcomb->rssi_sub)) {
715 /* set to A+B */
716 div_ant_conf.main_lna_conf =
717 ATH_ANT_DIV_COMB_LNA1;
718 div_ant_conf.alt_lna_conf =
719 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
720 } else if (antcomb->rssi_sub >
721 antcomb->rssi_lna1) {
722 /* set to A-B */
723 div_ant_conf.main_lna_conf =
724 ATH_ANT_DIV_COMB_LNA1;
725 div_ant_conf.alt_lna_conf =
726 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
727 } else {
728 /* set to LNA2 */
729 div_ant_conf.main_lna_conf =
730 ATH_ANT_DIV_COMB_LNA1;
731 div_ant_conf.alt_lna_conf =
732 ATH_ANT_DIV_COMB_LNA2;
733 }
734 }
735 break;
736 default:
737 break;
738 }
739 } else { 835 } else {
740 if (!antcomb->alt_good) { 836 if (!antcomb->alt_good) {
741 antcomb->scan_not_start = false; 837 antcomb->scan_not_start = false;
742 /* Set alt to another LNA */ 838 /* Set alt to another LNA */
743 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) { 839 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
744 div_ant_conf.main_lna_conf = 840 div_ant_conf.main_lna_conf =
745 ATH_ANT_DIV_COMB_LNA2; 841 ATH_ANT_DIV_COMB_LNA2;
746 div_ant_conf.alt_lna_conf = 842 div_ant_conf.alt_lna_conf =
747 ATH_ANT_DIV_COMB_LNA1; 843 ATH_ANT_DIV_COMB_LNA1;
748 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) { 844 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
749 div_ant_conf.main_lna_conf = 845 div_ant_conf.main_lna_conf =
750 ATH_ANT_DIV_COMB_LNA1; 846 ATH_ANT_DIV_COMB_LNA1;
751 div_ant_conf.alt_lna_conf = 847 div_ant_conf.alt_lna_conf =
752 ATH_ANT_DIV_COMB_LNA2; 848 ATH_ANT_DIV_COMB_LNA2;
753 } 849 }
754 goto div_comb_done; 850 goto div_comb_done;
755 } 851 }
852 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
853 main_rssi_avg, alt_rssi_avg,
854 alt_ratio);
855 antcomb->quick_scan_cnt++;
756 } 856 }
757 857
758 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
759 main_rssi_avg, alt_rssi_avg,
760 alt_ratio);
761
762 antcomb->quick_scan_cnt++;
763
764div_comb_done: 858div_comb_done:
765 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio); 859 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
766 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf); 860 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
861 ath9k_debug_stat_ant(sc, &div_ant_conf, main_rssi_avg, alt_rssi_avg);
767 862
768 antcomb->scan_start_time = jiffies; 863 antcomb->scan_start_time = jiffies;
769 antcomb->total_pkt_count = 0; 864 antcomb->total_pkt_count = 0;
@@ -772,26 +867,3 @@ div_comb_done:
772 antcomb->main_recv_cnt = 0; 867 antcomb->main_recv_cnt = 0;
773 antcomb->alt_recv_cnt = 0; 868 antcomb->alt_recv_cnt = 0;
774} 869}
775
776void ath_ant_comb_update(struct ath_softc *sc)
777{
778 struct ath_hw *ah = sc->sc_ah;
779 struct ath_common *common = ath9k_hw_common(ah);
780 struct ath_hw_antcomb_conf div_ant_conf;
781 u8 lna_conf;
782
783 ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
784
785 if (sc->ant_rx == 1)
786 lna_conf = ATH_ANT_DIV_COMB_LNA1;
787 else
788 lna_conf = ATH_ANT_DIV_COMB_LNA2;
789
790 div_ant_conf.main_lna_conf = lna_conf;
791 div_ant_conf.alt_lna_conf = lna_conf;
792
793 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
794
795 if (common->antenna_diversity)
796 ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
797}
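
The comment block added at the top of this file documents how the per-card antenna-diversity configuration is packed into the EEPROM. For the AR9485/AR9565/AR9331 single-byte layout, a sketch of the decoding it describes (the struct and helper are hypothetical, for illustration only):

struct ant_div_cfg {
	u8 alt_lnaconf;    /* bits 0-1: alt LNA diversity input  */
	u8 main_lnaconf;   /* bits 2-3: main LNA diversity input */
	u8 alt_gaintb;     /* bit 4: alt gain table select       */
	u8 main_gaintb;    /* bit 5: main gain table select      */
	u8 lnadiv_enable;  /* bit 6: enable LNA diversity        */
	u8 fastdiv_enable; /* bit 7: enable fast diversity       */
};

static void ant_div_control_unpack(u8 val, struct ant_div_cfg *cfg)
{
	cfg->alt_lnaconf    = val & 0x3;
	cfg->main_lnaconf   = (val >> 2) & 0x3;
	cfg->alt_gaintb     = (val >> 4) & 0x1;
	cfg->main_gaintb    = (val >> 5) & 0x1;
	cfg->lnadiv_enable  = (val >> 6) & 0x1;
	cfg->fastdiv_enable = (val >> 7) & 0x1;
}
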
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 1576d58291d4..08656473c63e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1160,8 +1160,6 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1160 */ 1160 */
1161 WARN_ON(1); 1161 WARN_ON(1);
1162 break; 1162 break;
1163 case ATH9K_ANI_PRESENT:
1164 break;
1165 default: 1163 default:
1166 ath_dbg(common, ANI, "invalid cmd %u\n", cmd); 1164 ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
1167 return false; 1165 return false;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 8dc2d089cdef..fb61b081d172 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -269,13 +269,12 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
269 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) 269 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
270 val |= AR_WA_D3_L1_DISABLE; 270 val |= AR_WA_D3_L1_DISABLE;
271 } else { 271 } else {
272 if (((AR_SREV_9285(ah) || 272 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
273 AR_SREV_9271(ah) || 273 if (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
274 AR_SREV_9287(ah)) && 274 val |= AR_WA_D3_L1_DISABLE;
275 (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) || 275 } else if (AR_SREV_9280(ah)) {
276 (AR_SREV_9280(ah) && 276 if (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
277 (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) { 277 val |= AR_WA_D3_L1_DISABLE;
278 val |= AR_WA_D3_L1_DISABLE;
279 } 278 }
280 } 279 }
281 280
@@ -297,24 +296,18 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
297 } else { 296 } else {
298 if (ah->config.pcie_waen) { 297 if (ah->config.pcie_waen) {
299 val = ah->config.pcie_waen; 298 val = ah->config.pcie_waen;
300 if (!power_off) 299 val &= (~AR_WA_D3_L1_DISABLE);
301 val &= (~AR_WA_D3_L1_DISABLE);
302 } else { 300 } else {
303 if (AR_SREV_9285(ah) || 301 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
304 AR_SREV_9271(ah) ||
305 AR_SREV_9287(ah)) {
306 val = AR9285_WA_DEFAULT; 302 val = AR9285_WA_DEFAULT;
307 if (!power_off) 303 val &= (~AR_WA_D3_L1_DISABLE);
308 val &= (~AR_WA_D3_L1_DISABLE); 304 } else if (AR_SREV_9280(ah)) {
309 }
310 else if (AR_SREV_9280(ah)) {
311 /* 305 /*
312 * For AR9280 chips, bit 22 of 0x4004 306 * For AR9280 chips, bit 22 of 0x4004
313 * needs to be set. 307 * needs to be set.
314 */ 308 */
315 val = AR9280_WA_DEFAULT; 309 val = AR9280_WA_DEFAULT;
316 if (!power_off) 310 val &= (~AR_WA_D3_L1_DISABLE);
317 val &= (~AR_WA_D3_L1_DISABLE);
318 } else { 311 } else {
319 val = AR_WA_DEFAULT; 312 val = AR_WA_DEFAULT;
320 } 313 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index f4003512d8d5..1fc1fa955d44 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -555,6 +555,69 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
555 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); 555 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
556} 556}
557 557
558#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
559
560static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
561{
562 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
563 u8 antdiv_ctrl1, antdiv_ctrl2;
564 u32 regval;
565
566 if (enable) {
567 antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE;
568 antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE;
569
570 /*
571 * Don't disable BT ant to allow BB to control SWCOM.
572 */
573 btcoex->bt_coex_mode2 &= (~(AR_BT_DISABLE_BT_ANT));
574 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
575
576 REG_WRITE(ah, AR_PHY_SWITCH_COM, ATH_BT_COEX_ANT_DIV_SWITCH_COM);
577 REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
578 } else {
579 /*
580 * Disable antenna diversity, use LNA1 only.
581 */
582 antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A;
583 antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A;
584
585 /*
586 * Disable BT Ant. to allow concurrent BT and WLAN receive.
587 */
588 btcoex->bt_coex_mode2 |= AR_BT_DISABLE_BT_ANT;
589 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
590
591 /*
592 * Program SWCOM table to make sure RF switch always parks
593 * at BT side.
594 */
595 REG_WRITE(ah, AR_PHY_SWITCH_COM, 0);
596 REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
597 }
598
599 regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
600 regval &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));
601 /*
602 * Clear ant_fast_div_bias [14:9] since for WB195,
603 * the main LNA is always LNA1.
604 */
605 regval &= (~(AR_PHY_9285_FAST_DIV_BIAS));
606 regval |= SM(antdiv_ctrl1, AR_PHY_9285_ANT_DIV_CTL);
607 regval |= SM(antdiv_ctrl2, AR_PHY_9285_ANT_DIV_ALT_LNACONF);
608 regval |= SM((antdiv_ctrl2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
609 regval |= SM((antdiv_ctrl1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB);
610 regval |= SM((antdiv_ctrl1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
611 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
612
613 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
614 regval &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
615 regval |= SM((antdiv_ctrl1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
616 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
617}
618
619#endif
620
558static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, 621static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
559 struct ath_spec_scan *param) 622 struct ath_spec_scan *param)
560{ 623{
@@ -634,5 +697,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
634 ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger; 697 ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
635 ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait; 698 ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
636 699
700#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
701 ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
702#endif
703
637 ar9002_hw_set_nf_limits(ah); 704 ar9002_hw_set_nf_limits(ah);
638} 705}
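
The SM() calls used throughout ar9002_hw_set_bt_ant_diversity() pack a value into a named register field; every field macro FOO has a companion shift FOO_S. The assumed definitions of the shift-mask helpers (MS() is the matching extractor):

#define SM(_v, _f)  (((_v) << _f##_S) & (_f))
#define MS(_v, _f)  (((_v) & (_f)) >> _f##_S)
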
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index f9eb2c357169..6314ae2e93e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -317,13 +317,15 @@
317#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29 317#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
318#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000 318#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
319#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30 319#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
320#define AR_PHY_9285_ANT_DIV_LNA1 2
321#define AR_PHY_9285_ANT_DIV_LNA2 1
322#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
323#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
324#define AR_PHY_9285_ANT_DIV_GAINTB_0 0 320#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
325#define AR_PHY_9285_ANT_DIV_GAINTB_1 1 321#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
326 322
323#define ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE 0x0b
324#define ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE 0x09
325#define ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A 0x04
326#define ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A 0x09
327#define ATH_BT_COEX_ANT_DIV_SWITCH_COM 0x66666666
328
327#define AR_PHY_EXT_CCA0 0x99b8 329#define AR_PHY_EXT_CCA0 0x99b8
328#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF 330#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
329#define AR_PHY_EXT_CCA0_THRESH62_S 0 331#define AR_PHY_EXT_CCA0_THRESH62_S 0
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d105e43d22e1..f4864807e15b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3541,13 +3541,12 @@ static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
3541 return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt); 3541 return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
3542} 3542}
3543 3543
3544 3544u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
3545static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
3546{ 3545{
3547 return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon); 3546 return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
3548} 3547}
3549 3548
3550static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz) 3549u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
3551{ 3550{
3552 return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2); 3551 return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
3553} 3552}
@@ -3561,6 +3560,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
3561 3560
3562static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) 3561static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3563{ 3562{
3563 struct ath_common *common = ath9k_hw_common(ah);
3564 struct ath9k_hw_capabilities *pCap = &ah->caps; 3564 struct ath9k_hw_capabilities *pCap = &ah->caps;
3565 int chain; 3565 int chain;
3566 u32 regval, value, gpio; 3566 u32 regval, value, gpio;
@@ -3614,6 +3614,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3614 } 3614 }
3615 3615
3616 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); 3616 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
3617 if (AR_SREV_9485(ah) && common->bt_ant_diversity) {
3618 value &= ~AR_SWITCH_TABLE_COM2_ALL;
3619 value |= ah->config.ant_ctrl_comm2g_switch_enable;
3620
3621 }
3617 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value); 3622 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
3618 3623
3619 if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) { 3624 if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
@@ -3645,8 +3650,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3645 regval &= (~AR_PHY_ANT_DIV_LNADIV); 3650 regval &= (~AR_PHY_ANT_DIV_LNADIV);
3646 regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S; 3651 regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
3647 3652
3653 if (AR_SREV_9485(ah) && common->bt_ant_diversity)
3654 regval |= AR_ANT_DIV_ENABLE;
3655
3648 if (AR_SREV_9565(ah)) { 3656 if (AR_SREV_9565(ah)) {
3649 if (ah->shared_chain_lnadiv) { 3657 if (common->bt_ant_diversity) {
3650 regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S); 3658 regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
3651 } else { 3659 } else {
3652 regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S); 3660 regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
@@ -3656,10 +3664,14 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3656 3664
3657 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 3665 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3658 3666
3659 /*enable fast_div */ 3667 /* enable fast_div */
3660 regval = REG_READ(ah, AR_PHY_CCK_DETECT); 3668 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
3661 regval &= (~AR_FAST_DIV_ENABLE); 3669 regval &= (~AR_FAST_DIV_ENABLE);
3662 regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; 3670 regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
3671
3672 if (AR_SREV_9485(ah) && common->bt_ant_diversity)
3673 regval |= AR_FAST_DIV_ENABLE;
3674
3663 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); 3675 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
3664 3676
3665 if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { 3677 if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
@@ -3673,9 +3685,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3673 AR_PHY_ANT_DIV_ALT_GAINTB | 3685 AR_PHY_ANT_DIV_ALT_GAINTB |
3674 AR_PHY_ANT_DIV_MAIN_GAINTB)); 3686 AR_PHY_ANT_DIV_MAIN_GAINTB));
3675 /* by default use LNA1 for the main antenna */ 3687 /* by default use LNA1 for the main antenna */
3676 regval |= (AR_PHY_ANT_DIV_LNA1 << 3688 regval |= (ATH_ANT_DIV_COMB_LNA1 <<
3677 AR_PHY_ANT_DIV_MAIN_LNACONF_S); 3689 AR_PHY_ANT_DIV_MAIN_LNACONF_S);
3678 regval |= (AR_PHY_ANT_DIV_LNA2 << 3690 regval |= (ATH_ANT_DIV_COMB_LNA2 <<
3679 AR_PHY_ANT_DIV_ALT_LNACONF_S); 3691 AR_PHY_ANT_DIV_ALT_LNACONF_S);
3680 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 3692 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3681 } 3693 }
@@ -3813,6 +3825,11 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
3813 else 3825 else
3814 value = ar9003_hw_atten_chain_get_margin(ah, i, chan); 3826 value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
3815 3827
3828 if (ah->config.alt_mingainidx)
3829 REG_RMW_FIELD(ah, AR_PHY_EXT_ATTEN_CTL_0,
3830 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
3831 value);
3832
3816 REG_RMW_FIELD(ah, ext_atten_reg[i], 3833 REG_RMW_FIELD(ah, ext_atten_reg[i],
3817 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, 3834 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
3818 value); 3835 value);
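[Editor's note] For AR9485 with WLAN/BT diversity enabled, the hunk above clears AR_SWITCH_TABLE_COM2_ALL and ORs in ant_ctrl_comm2g_switch_enable before the REG_RMW_FIELD write. The idiom is the usual clear-then-set field update; a standalone sketch under made-up mask and values (the driver's real constants differ):

#include <stdio.h>
#include <stdint.h>

/* Replace the bits selected by 'mask' with 'val', leaving the rest
 * of 'word' alone -- the same shape as the COM2 override above
 * (here 'val' is masked defensively as well). */
static uint32_t override_field(uint32_t word, uint32_t mask, uint32_t val)
{
	return (word & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t com2 = 0x12345678; /* pretend switch table word */
	uint32_t mask = 0x00ffff00; /* hypothetical COM2_ALL mask */
	uint32_t val  = 0x00666600; /* hypothetical switch override */

	printf("%08x -> %08x\n", (unsigned)com2,
	       (unsigned)override_field(com2, mask, val));
	return 0;
}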
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 874f6570bd1c..75d4fb41962f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,6 +334,8 @@ struct ar9300_eeprom {
334 334
335s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah); 335s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
336s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah); 336s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
337u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz);
338u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
337 339
338u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz); 340u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
339 341
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index d402cb32283f..608bb4824e2a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -153,7 +153,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
153 if (!ah->is_clk_25mhz) 153 if (!ah->is_clk_25mhz)
154 INIT_INI_ARRAY(&ah->iniAdditional, 154 INIT_INI_ARRAY(&ah->iniAdditional,
155 ar9340_1p0_radio_core_40M); 155 ar9340_1p0_radio_core_40M);
156 } else if (AR_SREV_9485_11(ah)) { 156 } else if (AR_SREV_9485_11_OR_LATER(ah)) {
157 /* mac */ 157 /* mac */
158 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 158 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
159 ar9485_1_1_mac_core); 159 ar9485_1_1_mac_core);
@@ -424,7 +424,7 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
424 else if (AR_SREV_9340(ah)) 424 else if (AR_SREV_9340(ah))
425 INIT_INI_ARRAY(&ah->iniModesTxGain, 425 INIT_INI_ARRAY(&ah->iniModesTxGain,
426 ar9340Modes_lowest_ob_db_tx_gain_table_1p0); 426 ar9340Modes_lowest_ob_db_tx_gain_table_1p0);
427 else if (AR_SREV_9485_11(ah)) 427 else if (AR_SREV_9485_11_OR_LATER(ah))
428 INIT_INI_ARRAY(&ah->iniModesTxGain, 428 INIT_INI_ARRAY(&ah->iniModesTxGain,
429 ar9485_modes_lowest_ob_db_tx_gain_1_1); 429 ar9485_modes_lowest_ob_db_tx_gain_1_1);
430 else if (AR_SREV_9550(ah)) 430 else if (AR_SREV_9550(ah))
@@ -458,7 +458,7 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
458 else if (AR_SREV_9340(ah)) 458 else if (AR_SREV_9340(ah))
459 INIT_INI_ARRAY(&ah->iniModesTxGain, 459 INIT_INI_ARRAY(&ah->iniModesTxGain,
460 ar9340Modes_high_ob_db_tx_gain_table_1p0); 460 ar9340Modes_high_ob_db_tx_gain_table_1p0);
461 else if (AR_SREV_9485_11(ah)) 461 else if (AR_SREV_9485_11_OR_LATER(ah))
462 INIT_INI_ARRAY(&ah->iniModesTxGain, 462 INIT_INI_ARRAY(&ah->iniModesTxGain,
463 ar9485Modes_high_ob_db_tx_gain_1_1); 463 ar9485Modes_high_ob_db_tx_gain_1_1);
464 else if (AR_SREV_9580(ah)) 464 else if (AR_SREV_9580(ah))
@@ -492,7 +492,7 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
492 else if (AR_SREV_9340(ah)) 492 else if (AR_SREV_9340(ah))
493 INIT_INI_ARRAY(&ah->iniModesTxGain, 493 INIT_INI_ARRAY(&ah->iniModesTxGain,
494 ar9340Modes_low_ob_db_tx_gain_table_1p0); 494 ar9340Modes_low_ob_db_tx_gain_table_1p0);
495 else if (AR_SREV_9485_11(ah)) 495 else if (AR_SREV_9485_11_OR_LATER(ah))
496 INIT_INI_ARRAY(&ah->iniModesTxGain, 496 INIT_INI_ARRAY(&ah->iniModesTxGain,
497 ar9485Modes_low_ob_db_tx_gain_1_1); 497 ar9485Modes_low_ob_db_tx_gain_1_1);
498 else if (AR_SREV_9580(ah)) 498 else if (AR_SREV_9580(ah))
@@ -517,7 +517,7 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
517 else if (AR_SREV_9340(ah)) 517 else if (AR_SREV_9340(ah))
518 INIT_INI_ARRAY(&ah->iniModesTxGain, 518 INIT_INI_ARRAY(&ah->iniModesTxGain,
519 ar9340Modes_high_power_tx_gain_table_1p0); 519 ar9340Modes_high_power_tx_gain_table_1p0);
520 else if (AR_SREV_9485_11(ah)) 520 else if (AR_SREV_9485_11_OR_LATER(ah))
521 INIT_INI_ARRAY(&ah->iniModesTxGain, 521 INIT_INI_ARRAY(&ah->iniModesTxGain,
522 ar9485Modes_high_power_tx_gain_1_1); 522 ar9485Modes_high_power_tx_gain_1_1);
523 else if (AR_SREV_9580(ah)) 523 else if (AR_SREV_9580(ah))
@@ -552,7 +552,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
552 552
553static void ar9003_tx_gain_table_mode5(struct ath_hw *ah) 553static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
554{ 554{
555 if (AR_SREV_9485_11(ah)) 555 if (AR_SREV_9485_11_OR_LATER(ah))
556 INIT_INI_ARRAY(&ah->iniModesTxGain, 556 INIT_INI_ARRAY(&ah->iniModesTxGain,
557 ar9485Modes_green_ob_db_tx_gain_1_1); 557 ar9485Modes_green_ob_db_tx_gain_1_1);
558 else if (AR_SREV_9340(ah)) 558 else if (AR_SREV_9340(ah))
@@ -571,7 +571,7 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
571 if (AR_SREV_9340(ah)) 571 if (AR_SREV_9340(ah))
572 INIT_INI_ARRAY(&ah->iniModesTxGain, 572 INIT_INI_ARRAY(&ah->iniModesTxGain,
573 ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0); 573 ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0);
574 else if (AR_SREV_9485_11(ah)) 574 else if (AR_SREV_9485_11_OR_LATER(ah))
575 INIT_INI_ARRAY(&ah->iniModesTxGain, 575 INIT_INI_ARRAY(&ah->iniModesTxGain,
576 ar9485Modes_green_spur_ob_db_tx_gain_1_1); 576 ar9485Modes_green_spur_ob_db_tx_gain_1_1);
577 else if (AR_SREV_9580(ah)) 577 else if (AR_SREV_9580(ah))
@@ -611,7 +611,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
611 else if (AR_SREV_9340(ah)) 611 else if (AR_SREV_9340(ah))
612 INIT_INI_ARRAY(&ah->iniModesRxGain, 612 INIT_INI_ARRAY(&ah->iniModesRxGain,
613 ar9340Common_rx_gain_table_1p0); 613 ar9340Common_rx_gain_table_1p0);
614 else if (AR_SREV_9485_11(ah)) 614 else if (AR_SREV_9485_11_OR_LATER(ah))
615 INIT_INI_ARRAY(&ah->iniModesRxGain, 615 INIT_INI_ARRAY(&ah->iniModesRxGain,
616 ar9485_common_rx_gain_1_1); 616 ar9485_common_rx_gain_1_1);
617 else if (AR_SREV_9550(ah)) { 617 else if (AR_SREV_9550(ah)) {
@@ -644,7 +644,7 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
644 else if (AR_SREV_9340(ah)) 644 else if (AR_SREV_9340(ah))
645 INIT_INI_ARRAY(&ah->iniModesRxGain, 645 INIT_INI_ARRAY(&ah->iniModesRxGain,
646 ar9340Common_wo_xlna_rx_gain_table_1p0); 646 ar9340Common_wo_xlna_rx_gain_table_1p0);
647 else if (AR_SREV_9485_11(ah)) 647 else if (AR_SREV_9485_11_OR_LATER(ah))
648 INIT_INI_ARRAY(&ah->iniModesRxGain, 648 INIT_INI_ARRAY(&ah->iniModesRxGain,
649 ar9485Common_wo_xlna_rx_gain_1_1); 649 ar9485Common_wo_xlna_rx_gain_1_1);
650 else if (AR_SREV_9462_21(ah)) 650 else if (AR_SREV_9462_21(ah))
@@ -745,16 +745,25 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
745static void ar9003_hw_configpcipowersave(struct ath_hw *ah, 745static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
746 bool power_off) 746 bool power_off)
747{ 747{
748 /*
749 * Increase L1 Entry Latency. Some WB222 boards don't have
750 * this change in eeprom/OTP.
751 *
752 */
753 if (AR_SREV_9462(ah)) {
754 u32 val = ah->config.aspm_l1_fix;
755 if ((val & 0xff000000) == 0x17000000) {
756 val &= 0x00ffffff;
757 val |= 0x27000000;
758 REG_WRITE(ah, 0x570c, val);
759 }
760 }
761
748 /* Nothing to do on restore for 11N */ 762 /* Nothing to do on restore for 11N */
749 if (!power_off /* !restore */) { 763 if (!power_off /* !restore */) {
750 /* set bit 19 to allow forcing of pcie core into L1 state */ 764 /* set bit 19 to allow forcing of pcie core into L1 state */
751 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); 765 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
752 766 REG_WRITE(ah, AR_WA, ah->WARegVal);
753 /* Several PCIe messages to ensure proper behaviour */
754 if (ah->config.pcie_waen)
755 REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
756 else
757 REG_WRITE(ah, AR_WA, ah->WARegVal);
758 } 767 }
759 768
760 /* 769 /*
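[Editor's note] The WB222 workaround added above reduces to plain byte arithmetic on the ASPM fix-up value: if the top byte reads 0x17, replace it with 0x27 and keep the low 24 bits. A runnable sketch of exactly that check-and-replace (sample inputs are arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the AR_SREV_9462 branch: bump the L1 entry
 * latency byte from 0x17 to 0x27, untouched otherwise. */
static uint32_t fix_l1_latency(uint32_t val)
{
	if ((val & 0xff000000) == 0x17000000) {
		val &= 0x00ffffff;
		val |= 0x27000000;
	}
	return val;
}

int main(void)
{
	printf("%08x -> %08x\n", 0x17abcdefu, (unsigned)fix_l1_latency(0x17abcdefu));
	printf("%08x -> %08x\n", 0x27abcdefu, (unsigned)fix_l1_latency(0x27abcdefu));
	return 0;
}

The first line prints 17abcdef -> 27abcdef; the second is left unchanged, matching the guard in the hunk.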
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 5163abd3937c..f6c5c1b50471 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -491,6 +491,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
491 rxs->rs_rate = MS(rxsp->status1, AR_RxRate); 491 rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
492 rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0; 492 rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
493 493
494 rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0;
494 rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0; 495 rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
495 rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; 496 rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
496 rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); 497 rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1f694ab3cc78..e897648d3233 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -632,6 +632,22 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
632 632
633 REG_SET_BIT(ah, AR_PHY_CCK_DETECT, 633 REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
634 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); 634 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
635
636 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
637 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
638 AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
639
640 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
641 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
642 ah->enabled_cals |= TX_IQ_CAL;
643 else
644 ah->enabled_cals &= ~TX_IQ_CAL;
645
646 if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
647 ah->enabled_cals |= TX_CL_CAL;
648 else
649 ah->enabled_cals &= ~TX_CL_CAL;
650 }
635} 651}
636 652
637static void ar9003_hw_prog_ini(struct ath_hw *ah, 653static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -814,29 +830,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
814 if (chan->channel == 2484) 830 if (chan->channel == 2484)
815 ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); 831 ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
816 832
817 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
818 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
819 AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
820
821 ah->modes_index = modesIndex; 833 ah->modes_index = modesIndex;
822 ar9003_hw_override_ini(ah); 834 ar9003_hw_override_ini(ah);
823 ar9003_hw_set_channel_regs(ah, chan); 835 ar9003_hw_set_channel_regs(ah, chan);
824 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 836 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
825 ath9k_hw_apply_txpower(ah, chan, false); 837 ath9k_hw_apply_txpower(ah, chan, false);
826 838
827 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
828 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
829 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
830 ah->enabled_cals |= TX_IQ_CAL;
831 else
832 ah->enabled_cals &= ~TX_IQ_CAL;
833
834 if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
835 ah->enabled_cals |= TX_CL_CAL;
836 else
837 ah->enabled_cals &= ~TX_CL_CAL;
838 }
839
840 return 0; 839 return 0;
841} 840}
842 841
@@ -1173,6 +1172,10 @@ skip_ws_det:
1173 * is_on == 0 means MRC CCK is OFF (more noise imm) 1172 * is_on == 0 means MRC CCK is OFF (more noise imm)
1174 */ 1173 */
1175 bool is_on = param ? 1 : 0; 1174 bool is_on = param ? 1 : 0;
1175
1176 if (ah->caps.rx_chainmask == 1)
1177 break;
1178
1176 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, 1179 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
1177 AR_PHY_MRC_CCK_ENABLE, is_on); 1180 AR_PHY_MRC_CCK_ENABLE, is_on);
1178 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, 1181 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
@@ -1190,8 +1193,6 @@ skip_ws_det:
1190 } 1193 }
1191 break; 1194 break;
1192 } 1195 }
1193 case ATH9K_ANI_PRESENT:
1194 break;
1195 default: 1196 default:
1196 ath_dbg(common, ANI, "invalid cmd %u\n", cmd); 1197 ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
1197 return false; 1198 return false;
@@ -1413,65 +1414,111 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
1413 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 1414 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1414} 1415}
1415 1416
1416static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah, 1417#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1417 bool enable) 1418
1419static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
1418{ 1420{
1421 struct ath9k_hw_capabilities *pCap = &ah->caps;
1419 u8 ant_div_ctl1; 1422 u8 ant_div_ctl1;
1420 u32 regval; 1423 u32 regval;
1421 1424
1422 if (!AR_SREV_9565(ah)) 1425 if (!AR_SREV_9485(ah) && !AR_SREV_9565(ah))
1423 return; 1426 return;
1424 1427
1425 ah->shared_chain_lnadiv = enable; 1428 if (AR_SREV_9485(ah)) {
1429 regval = ar9003_hw_ant_ctrl_common_2_get(ah,
1430 IS_CHAN_2GHZ(ah->curchan));
1431 if (enable) {
1432 regval &= ~AR_SWITCH_TABLE_COM2_ALL;
1433 regval |= ah->config.ant_ctrl_comm2g_switch_enable;
1434 }
1435 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2,
1436 AR_SWITCH_TABLE_COM2_ALL, regval);
1437 }
1438
1426 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 1439 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
1427 1440
1441 /*
1442 * Set MAIN/ALT LNA conf.
1443 * Set MAIN/ALT gain_tb.
1444 */
1428 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 1445 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1429 regval &= (~AR_ANT_DIV_CTRL_ALL); 1446 regval &= (~AR_ANT_DIV_CTRL_ALL);
1430 regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; 1447 regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
1431 regval &= ~AR_PHY_ANT_DIV_LNADIV;
1432 regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
1433
1434 if (enable)
1435 regval |= AR_ANT_DIV_ENABLE;
1436
1437 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 1448 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1438 1449
1439 regval = REG_READ(ah, AR_PHY_CCK_DETECT); 1450 if (AR_SREV_9485_11_OR_LATER(ah)) {
1440 regval &= ~AR_FAST_DIV_ENABLE; 1451 /*
1441 regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; 1452 * Enable LNA diversity.
1442 1453 */
1443 if (enable)
1444 regval |= AR_FAST_DIV_ENABLE;
1445
1446 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
1447
1448 if (enable) {
1449 REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1450 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1451 if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
1452 REG_SET_BIT(ah, AR_PHY_RESTART,
1453 AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
1454 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
1455 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1456 } else {
1457 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
1458 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1459 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1460 REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
1461 REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
1462 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1463
1464 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 1454 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1465 regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF | 1455 regval &= ~AR_PHY_ANT_DIV_LNADIV;
1466 AR_PHY_ANT_DIV_ALT_LNACONF | 1456 regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
1467 AR_PHY_ANT_DIV_MAIN_GAINTB | 1457 if (enable)
1468 AR_PHY_ANT_DIV_ALT_GAINTB); 1458 regval |= AR_ANT_DIV_ENABLE;
1469 regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S); 1459
1470 regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
1471 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 1460 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1461
1462 /*
1463 * Enable fast antenna diversity.
1464 */
1465 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
1466 regval &= ~AR_FAST_DIV_ENABLE;
1467 regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
1468 if (enable)
1469 regval |= AR_FAST_DIV_ENABLE;
1470
1471 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
1472
1473 if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
1474 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1475 regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
1476 AR_PHY_ANT_DIV_ALT_LNACONF |
1477 AR_PHY_ANT_DIV_ALT_GAINTB |
1478 AR_PHY_ANT_DIV_MAIN_GAINTB));
1479 /*
1480 * Set MAIN to LNA1 and ALT to LNA2 at the
1481 * beginning.
1482 */
1483 regval |= (ATH_ANT_DIV_COMB_LNA1 <<
1484 AR_PHY_ANT_DIV_MAIN_LNACONF_S);
1485 regval |= (ATH_ANT_DIV_COMB_LNA2 <<
1486 AR_PHY_ANT_DIV_ALT_LNACONF_S);
1487 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1488 }
1489 } else if (AR_SREV_9565(ah)) {
1490 if (enable) {
1491 REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1492 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1493 if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
1494 REG_SET_BIT(ah, AR_PHY_RESTART,
1495 AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
1496 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
1497 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1498 } else {
1499 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
1500 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1501 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1502 REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
1503 REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
1504 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1505
1506 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1507 regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
1508 AR_PHY_ANT_DIV_ALT_LNACONF |
1509 AR_PHY_ANT_DIV_MAIN_GAINTB |
1510 AR_PHY_ANT_DIV_ALT_GAINTB);
1511 regval |= (ATH_ANT_DIV_COMB_LNA1 <<
1512 AR_PHY_ANT_DIV_MAIN_LNACONF_S);
1513 regval |= (ATH_ANT_DIV_COMB_LNA2 <<
1514 AR_PHY_ANT_DIV_ALT_LNACONF_S);
1515 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1516 }
1472 } 1517 }
1473} 1518}
1474 1519
1520#endif
1521
1475static int ar9003_hw_fast_chan_change(struct ath_hw *ah, 1522static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1476 struct ath9k_channel *chan, 1523 struct ath9k_channel *chan,
1477 u8 *ini_reloaded) 1524 u8 *ini_reloaded)
@@ -1518,6 +1565,18 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1518 1565
1519 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); 1566 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
1520 1567
1568 if (AR_SREV_9462_20_OR_LATER(ah)) {
1569 /*
1570 * CUS217 mix LNA mode.
1571 */
1572 if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
1573 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
1574 1, regWrites);
1575 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
1576 modesIndex, regWrites);
1577 }
1578 }
1579
1521 /* 1580 /*
1522 * For 5GHz channels requiring Fast Clock, apply 1581 * For 5GHz channels requiring Fast Clock, apply
1523 * different modal values. 1582 * different modal values.
@@ -1528,7 +1587,11 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1528 if (AR_SREV_9565(ah)) 1587 if (AR_SREV_9565(ah))
1529 REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites); 1588 REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
1530 1589
1531 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); 1590 /*
1591 * JAPAN regulatory.
1592 */
1593 if (chan->channel == 2484)
1594 ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
1532 1595
1533 ah->modes_index = modesIndex; 1596 ah->modes_index = modesIndex;
1534 *ini_reloaded = true; 1597 *ini_reloaded = true;
@@ -1631,11 +1694,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1631 1694
1632 ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get; 1695 ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
1633 ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set; 1696 ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
1634 ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
1635 ops->spectral_scan_config = ar9003_hw_spectral_scan_config; 1697 ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
1636 ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger; 1698 ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
1637 ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait; 1699 ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
1638 1700
1701#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1702 ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
1703#endif
1704
1639 ar9003_hw_set_nf_limits(ah); 1705 ar9003_hw_set_nf_limits(ah);
1640 ar9003_hw_set_radar_conf(ah); 1706 ar9003_hw_set_radar_conf(ah);
1641 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs)); 1707 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
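[Editor's note] The TX_IQ_CAL/TX_CL_CAL bookkeeping that moved into ar9003_hw_override_ini above is a conditional set-or-clear of a flag bit driven by what the hardware register reports. A tiny sketch of that pattern (flag values here are illustrative, not the driver's enum):

#include <stdio.h>

#define TX_IQ_CAL 0x1 /* illustrative bit, not the real definition */
#define TX_CL_CAL 0x2

static unsigned int update_cal(unsigned int cals, unsigned int cal, int hw_on)
{
	return hw_on ? (cals | cal) : (cals & ~cal);
}

int main(void)
{
	unsigned int cals = 0;

	cals = update_cal(cals, TX_IQ_CAL, 1); /* register field says enabled */
	cals = update_cal(cals, TX_CL_CAL, 0); /* CL cal reported disabled */
	printf("enabled_cals=0x%x\n", cals);   /* prints 0x1 */
	return 0;
}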
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index d4d39f305a0b..6fd752321e36 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -148,6 +148,8 @@
148#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28 148#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
149#define AR_PHY_EXT_CCA_THRESH62 0x007F0000 149#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
150#define AR_PHY_EXT_CCA_THRESH62_S 16 150#define AR_PHY_EXT_CCA_THRESH62_S 16
151#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX 0x0000FF00
152#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX_S 8
151#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000 153#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
152#define AR_PHY_EXT_MINCCA_PWR_S 16 154#define AR_PHY_EXT_MINCCA_PWR_S 16
153#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L 155#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
@@ -296,11 +298,6 @@
296#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000 298#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
297#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30 299#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
298 300
299#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0
300#define AR_PHY_ANT_DIV_LNA2 0x1
301#define AR_PHY_ANT_DIV_LNA1 0x2
302#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3
303
304#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c) 301#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
305#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30) 302#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
306#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34) 303#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c1224b5a257b..2ee35f677c0e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -72,17 +72,12 @@ struct ath_config {
72/*************************/ 72/*************************/
73 73
74#define ATH_TXBUF_RESET(_bf) do { \ 74#define ATH_TXBUF_RESET(_bf) do { \
75 (_bf)->bf_stale = false; \
76 (_bf)->bf_lastbf = NULL; \ 75 (_bf)->bf_lastbf = NULL; \
77 (_bf)->bf_next = NULL; \ 76 (_bf)->bf_next = NULL; \
78 memset(&((_bf)->bf_state), 0, \ 77 memset(&((_bf)->bf_state), 0, \
79 sizeof(struct ath_buf_state)); \ 78 sizeof(struct ath_buf_state)); \
80 } while (0) 79 } while (0)
81 80
82#define ATH_RXBUF_RESET(_bf) do { \
83 (_bf)->bf_stale = false; \
84 } while (0)
85
86/** 81/**
87 * enum buffer_type - Buffer type flags 82 * enum buffer_type - Buffer type flags
88 * 83 *
@@ -137,7 +132,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
137#define ATH_AGGR_ENCRYPTDELIM 10 132#define ATH_AGGR_ENCRYPTDELIM 10
138/* minimum h/w qdepth to be sustained to maximize aggregation */ 133/* minimum h/w qdepth to be sustained to maximize aggregation */
139#define ATH_AGGR_MIN_QDEPTH 2 134#define ATH_AGGR_MIN_QDEPTH 2
140#define ATH_AMPDU_SUBFRAME_DEFAULT 32 135/* minimum h/w qdepth for non-aggregated traffic */
136#define ATH_NON_AGGR_MIN_QDEPTH 8
141 137
142#define IEEE80211_SEQ_SEQ_SHIFT 4 138#define IEEE80211_SEQ_SEQ_SHIFT 4
143#define IEEE80211_SEQ_MAX 4096 139#define IEEE80211_SEQ_MAX 4096
@@ -174,12 +170,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
174 170
175#define ATH_TX_COMPLETE_POLL_INT 1000 171#define ATH_TX_COMPLETE_POLL_INT 1000
176 172
177enum ATH_AGGR_STATUS {
178 ATH_AGGR_DONE,
179 ATH_AGGR_BAW_CLOSED,
180 ATH_AGGR_LIMITED,
181};
182
183#define ATH_TXFIFO_DEPTH 8 173#define ATH_TXFIFO_DEPTH 8
184struct ath_txq { 174struct ath_txq {
185 int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */ 175 int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
@@ -201,10 +191,10 @@ struct ath_txq {
201 191
202struct ath_atx_ac { 192struct ath_atx_ac {
203 struct ath_txq *txq; 193 struct ath_txq *txq;
204 int sched;
205 struct list_head list; 194 struct list_head list;
206 struct list_head tid_q; 195 struct list_head tid_q;
207 bool clear_ps_filter; 196 bool clear_ps_filter;
197 bool sched;
208}; 198};
209 199
210struct ath_frame_info { 200struct ath_frame_info {
@@ -212,14 +202,16 @@ struct ath_frame_info {
212 int framelen; 202 int framelen;
213 enum ath9k_key_type keytype; 203 enum ath9k_key_type keytype;
214 u8 keyix; 204 u8 keyix;
215 u8 retries;
216 u8 rtscts_rate; 205 u8 rtscts_rate;
206 u8 retries : 7;
207 u8 baw_tracked : 1;
217}; 208};
218 209
219struct ath_buf_state { 210struct ath_buf_state {
220 u8 bf_type; 211 u8 bf_type;
221 u8 bfs_paprd; 212 u8 bfs_paprd;
222 u8 ndelim; 213 u8 ndelim;
214 bool stale;
223 u16 seqno; 215 u16 seqno;
224 unsigned long bfs_paprd_timestamp; 216 unsigned long bfs_paprd_timestamp;
225}; 217};
@@ -233,7 +225,6 @@ struct ath_buf {
233 void *bf_desc; /* virtual addr of desc */ 225 void *bf_desc; /* virtual addr of desc */
234 dma_addr_t bf_daddr; /* physical addr of desc */ 226 dma_addr_t bf_daddr; /* physical addr of desc */
235 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */ 227 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
236 bool bf_stale;
237 struct ieee80211_tx_rate rates[4]; 228 struct ieee80211_tx_rate rates[4];
238 struct ath_buf_state bf_state; 229 struct ath_buf_state bf_state;
239}; 230};
@@ -241,16 +232,18 @@ struct ath_buf {
241struct ath_atx_tid { 232struct ath_atx_tid {
242 struct list_head list; 233 struct list_head list;
243 struct sk_buff_head buf_q; 234 struct sk_buff_head buf_q;
235 struct sk_buff_head retry_q;
244 struct ath_node *an; 236 struct ath_node *an;
245 struct ath_atx_ac *ac; 237 struct ath_atx_ac *ac;
246 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; 238 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
247 int bar_index;
248 u16 seq_start; 239 u16 seq_start;
249 u16 seq_next; 240 u16 seq_next;
250 u16 baw_size; 241 u16 baw_size;
251 int tidno; 242 u8 tidno;
252 int baw_head; /* first un-acked tx buffer */ 243 int baw_head; /* first un-acked tx buffer */
253 int baw_tail; /* next unused tx buffer slot */ 244 int baw_tail; /* next unused tx buffer slot */
245
246 s8 bar_index;
254 bool sched; 247 bool sched;
255 bool paused; 248 bool paused;
256 bool active; 249 bool active;
@@ -262,16 +255,13 @@ struct ath_node {
262 struct ieee80211_vif *vif; /* interface with which we're associated */ 255 struct ieee80211_vif *vif; /* interface with which we're associated */
263 struct ath_atx_tid tid[IEEE80211_NUM_TIDS]; 256 struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
264 struct ath_atx_ac ac[IEEE80211_NUM_ACS]; 257 struct ath_atx_ac ac[IEEE80211_NUM_ACS];
265 int ps_key;
266 258
267 u16 maxampdu; 259 u16 maxampdu;
268 u8 mpdudensity; 260 u8 mpdudensity;
261 s8 ps_key;
269 262
270 bool sleeping; 263 bool sleeping;
271 264 bool no_ps_filter;
272#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
273 struct dentry *node_stat;
274#endif
275}; 265};
276 266
277struct ath_tx_control { 267struct ath_tx_control {
@@ -317,6 +307,7 @@ struct ath_rx {
317 struct ath_descdma rxdma; 307 struct ath_descdma rxdma;
318 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; 308 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
319 309
310 struct ath_buf *buf_hold;
320 struct sk_buff *frag; 311 struct sk_buff *frag;
321 312
322 u32 ampdu_ref; 313 u32 ampdu_ref;
@@ -367,6 +358,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
367/********/ 358/********/
368 359
369struct ath_vif { 360struct ath_vif {
361 struct ath_node mcast_node;
370 int av_bslot; 362 int av_bslot;
371 bool primary_sta_vif; 363 bool primary_sta_vif;
372 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ 364 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
@@ -428,6 +420,7 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
428void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif); 420void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
429void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif); 421void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
430void ath9k_set_beacon(struct ath_softc *sc); 422void ath9k_set_beacon(struct ath_softc *sc);
423bool ath9k_csa_is_finished(struct ath_softc *sc);
431 424
432/*******************/ 425/*******************/
433/* Link Monitoring */ 426/* Link Monitoring */
@@ -585,19 +578,14 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
585#define ATH_ANT_DIV_COMB_MAX_COUNT 100 578#define ATH_ANT_DIV_COMB_MAX_COUNT 100
586#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30 579#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
587#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20 580#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
581#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
582#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
588 583
589#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1 584#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
590#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4 585#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
591#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2 586#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
592#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2 587#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
593 588
594enum ath9k_ant_div_comb_lna_conf {
595 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
596 ATH_ANT_DIV_COMB_LNA2,
597 ATH_ANT_DIV_COMB_LNA1,
598 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
599};
600
601struct ath_ant_comb { 589struct ath_ant_comb {
602 u16 count; 590 u16 count;
603 u16 total_pkt_count; 591 u16 total_pkt_count;
@@ -614,27 +602,36 @@ struct ath_ant_comb {
614 int rssi_first; 602 int rssi_first;
615 int rssi_second; 603 int rssi_second;
616 int rssi_third; 604 int rssi_third;
605 int ant_ratio;
606 int ant_ratio2;
617 bool alt_good; 607 bool alt_good;
618 int quick_scan_cnt; 608 int quick_scan_cnt;
619 int main_conf; 609 enum ath9k_ant_div_comb_lna_conf main_conf;
620 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf; 610 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
621 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf; 611 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
622 bool first_ratio; 612 bool first_ratio;
623 bool second_ratio; 613 bool second_ratio;
624 unsigned long scan_start_time; 614 unsigned long scan_start_time;
615
616 /*
617 * Card-specific config values.
618 */
619 int low_rssi_thresh;
620 int fast_div_bias;
625}; 621};
626 622
627void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); 623void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
628void ath_ant_comb_update(struct ath_softc *sc);
629 624
630/********************/ 625/********************/
631/* Main driver core */ 626/* Main driver core */
632/********************/ 627/********************/
633 628
634#define ATH9K_PCI_CUS198 0x0001 629#define ATH9K_PCI_CUS198 0x0001
635#define ATH9K_PCI_CUS230 0x0002 630#define ATH9K_PCI_CUS230 0x0002
636#define ATH9K_PCI_CUS217 0x0004 631#define ATH9K_PCI_CUS217 0x0004
637#define ATH9K_PCI_WOW 0x0008 632#define ATH9K_PCI_WOW 0x0008
633#define ATH9K_PCI_BT_ANT_DIV 0x0010
634#define ATH9K_PCI_D3_L1_WAR 0x0020
638 635
639/* 636/*
640 * Default cache line size, in bytes. 637 * Default cache line size, in bytes.
@@ -761,6 +758,7 @@ struct ath_softc {
761#endif 758#endif
762 759
763 struct ath_descdma txsdma; 760 struct ath_descdma txsdma;
761 struct ieee80211_vif *csa_vif;
764 762
765 struct ath_ant_comb ant_comb; 763 struct ath_ant_comb ant_comb;
766 u8 ant_tx, ant_rx; 764 u8 ant_tx, ant_rx;
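[Editor's note] The ath_frame_info change above packs a 7-bit retries counter and a 1-bit baw_tracked flag into the single byte the old u8 retries occupied alone. A self-contained demonstration of that layout (on common ABIs sizeof prints 1, though bitfield packing is formally implementation-defined):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the new ath_frame_info fields: both members share one byte. */
struct frame_info_demo {
	uint8_t retries : 7;
	uint8_t baw_tracked : 1;
};

int main(void)
{
	struct frame_info_demo fi = { .retries = 3, .baw_tracked = 1 };

	printf("sizeof=%zu retries=%u baw_tracked=%u\n",
	       sizeof(fi), fi.retries, fi.baw_tracked);
	return 0;
}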
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1a17732bb089..b5c16b3a37b9 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -291,6 +291,23 @@ void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
291 (unsigned long long)tsfadjust, avp->av_bslot); 291 (unsigned long long)tsfadjust, avp->av_bslot);
292} 292}
293 293
294bool ath9k_csa_is_finished(struct ath_softc *sc)
295{
296 struct ieee80211_vif *vif;
297
298 vif = sc->csa_vif;
299 if (!vif || !vif->csa_active)
300 return false;
301
302 if (!ieee80211_csa_is_complete(vif))
303 return false;
304
305 ieee80211_csa_finish(vif);
306
307 sc->csa_vif = NULL;
308 return true;
309}
310
294void ath9k_beacon_tasklet(unsigned long data) 311void ath9k_beacon_tasklet(unsigned long data)
295{ 312{
296 struct ath_softc *sc = (struct ath_softc *)data; 313 struct ath_softc *sc = (struct ath_softc *)data;
@@ -336,6 +353,10 @@ void ath9k_beacon_tasklet(unsigned long data)
336 return; 353 return;
337 } 354 }
338 355
356 /* EDMA devices check that in the tx completion function. */
357 if (!edma && ath9k_csa_is_finished(sc))
358 return;
359
339 slot = ath9k_beacon_choose_slot(sc); 360 slot = ath9k_beacon_choose_slot(sc);
340 vif = sc->beacon.bslot[slot]; 361 vif = sc->beacon.bslot[slot];
341 362
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 344fdde1d7a3..d3063c21e16c 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,37 +49,40 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
49} 49}
50EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype); 50EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
51 51
52static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan, 52static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
53 enum nl80211_channel_type channel_type)
54{ 53{
55 u32 chanmode = 0; 54 u32 chanmode = 0;
56 55
57 switch (chan->band) { 56 switch (chandef->chan->band) {
58 case IEEE80211_BAND_2GHZ: 57 case IEEE80211_BAND_2GHZ:
59 switch (channel_type) { 58 switch (chandef->width) {
60 case NL80211_CHAN_NO_HT: 59 case NL80211_CHAN_WIDTH_20_NOHT:
61 case NL80211_CHAN_HT20: 60 case NL80211_CHAN_WIDTH_20:
62 chanmode = CHANNEL_G_HT20; 61 chanmode = CHANNEL_G_HT20;
63 break; 62 break;
64 case NL80211_CHAN_HT40PLUS: 63 case NL80211_CHAN_WIDTH_40:
65 chanmode = CHANNEL_G_HT40PLUS; 64 if (chandef->center_freq1 > chandef->chan->center_freq)
65 chanmode = CHANNEL_G_HT40PLUS;
66 else
67 chanmode = CHANNEL_G_HT40MINUS;
66 break; 68 break;
67 case NL80211_CHAN_HT40MINUS: 69 default:
68 chanmode = CHANNEL_G_HT40MINUS;
69 break; 70 break;
70 } 71 }
71 break; 72 break;
72 case IEEE80211_BAND_5GHZ: 73 case IEEE80211_BAND_5GHZ:
73 switch (channel_type) { 74 switch (chandef->width) {
74 case NL80211_CHAN_NO_HT: 75 case NL80211_CHAN_WIDTH_20_NOHT:
75 case NL80211_CHAN_HT20: 76 case NL80211_CHAN_WIDTH_20:
76 chanmode = CHANNEL_A_HT20; 77 chanmode = CHANNEL_A_HT20;
77 break; 78 break;
78 case NL80211_CHAN_HT40PLUS: 79 case NL80211_CHAN_WIDTH_40:
79 chanmode = CHANNEL_A_HT40PLUS; 80 if (chandef->center_freq1 > chandef->chan->center_freq)
81 chanmode = CHANNEL_A_HT40PLUS;
82 else
83 chanmode = CHANNEL_A_HT40MINUS;
80 break; 84 break;
81 case NL80211_CHAN_HT40MINUS: 85 default:
82 chanmode = CHANNEL_A_HT40MINUS;
83 break; 86 break;
84 } 87 }
85 break; 88 break;
@@ -94,13 +97,12 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
94 * Update internal channel flags. 97 * Update internal channel flags.
95 */ 98 */
96void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, 99void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
97 struct ieee80211_channel *chan, 100 struct cfg80211_chan_def *chandef)
98 enum nl80211_channel_type channel_type)
99{ 101{
100 ichan->channel = chan->center_freq; 102 ichan->channel = chandef->chan->center_freq;
101 ichan->chan = chan; 103 ichan->chan = chandef->chan;
102 104
103 if (chan->band == IEEE80211_BAND_2GHZ) { 105 if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
104 ichan->chanmode = CHANNEL_G; 106 ichan->chanmode = CHANNEL_G;
105 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM; 107 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
106 } else { 108 } else {
@@ -108,8 +110,22 @@ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
108 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; 110 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
109 } 111 }
110 112
111 if (channel_type != NL80211_CHAN_NO_HT) 113 switch (chandef->width) {
112 ichan->chanmode = ath9k_get_extchanmode(chan, channel_type); 114 case NL80211_CHAN_WIDTH_5:
115 ichan->channelFlags |= CHANNEL_QUARTER;
116 break;
117 case NL80211_CHAN_WIDTH_10:
118 ichan->channelFlags |= CHANNEL_HALF;
119 break;
120 case NL80211_CHAN_WIDTH_20_NOHT:
121 break;
122 case NL80211_CHAN_WIDTH_20:
123 case NL80211_CHAN_WIDTH_40:
124 ichan->chanmode = ath9k_get_extchanmode(chandef);
125 break;
126 default:
127 WARN_ON(1);
128 }
113} 129}
114EXPORT_SYMBOL(ath9k_cmn_update_ichannel); 130EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
115 131
@@ -125,8 +141,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
125 141
126 chan_idx = curchan->hw_value; 142 chan_idx = curchan->hw_value;
127 channel = &ah->channels[chan_idx]; 143 channel = &ah->channels[chan_idx];
128 ath9k_cmn_update_ichannel(channel, curchan, 144 ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
129 cfg80211_get_chandef_type(&hw->conf.chandef));
130 145
131 return channel; 146 return channel;
132} 147}
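[Editor's note] The rewritten ath9k_get_extchanmode no longer receives an explicit HT40PLUS/HT40MINUS hint; it derives the direction from where the 40 MHz center (chandef->center_freq1) sits relative to the primary channel, as the NL80211_CHAN_WIDTH_40 cases above show. A standalone sketch of that decision with sample 2 GHz frequencies:

#include <stdio.h>

/* Secondary channel above the primary pushes the 40 MHz center up,
 * hence center_freq1 > primary means HT40PLUS. */
static const char *ht40_dir(int primary_mhz, int center_freq1_mhz)
{
	return center_freq1_mhz > primary_mhz ? "HT40PLUS" : "HT40MINUS";
}

int main(void)
{
	/* Channel 6 primary (2437 MHz), secondary above -> center 2447. */
	printf("%s\n", ht40_dir(2437, 2447));
	/* Secondary below -> center 2427. */
	printf("%s\n", ht40_dir(2437, 2427));
	return 0;
}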
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 207d06995b15..e039bcbfbd79 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -44,8 +44,7 @@
44 44
45int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 45int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
46void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, 46void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
47 struct ieee80211_channel *chan, 47 struct cfg80211_chan_def *chandef);
48 enum nl80211_channel_type channel_type);
49struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 48struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
50 struct ath_hw *ah); 49 struct ath_hw *ah);
51int ath9k_cmn_count_streams(unsigned int chainmask, int max); 50int ath9k_cmn_count_streams(unsigned int chainmask, int max);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 87454f6c7b4f..c088744a6bfb 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -88,90 +88,6 @@ static const struct file_operations fops_debug = {
88 88
89#define DMA_BUF_LEN 1024 89#define DMA_BUF_LEN 1024
90 90
91static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
92 size_t count, loff_t *ppos)
93{
94 struct ath_softc *sc = file->private_data;
95 struct ath_hw *ah = sc->sc_ah;
96 char buf[32];
97 unsigned int len;
98
99 len = sprintf(buf, "0x%08x\n", ah->txchainmask);
100 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
101}
102
103static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
104 size_t count, loff_t *ppos)
105{
106 struct ath_softc *sc = file->private_data;
107 struct ath_hw *ah = sc->sc_ah;
108 unsigned long mask;
109 char buf[32];
110 ssize_t len;
111
112 len = min(count, sizeof(buf) - 1);
113 if (copy_from_user(buf, user_buf, len))
114 return -EFAULT;
115
116 buf[len] = '\0';
117 if (kstrtoul(buf, 0, &mask))
118 return -EINVAL;
119
120 ah->txchainmask = mask;
121 ah->caps.tx_chainmask = mask;
122 return count;
123}
124
125static const struct file_operations fops_tx_chainmask = {
126 .read = read_file_tx_chainmask,
127 .write = write_file_tx_chainmask,
128 .open = simple_open,
129 .owner = THIS_MODULE,
130 .llseek = default_llseek,
131};
132
133
134static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
135 size_t count, loff_t *ppos)
136{
137 struct ath_softc *sc = file->private_data;
138 struct ath_hw *ah = sc->sc_ah;
139 char buf[32];
140 unsigned int len;
141
142 len = sprintf(buf, "0x%08x\n", ah->rxchainmask);
143 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
144}
145
146static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
147 size_t count, loff_t *ppos)
148{
149 struct ath_softc *sc = file->private_data;
150 struct ath_hw *ah = sc->sc_ah;
151 unsigned long mask;
152 char buf[32];
153 ssize_t len;
154
155 len = min(count, sizeof(buf) - 1);
156 if (copy_from_user(buf, user_buf, len))
157 return -EFAULT;
158
159 buf[len] = '\0';
160 if (kstrtoul(buf, 0, &mask))
161 return -EINVAL;
162
163 ah->rxchainmask = mask;
164 ah->caps.rx_chainmask = mask;
165 return count;
166}
167
168static const struct file_operations fops_rx_chainmask = {
169 .read = read_file_rx_chainmask,
170 .write = write_file_rx_chainmask,
171 .open = simple_open,
172 .owner = THIS_MODULE,
173 .llseek = default_llseek,
174};
175 91
176static ssize_t read_file_ani(struct file *file, char __user *user_buf, 92static ssize_t read_file_ani(struct file *file, char __user *user_buf,
177 size_t count, loff_t *ppos) 93 size_t count, loff_t *ppos)
@@ -270,25 +186,29 @@ static const struct file_operations fops_ani = {
270 .llseek = default_llseek, 186 .llseek = default_llseek,
271}; 187};
272 188
273static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf, 189#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
274 size_t count, loff_t *ppos) 190
191static ssize_t read_file_bt_ant_diversity(struct file *file,
192 char __user *user_buf,
193 size_t count, loff_t *ppos)
275{ 194{
276 struct ath_softc *sc = file->private_data; 195 struct ath_softc *sc = file->private_data;
277 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 196 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
278 char buf[32]; 197 char buf[32];
279 unsigned int len; 198 unsigned int len;
280 199
281 len = sprintf(buf, "%d\n", common->antenna_diversity); 200 len = sprintf(buf, "%d\n", common->bt_ant_diversity);
282 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 201 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
283} 202}
284 203
285static ssize_t write_file_ant_diversity(struct file *file, 204static ssize_t write_file_bt_ant_diversity(struct file *file,
286 const char __user *user_buf, 205 const char __user *user_buf,
287 size_t count, loff_t *ppos) 206 size_t count, loff_t *ppos)
288{ 207{
289 struct ath_softc *sc = file->private_data; 208 struct ath_softc *sc = file->private_data;
290 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 209 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
291 unsigned long antenna_diversity; 210 struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps;
211 unsigned long bt_ant_diversity;
292 char buf[32]; 212 char buf[32];
293 ssize_t len; 213 ssize_t len;
294 214
@@ -296,26 +216,147 @@ static ssize_t write_file_ant_diversity(struct file *file,
296 if (copy_from_user(buf, user_buf, len)) 216 if (copy_from_user(buf, user_buf, len))
297 return -EFAULT; 217 return -EFAULT;
298 218
299 if (!AR_SREV_9565(sc->sc_ah)) 219 if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
300 goto exit; 220 goto exit;
301 221
302 buf[len] = '\0'; 222 buf[len] = '\0';
303 if (kstrtoul(buf, 0, &antenna_diversity)) 223 if (kstrtoul(buf, 0, &bt_ant_diversity))
304 return -EINVAL; 224 return -EINVAL;
305 225
306 common->antenna_diversity = !!antenna_diversity; 226 common->bt_ant_diversity = !!bt_ant_diversity;
307 ath9k_ps_wakeup(sc); 227 ath9k_ps_wakeup(sc);
308 ath_ant_comb_update(sc); 228 ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity);
309 ath_dbg(common, CONFIG, "Antenna diversity: %d\n", 229 ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n",
310 common->antenna_diversity); 230 common->bt_ant_diversity);
311 ath9k_ps_restore(sc); 231 ath9k_ps_restore(sc);
312exit: 232exit:
313 return count; 233 return count;
314} 234}
315 235
316static const struct file_operations fops_ant_diversity = { 236static const struct file_operations fops_bt_ant_diversity = {
317 .read = read_file_ant_diversity, 237 .read = read_file_bt_ant_diversity,
318 .write = write_file_ant_diversity, 238 .write = write_file_bt_ant_diversity,
239 .open = simple_open,
240 .owner = THIS_MODULE,
241 .llseek = default_llseek,
242};
243
244#endif
245
246void ath9k_debug_stat_ant(struct ath_softc *sc,
247 struct ath_hw_antcomb_conf *div_ant_conf,
248 int main_rssi_avg, int alt_rssi_avg)
249{
250 struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
251 struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
252
253 as_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
254 as_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;
255
256 as_main->rssi_avg = main_rssi_avg;
257 as_alt->rssi_avg = alt_rssi_avg;
258}
259
260static ssize_t read_file_antenna_diversity(struct file *file,
261 char __user *user_buf,
262 size_t count, loff_t *ppos)
263{
264 struct ath_softc *sc = file->private_data;
265 struct ath_hw *ah = sc->sc_ah;
266 struct ath9k_hw_capabilities *pCap = &ah->caps;
267 struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
268 struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
269 struct ath_hw_antcomb_conf div_ant_conf;
270 unsigned int len = 0, size = 1024;
271 ssize_t retval = 0;
272 char *buf;
273 char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
274 "LNA2",
275 "LNA1",
276 "LNA1_PLUS_LNA2"};
277
278 buf = kzalloc(size, GFP_KERNEL);
279 if (buf == NULL)
280 return -ENOMEM;
281
282 if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
283 len += snprintf(buf + len, size - len, "%s\n",
284 "Antenna Diversity Combining is disabled");
285 goto exit;
286 }
287
288 ath9k_ps_wakeup(sc);
289 ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
290 len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
291 lna_conf_str[div_ant_conf.main_lna_conf]);
292 len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
293 lna_conf_str[div_ant_conf.alt_lna_conf]);
294 len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
295 as_main->rssi_avg);
296 len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
297 as_alt->rssi_avg);
298 ath9k_ps_restore(sc);
299
300 len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
301 len += snprintf(buf + len, size - len, "-------------------\n");
302
303 len += snprintf(buf + len, size - len, "%30s%15s\n",
304 "MAIN", "ALT");
305 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
306 "TOTAL COUNT",
307 as_main->recv_cnt,
308 as_alt->recv_cnt);
309 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
310 "LNA1",
311 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
312 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
313 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
314 "LNA2",
315 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
316 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
317 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
318 "LNA1 + LNA2",
319 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
320 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
321 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
322 "LNA1 - LNA2",
323 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
324 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
325
326 len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
327 len += snprintf(buf + len, size - len, "--------------------\n");
328
329 len += snprintf(buf + len, size - len, "%30s%15s\n",
330 "MAIN", "ALT");
331 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
332 "LNA1",
333 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
334 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
335 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
336 "LNA2",
337 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
338 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
339 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
340 "LNA1 + LNA2",
341 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
342 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
343 len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
344 "LNA1 - LNA2",
345 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
346 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
347
348exit:
349 if (len > size)
350 len = size;
351
352 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
353 kfree(buf);
354
355 return retval;
356}
357
358static const struct file_operations fops_antenna_diversity = {
359 .read = read_file_antenna_diversity,
319 .open = simple_open, 360 .open = simple_open,
320 .owner = THIS_MODULE, 361 .owner = THIS_MODULE,
321 .llseek = default_llseek, 362 .llseek = default_llseek,
@@ -607,6 +648,28 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
607 return retval; 648 return retval;
608} 649}
609 650
651static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
652 char *buf, ssize_t size)
653{
654 ssize_t len = 0;
655
656 ath_txq_lock(sc, txq);
657
658 len += snprintf(buf + len, size - len, "%s: %d ",
659 "qnum", txq->axq_qnum);
660 len += snprintf(buf + len, size - len, "%s: %2d ",
661 "qdepth", txq->axq_depth);
662 len += snprintf(buf + len, size - len, "%s: %2d ",
663 "ampdu-depth", txq->axq_ampdu_depth);
664 len += snprintf(buf + len, size - len, "%s: %3d ",
665 "pending", txq->pending_frames);
666 len += snprintf(buf + len, size - len, "%s: %d\n",
667 "stopped", txq->stopped);
668
669 ath_txq_unlock(sc, txq);
670 return len;
671}
672
610static ssize_t read_file_queues(struct file *file, char __user *user_buf, 673static ssize_t read_file_queues(struct file *file, char __user *user_buf,
611 size_t count, loff_t *ppos) 674 size_t count, loff_t *ppos)
612{ 675{
@@ -624,24 +687,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
624 687
625 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 688 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
626 txq = sc->tx.txq_map[i]; 689 txq = sc->tx.txq_map[i];
627 len += snprintf(buf + len, size - len, "(%s): ", qname[i]); 690 len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
628 691 len += print_queue(sc, txq, buf + len, size - len);
629 ath_txq_lock(sc, txq);
630
631 len += snprintf(buf + len, size - len, "%s: %d ",
632 "qnum", txq->axq_qnum);
633 len += snprintf(buf + len, size - len, "%s: %2d ",
634 "qdepth", txq->axq_depth);
635 len += snprintf(buf + len, size - len, "%s: %2d ",
636 "ampdu-depth", txq->axq_ampdu_depth);
637 len += snprintf(buf + len, size - len, "%s: %3d ",
638 "pending", txq->pending_frames);
639 len += snprintf(buf + len, size - len, "%s: %d\n",
640 "stopped", txq->stopped);
641
642 ath_txq_unlock(sc, txq);
643 } 692 }
644 693
694 len += snprintf(buf + len, size - len, "(CAB): ");
695 len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
696
645 if (len > size) 697 if (len > size)
646 len = size; 698 len = size;
647 699
@@ -1589,17 +1641,7 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
1589 struct dentry *dir) 1641 struct dentry *dir)
1590{ 1642{
1591 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1643 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1592 an->node_stat = debugfs_create_file("node_stat", S_IRUGO, 1644 debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
1593 dir, an, &fops_node_stat);
1594}
1595
1596void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
1597 struct ieee80211_vif *vif,
1598 struct ieee80211_sta *sta,
1599 struct dentry *dir)
1600{
1601 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1602 debugfs_remove(an->node_stat);
1603} 1645}
1604 1646
1605/* Ethtool support for get-stats */ 1647/* Ethtool support for get-stats */
@@ -1770,10 +1812,10 @@ int ath9k_init_debug(struct ath_hw *ah)
1770 &fops_reset); 1812 &fops_reset);
1771 debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc, 1813 debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
1772 &fops_recv); 1814 &fops_recv);
1773 debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR, 1815 debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
1774 sc->debug.debugfs_phy, sc, &fops_rx_chainmask); 1816 &ah->rxchainmask);
1775 debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR, 1817 debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
1776 sc->debug.debugfs_phy, sc, &fops_tx_chainmask); 1818 &ah->txchainmask);
1777 debugfs_create_file("ani", S_IRUSR | S_IWUSR, 1819 debugfs_create_file("ani", S_IRUSR | S_IWUSR,
1778 sc->debug.debugfs_phy, sc, &fops_ani); 1820 sc->debug.debugfs_phy, sc, &fops_ani);
1779 debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1821 debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -1814,9 +1856,11 @@ int ath9k_init_debug(struct ath_hw *ah)
1814 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); 1856 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
1815 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, 1857 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
1816 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); 1858 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
1817 debugfs_create_file("diversity", S_IRUSR | S_IWUSR, 1859 debugfs_create_file("antenna_diversity", S_IRUSR,
1818 sc->debug.debugfs_phy, sc, &fops_ant_diversity); 1860 sc->debug.debugfs_phy, sc, &fops_antenna_diversity);
1819#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1861#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1862 debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR,
1863 sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity);
1820 debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc, 1864 debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
1821 &fops_btcoex); 1865 &fops_btcoex);
1822#endif 1866#endif
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index fc679198a0f3..6e1556fa2f3e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -28,9 +28,13 @@ struct fft_sample_tlv;
28#ifdef CONFIG_ATH9K_DEBUGFS 28#ifdef CONFIG_ATH9K_DEBUGFS
29#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++ 29#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
30#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++ 30#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
31#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
 32#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++
31#else 33#else
32#define TX_STAT_INC(q, c) do { } while (0) 34#define TX_STAT_INC(q, c) do { } while (0)
33#define RESET_STAT_INC(sc, type) do { } while (0) 35#define RESET_STAT_INC(sc, type) do { } while (0)
36#define ANT_STAT_INC(i, c) do { } while (0)
37#define ANT_LNA_INC(i, c) do { } while (0)
34#endif 38#endif
35 39
36enum ath_reset_type { 40enum ath_reset_type {
@@ -243,11 +247,22 @@ struct ath_rx_stats {
243 u32 rx_spectral; 247 u32 rx_spectral;
244}; 248};
245 249
250#define ANT_MAIN 0
251#define ANT_ALT 1
252
253struct ath_antenna_stats {
254 u32 recv_cnt;
255 u32 rssi_avg;
256 u32 lna_recv_cnt[4];
257 u32 lna_attempt_cnt[4];
258};
259
246struct ath_stats { 260struct ath_stats {
247 struct ath_interrupt_stats istats; 261 struct ath_interrupt_stats istats;
248 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; 262 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
249 struct ath_rx_stats rxstats; 263 struct ath_rx_stats rxstats;
250 struct ath_dfs_stats dfs_stats; 264 struct ath_dfs_stats dfs_stats;
265 struct ath_antenna_stats ant_stats[2];
251 u32 reset[__RESET_TYPE_MAX]; 266 u32 reset[__RESET_TYPE_MAX];
252}; 267};
253 268
@@ -277,14 +292,11 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif, 292 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta, 293 struct ieee80211_sta *sta,
279 struct dentry *dir); 294 struct dentry *dir);
280void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
281 struct ieee80211_vif *vif,
282 struct ieee80211_sta *sta,
283 struct dentry *dir);
284
285void ath_debug_send_fft_sample(struct ath_softc *sc, 295void ath_debug_send_fft_sample(struct ath_softc *sc,
286 struct fft_sample_tlv *fft_sample); 296 struct fft_sample_tlv *fft_sample);
287 297void ath9k_debug_stat_ant(struct ath_softc *sc,
298 struct ath_hw_antcomb_conf *div_ant_conf,
299 int main_rssi_avg, int alt_rssi_avg);
288#else 300#else
289 301
290#define RX_STAT_INC(c) /* NOP */ 302#define RX_STAT_INC(c) /* NOP */
@@ -297,12 +309,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
297static inline void ath9k_deinit_debug(struct ath_softc *sc) 309static inline void ath9k_deinit_debug(struct ath_softc *sc)
298{ 310{
299} 311}
300
301static inline void ath_debug_stat_interrupt(struct ath_softc *sc, 312static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
302 enum ath9k_int status) 313 enum ath9k_int status)
303{ 314{
304} 315}
305
306static inline void ath_debug_stat_tx(struct ath_softc *sc, 316static inline void ath_debug_stat_tx(struct ath_softc *sc,
307 struct ath_buf *bf, 317 struct ath_buf *bf,
308 struct ath_tx_status *ts, 318 struct ath_tx_status *ts,
@@ -310,11 +320,16 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
310 unsigned int flags) 320 unsigned int flags)
311{ 321{
312} 322}
313
314static inline void ath_debug_stat_rx(struct ath_softc *sc, 323static inline void ath_debug_stat_rx(struct ath_softc *sc,
315 struct ath_rx_status *rs) 324 struct ath_rx_status *rs)
316{ 325{
317} 326}
327static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
328 struct ath_hw_antcomb_conf *div_ant_conf,
329 int main_rssi_avg, int alt_rssi_avg)
330{
331
332}
318 333
319#endif /* CONFIG_ATH9K_DEBUGFS */ 334#endif /* CONFIG_ATH9K_DEBUGFS */
320 335
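
For context on ath9k_debug_stat_ant() declared above: the CONFIG_ATH9K_DEBUGFS
implementation lives in debug.c and is not part of this excerpt. A hedged
sketch of one plausible shape, assuming struct ath_hw_antcomb_conf carries
main_lna_conf/alt_lna_conf members (defined elsewhere in the driver, not in
this patch):

void ath9k_debug_stat_ant(struct ath_softc *sc,
			  struct ath_hw_antcomb_conf *div_ant_conf,
			  int main_rssi_avg, int alt_rssi_avg)
{
	struct ath_antenna_stats *main_stat = &sc->debug.stats.ant_stats[ANT_MAIN];
	struct ath_antenna_stats *alt_stat = &sc->debug.stats.ant_stats[ANT_ALT];

	/* Bucket each diversity scan by the LNA configuration tried;
	 * ANT_LNA_INC() fills lna_recv_cnt[] from the RX path. */
	main_stat->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
	alt_stat->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;

	/* Keep the latest averaged RSSI per antenna. */
	main_stat->rssi_avg = main_rssi_avg;
	alt_stat->rssi_avg = alt_rssi_avg;
}
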
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index c2bfd748eed8..9ea8e4b779c9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -812,6 +812,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
812static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, 812static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
813 struct ath9k_channel *chan) 813 struct ath9k_channel *chan)
814{ 814{
815 struct ath9k_hw_capabilities *pCap = &ah->caps;
815 struct modal_eep_4k_header *pModal; 816 struct modal_eep_4k_header *pModal;
816 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; 817 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
817 struct base_eep_header_4k *pBase = &eep->baseEepHeader; 818 struct base_eep_header_4k *pBase = &eep->baseEepHeader;
@@ -858,6 +859,24 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
858 859
859 REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal); 860 REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
860 regVal = REG_READ(ah, AR_PHY_CCK_DETECT); 861 regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
862
863 if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
864 /*
865 * If diversity combining is enabled,
866 * set MAIN to LNA1 and ALT to LNA2 initially.
867 */
868 regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
869 regVal &= (~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
870 AR_PHY_9285_ANT_DIV_ALT_LNACONF));
871
872 regVal |= (ATH_ANT_DIV_COMB_LNA1 <<
873 AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S);
874 regVal |= (ATH_ANT_DIV_COMB_LNA2 <<
875 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S);
876 regVal &= (~(AR_PHY_9285_FAST_DIV_BIAS));
877 regVal |= (0 << AR_PHY_9285_FAST_DIV_BIAS_S);
878 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
879 }
861 } 880 }
862 881
863 if (pModal->version >= 2) { 882 if (pModal->version >= 2) {
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5205a3625e84..6d5d716adc1b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -115,10 +115,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
115 cmd->skb = skb; 115 cmd->skb = skb;
116 cmd->hif_dev = hif_dev; 116 cmd->hif_dev = hif_dev;
117 117
118 usb_fill_bulk_urb(urb, hif_dev->udev, 118 usb_fill_int_urb(urb, hif_dev->udev,
119 usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE), 119 usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
120 skb->data, skb->len, 120 skb->data, skb->len,
121 hif_usb_regout_cb, cmd); 121 hif_usb_regout_cb, cmd, 1);
122 122
123 usb_anchor_urb(urb, &hif_dev->regout_submitted); 123 usb_anchor_urb(urb, &hif_dev->regout_submitted);
124 ret = usb_submit_urb(urb, GFP_KERNEL); 124 ret = usb_submit_urb(urb, GFP_KERNEL);
@@ -723,11 +723,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
723 return; 723 return;
724 } 724 }
725 725
726 usb_fill_bulk_urb(urb, hif_dev->udev, 726 usb_fill_int_urb(urb, hif_dev->udev,
727 usb_rcvbulkpipe(hif_dev->udev, 727 usb_rcvintpipe(hif_dev->udev,
728 USB_REG_IN_PIPE), 728 USB_REG_IN_PIPE),
729 nskb->data, MAX_REG_IN_BUF_SIZE, 729 nskb->data, MAX_REG_IN_BUF_SIZE,
730 ath9k_hif_usb_reg_in_cb, nskb); 730 ath9k_hif_usb_reg_in_cb, nskb, 1);
731 } 731 }
732 732
733resubmit: 733resubmit:
@@ -909,11 +909,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
909 goto err_skb; 909 goto err_skb;
910 } 910 }
911 911
912 usb_fill_bulk_urb(urb, hif_dev->udev, 912 usb_fill_int_urb(urb, hif_dev->udev,
913 usb_rcvbulkpipe(hif_dev->udev, 913 usb_rcvintpipe(hif_dev->udev,
914 USB_REG_IN_PIPE), 914 USB_REG_IN_PIPE),
915 skb->data, MAX_REG_IN_BUF_SIZE, 915 skb->data, MAX_REG_IN_BUF_SIZE,
916 ath9k_hif_usb_reg_in_cb, skb); 916 ath9k_hif_usb_reg_in_cb, skb, 1);
917 917
918 /* Anchor URB */ 918 /* Anchor URB */
919 usb_anchor_urb(urb, &hif_dev->reg_in_submitted); 919 usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -1031,9 +1031,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
1031 1031
1032static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) 1032static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
1033{ 1033{
1034 struct usb_host_interface *alt = &hif_dev->interface->altsetting[0]; 1034 int ret;
1035 struct usb_endpoint_descriptor *endp;
1036 int ret, idx;
1037 1035
1038 ret = ath9k_hif_usb_download_fw(hif_dev); 1036 ret = ath9k_hif_usb_download_fw(hif_dev);
1039 if (ret) { 1037 if (ret) {
@@ -1043,20 +1041,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
1043 return ret; 1041 return ret;
1044 } 1042 }
1045 1043
1046 /* On downloading the firmware to the target, the USB descriptor of EP4
1047 * is 'patched' to change the type of the endpoint to Bulk. This will
1048 * bring down CPU usage during the scan period.
1049 */
1050 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
1051 endp = &alt->endpoint[idx].desc;
1052 if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1053 == USB_ENDPOINT_XFER_INT) {
1054 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
1055 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
1056 endp->bInterval = 0;
1057 }
1058 }
1059
1060 /* Alloc URBs */ 1044 /* Alloc URBs */
1061 ret = ath9k_hif_usb_alloc_urbs(hif_dev); 1045 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
1062 if (ret) { 1046 if (ret) {
@@ -1268,7 +1252,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
1268 if (!buf) 1252 if (!buf)
1269 return; 1253 return;
1270 1254
1271 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE), 1255 ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
1272 buf, 4, NULL, HZ); 1256 buf, 4, NULL, HZ);
1273 if (ret) 1257 if (ret)
1274 dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n"); 1258 dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
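
The hif_usb.c hunks above switch the register endpoints from bulk to
interrupt transfers: usb_fill_int_urb() takes the same arguments as
usb_fill_bulk_urb() plus a trailing polling interval (1 throughout this
patch), and usb_interrupt_msg()/usb_sndintpipe() replace the bulk variants.
With real interrupt endpoints there is no longer any need to patch the EP4
descriptor over to bulk, hence the loop deleted from
ath9k_hif_usb_dev_init(). A condensed sketch of the new pattern (the helper
name is illustrative; USB_REG_OUT_PIPE is the driver's endpoint number):

#include <linux/usb.h>

static int ath9k_send_regout_sketch(struct usb_device *udev, struct urb *urb,
				    void *buf, int len,
				    usb_complete_t complete_fn, void *ctx)
{
	/* Last argument is the polling interval in (micro)frames. */
	usb_fill_int_urb(urb, udev,
			 usb_sndintpipe(udev, USB_REG_OUT_PIPE),
			 buf, len, complete_fn, ctx, 1);

	return usb_submit_urb(urb, GFP_KERNEL);
}
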
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 5c1bec18c9e3..d44258172c0f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1203,16 +1203,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1203 1203
1204 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) { 1204 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
1205 struct ieee80211_channel *curchan = hw->conf.chandef.chan; 1205 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
1206 enum nl80211_channel_type channel_type =
1207 cfg80211_get_chandef_type(&hw->conf.chandef);
1208 int pos = curchan->hw_value; 1206 int pos = curchan->hw_value;
1209 1207
1210 ath_dbg(common, CONFIG, "Set channel: %d MHz\n", 1208 ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
1211 curchan->center_freq); 1209 curchan->center_freq);
1212 1210
1213 ath9k_cmn_update_ichannel(&priv->ah->channels[pos], 1211 ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
1214 hw->conf.chandef.chan, 1212 &hw->conf.chandef);
1215 channel_type);
1216 1213
1217 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1214 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1218 ath_err(common, "Unable to set channel\n"); 1215 ath_err(common, "Unable to set channel\n");
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 14b701140b49..83f4927aeaca 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,13 +78,16 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf); 78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
79} 79}
80 80
81static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah, 81#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
82 bool enable) 82
83static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
83{ 84{
84 if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv) 85 if (ath9k_hw_ops(ah)->set_bt_ant_diversity)
85 ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable); 86 ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
86} 87}
87 88
89#endif
90
88/* Private hardware call ops */ 91/* Private hardware call ops */
89 92
90/* PHY ops */ 93/* PHY ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4ca0cb060106..ecc6ec4a1edb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -450,7 +450,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
450 ah->config.ack_6mb = 0x0; 450 ah->config.ack_6mb = 0x0;
451 ah->config.cwm_ignore_extcca = 0; 451 ah->config.cwm_ignore_extcca = 0;
452 ah->config.pcie_clock_req = 0; 452 ah->config.pcie_clock_req = 0;
453 ah->config.pcie_waen = 0;
454 ah->config.analog_shiftreg = 1; 453 ah->config.analog_shiftreg = 1;
455 454
456 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 455 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
@@ -575,18 +574,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
575 * We need to do this to avoid RMW of this register. We cannot 574 * We need to do this to avoid RMW of this register. We cannot
576 * read the reg when chip is asleep. 575 * read the reg when chip is asleep.
577 */ 576 */
578 ah->WARegVal = REG_READ(ah, AR_WA); 577 if (AR_SREV_9300_20_OR_LATER(ah)) {
579 ah->WARegVal |= (AR_WA_D3_L1_DISABLE | 578 ah->WARegVal = REG_READ(ah, AR_WA);
580 AR_WA_ASPM_TIMER_BASED_DISABLE); 579 ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
580 AR_WA_ASPM_TIMER_BASED_DISABLE);
581 }
581 582
582 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 583 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
583 ath_err(common, "Couldn't reset chip\n"); 584 ath_err(common, "Couldn't reset chip\n");
584 return -EIO; 585 return -EIO;
585 } 586 }
586 587
587 if (AR_SREV_9462(ah))
588 ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
589
590 if (AR_SREV_9565(ah)) { 588 if (AR_SREV_9565(ah)) {
591 ah->WARegVal |= AR_WA_BIT22; 589 ah->WARegVal |= AR_WA_BIT22;
592 REG_WRITE(ah, AR_WA, ah->WARegVal); 590 REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -656,8 +654,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
656 ath9k_hw_init_cal_settings(ah); 654 ath9k_hw_init_cal_settings(ah);
657 655
658 ah->ani_function = ATH9K_ANI_ALL; 656 ah->ani_function = ATH9K_ANI_ALL;
659 if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
660 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
661 if (!AR_SREV_9300_20_OR_LATER(ah)) 657 if (!AR_SREV_9300_20_OR_LATER(ah))
662 ah->ani_function &= ~ATH9K_ANI_MRC_CCK; 658 ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
663 659
@@ -1069,7 +1065,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1069 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1065 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1070 tx_lat += 11; 1066 tx_lat += 11;
1071 1067
1072 sifstime *= 2; 1068 sifstime = 32;
1073 ack_offset = 16; 1069 ack_offset = 16;
1074 slottime = 13; 1070 slottime = 13;
1075 } else if (IS_CHAN_QUARTER_RATE(chan)) { 1071 } else if (IS_CHAN_QUARTER_RATE(chan)) {
@@ -1079,7 +1075,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1079 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1075 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1080 tx_lat += 22; 1076 tx_lat += 22;
1081 1077
1082 sifstime *= 4; 1078 sifstime = 64;
1083 ack_offset = 32; 1079 ack_offset = 32;
1084 slottime = 21; 1080 slottime = 21;
1085 } else { 1081 } else {
@@ -1116,7 +1112,6 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1116 ctstimeout += 48 - sifstime - ah->slottime; 1112 ctstimeout += 48 - sifstime - ah->slottime;
1117 } 1113 }
1118 1114
1119
1120 ath9k_hw_set_sifs_time(ah, sifstime); 1115 ath9k_hw_set_sifs_time(ah, sifstime);
1121 ath9k_hw_setslottime(ah, slottime); 1116 ath9k_hw_setslottime(ah, slottime);
1122 ath9k_hw_set_ack_timeout(ah, acktimeout); 1117 ath9k_hw_set_ack_timeout(ah, acktimeout);
@@ -1496,16 +1491,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1496 struct ath9k_channel *chan) 1491 struct ath9k_channel *chan)
1497{ 1492{
1498 struct ath_common *common = ath9k_hw_common(ah); 1493 struct ath_common *common = ath9k_hw_common(ah);
1494 struct ath9k_hw_capabilities *pCap = &ah->caps;
1495 bool band_switch = false, mode_diff = false;
1496 u8 ini_reloaded = 0;
1499 u32 qnum; 1497 u32 qnum;
1500 int r; 1498 int r;
1501 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1502 bool band_switch, mode_diff;
1503 u8 ini_reloaded;
1504 1499
1505 band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) != 1500 if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
1506 (ah->curchan->channelFlags & (CHANNEL_2GHZ | 1501 u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
1507 CHANNEL_5GHZ)); 1502 u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
1508 mode_diff = (chan->chanmode != ah->curchan->chanmode); 1503 band_switch = (cur != new);
1504 mode_diff = (chan->chanmode != ah->curchan->chanmode);
1505 }
1509 1506
1510 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1507 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
1511 if (ath9k_hw_numtxpending(ah, qnum)) { 1508 if (ath9k_hw_numtxpending(ah, qnum)) {
@@ -1520,11 +1517,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1520 return false; 1517 return false;
1521 } 1518 }
1522 1519
1523 if (edma && (band_switch || mode_diff)) { 1520 if (band_switch || mode_diff) {
1524 ath9k_hw_mark_phy_inactive(ah); 1521 ath9k_hw_mark_phy_inactive(ah);
1525 udelay(5); 1522 udelay(5);
1526 1523
1527 ath9k_hw_init_pll(ah, NULL); 1524 if (band_switch)
1525 ath9k_hw_init_pll(ah, chan);
1528 1526
1529 if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) { 1527 if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
1530 ath_err(common, "Failed to do fast channel change\n"); 1528 ath_err(common, "Failed to do fast channel change\n");
@@ -1541,22 +1539,21 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1541 } 1539 }
1542 ath9k_hw_set_clockrate(ah); 1540 ath9k_hw_set_clockrate(ah);
1543 ath9k_hw_apply_txpower(ah, chan, false); 1541 ath9k_hw_apply_txpower(ah, chan, false);
1544 ath9k_hw_rfbus_done(ah);
1545 1542
1546 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1543 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1547 ath9k_hw_set_delta_slope(ah, chan); 1544 ath9k_hw_set_delta_slope(ah, chan);
1548 1545
1549 ath9k_hw_spur_mitigate_freq(ah, chan); 1546 ath9k_hw_spur_mitigate_freq(ah, chan);
1550 1547
1551 if (edma && (band_switch || mode_diff)) { 1548 if (band_switch || ini_reloaded)
1552 ah->ah_flags |= AH_FASTCC; 1549 ah->eep_ops->set_board_values(ah, chan);
1553 if (band_switch || ini_reloaded)
1554 ah->eep_ops->set_board_values(ah, chan);
1555 1550
1556 ath9k_hw_init_bb(ah, chan); 1551 ath9k_hw_init_bb(ah, chan);
1552 ath9k_hw_rfbus_done(ah);
1557 1553
1558 if (band_switch || ini_reloaded) 1554 if (band_switch || ini_reloaded) {
1559 ath9k_hw_init_cal(ah, chan); 1555 ah->ah_flags |= AH_FASTCC;
1556 ath9k_hw_init_cal(ah, chan);
1560 ah->ah_flags &= ~AH_FASTCC; 1557 ah->ah_flags &= ~AH_FASTCC;
1561 } 1558 }
1562 1559
@@ -1778,16 +1775,11 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
1778/* 1775/*
1779 * Fast channel change: 1776 * Fast channel change:
1780 * (Change synthesizer based on channel freq without resetting chip) 1777 * (Change synthesizer based on channel freq without resetting chip)
1781 *
1782 * Don't do FCC when
1783 * - Flag is not set
1784 * - Chip is just coming out of full sleep
1785 * - Channel to be set is same as current channel
1786 * - Channel flags are different, (eg.,moving from 2GHz to 5GHz channel)
1787 */ 1778 */
1788static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan) 1779static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1789{ 1780{
1790 struct ath_common *common = ath9k_hw_common(ah); 1781 struct ath_common *common = ath9k_hw_common(ah);
1782 struct ath9k_hw_capabilities *pCap = &ah->caps;
1791 int ret; 1783 int ret;
1792 1784
1793 if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI) 1785 if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
@@ -1806,9 +1798,21 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1806 (CHANNEL_HALF | CHANNEL_QUARTER)) 1798 (CHANNEL_HALF | CHANNEL_QUARTER))
1807 goto fail; 1799 goto fail;
1808 1800
1809 if ((chan->channelFlags & CHANNEL_ALL) != 1801 /*
 1810 (ah->curchan->channelFlags & CHANNEL_ALL)) 1802 * If cross-band FCC is not supported, bail out if
 1811 goto fail; 1803 * either channelFlags or chanmode differs.
1804 *
1805 * chanmode will be different if the HT operating mode
1806 * changes because of CSA.
1807 */
1808 if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
1809 if ((chan->channelFlags & CHANNEL_ALL) !=
1810 (ah->curchan->channelFlags & CHANNEL_ALL))
1811 goto fail;
1812
1813 if (chan->chanmode != ah->curchan->chanmode)
1814 goto fail;
1815 }
1812 1816
1813 if (!ath9k_hw_check_alive(ah)) 1817 if (!ath9k_hw_check_alive(ah))
1814 goto fail; 1818 goto fail;
@@ -2047,7 +2051,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2047 2051
2048 ath9k_hw_apply_gpio_override(ah); 2052 ath9k_hw_apply_gpio_override(ah);
2049 2053
2050 if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv) 2054 if (AR_SREV_9565(ah) && common->bt_ant_diversity)
2051 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON); 2055 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
2052 2056
2053 return 0; 2057 return 0;
@@ -2504,7 +2508,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2504 else 2508 else
2505 pCap->rts_aggr_limit = (8 * 1024); 2509 pCap->rts_aggr_limit = (8 * 1024);
2506 2510
2507#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 2511#ifdef CONFIG_ATH9K_RFKILL
2508 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); 2512 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
2509 if (ah->rfsilent & EEP_RFSILENT_ENABLED) { 2513 if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
2510 ah->rfkill_gpio = 2514 ah->rfkill_gpio =
@@ -2550,34 +2554,28 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2550 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah)) 2554 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
2551 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; 2555 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
2552 2556
2553 if (AR_SREV_9285(ah)) 2557 if (AR_SREV_9285(ah)) {
2554 if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) { 2558 if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
2555 ant_div_ctl1 = 2559 ant_div_ctl1 =
2556 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 2560 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
2557 if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) 2561 if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) {
2558 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; 2562 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
2563 ath_info(common, "Enable LNA combining\n");
2564 }
2559 } 2565 }
2566 }
2567
2560 if (AR_SREV_9300_20_OR_LATER(ah)) { 2568 if (AR_SREV_9300_20_OR_LATER(ah)) {
2561 if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE)) 2569 if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
2562 pCap->hw_caps |= ATH9K_HW_CAP_APM; 2570 pCap->hw_caps |= ATH9K_HW_CAP_APM;
2563 } 2571 }
2564 2572
2565
2566 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) { 2573 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
2567 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 2574 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
2568 /* 2575 if ((ant_div_ctl1 >> 0x6) == 0x3) {
2569 * enable the diversity-combining algorithm only when
2570 * both enable_lna_div and enable_fast_div are set
2571 * Table for Diversity
2572 * ant_div_alt_lnaconf bit 0-1
2573 * ant_div_main_lnaconf bit 2-3
2574 * ant_div_alt_gaintb bit 4
2575 * ant_div_main_gaintb bit 5
2576 * enable_ant_div_lnadiv bit 6
2577 * enable_ant_fast_div bit 7
2578 */
2579 if ((ant_div_ctl1 >> 0x6) == 0x3)
2580 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; 2576 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
2577 ath_info(common, "Enable LNA combining\n");
2578 }
2581 } 2579 }
2582 2580
2583 if (ath9k_hw_dfs_tested(ah)) 2581 if (ath9k_hw_dfs_tested(ah))
@@ -2610,6 +2608,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2610 ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) 2608 ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
2611 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; 2609 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
2612 2610
2611 /*
2612 * Fast channel change across bands is available
2613 * only for AR9462 and AR9565.
2614 */
2615 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
2616 pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
2617
2613 return 0; 2618 return 0;
2614} 2619}
2615 2620
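
One readability casualty in ath9k_hw_fill_cap_info() above: the comment
documenting the EEP_ANT_DIV_CTL1 bit layout is deleted while the
(ant_div_ctl1 >> 0x6) == 0x3 check it explained survives. For reference, the
layout from the removed comment, restated as an illustrative helper (not
part of the patch):

/* ant_div_ctl1 layout:
 *   bits 0-1: ant_div_alt_lnaconf     bit 4: ant_div_alt_gaintb
 *   bits 2-3: ant_div_main_lnaconf    bit 5: ant_div_main_gaintb
 *   bit 6:    enable_ant_div_lnadiv   bit 7: enable_ant_fast_div
 * Diversity combining requires both enable bits. */
static bool ant_div_comb_enabled(u8 ant_div_ctl1)
{
	return (ant_div_ctl1 >> 6) == 0x3;
}
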
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index cd74b3afef7d..69a907b55a73 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -247,6 +247,8 @@ enum ath9k_hw_caps {
247 ATH9K_HW_CAP_DFS = BIT(16), 247 ATH9K_HW_CAP_DFS = BIT(16),
248 ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), 248 ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
249 ATH9K_HW_CAP_PAPRD = BIT(18), 249 ATH9K_HW_CAP_PAPRD = BIT(18),
250 ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19),
251 ATH9K_HW_CAP_BT_ANT_DIV = BIT(20),
250}; 252};
251 253
252/* 254/*
@@ -309,8 +311,11 @@ struct ath9k_ops_config {
309 u16 ani_poll_interval; /* ANI poll interval in ms */ 311 u16 ani_poll_interval; /* ANI poll interval in ms */
310 312
311 /* Platform specific config */ 313 /* Platform specific config */
314 u32 aspm_l1_fix;
312 u32 xlna_gpio; 315 u32 xlna_gpio;
316 u32 ant_ctrl_comm2g_switch_enable;
313 bool xatten_margin_cfg; 317 bool xatten_margin_cfg;
318 bool alt_mingainidx;
314}; 319};
315 320
316enum ath9k_int { 321enum ath9k_int {
@@ -716,11 +721,14 @@ struct ath_hw_ops {
716 struct ath_hw_antcomb_conf *antconf); 721 struct ath_hw_antcomb_conf *antconf);
717 void (*antdiv_comb_conf_set)(struct ath_hw *ah, 722 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
718 struct ath_hw_antcomb_conf *antconf); 723 struct ath_hw_antcomb_conf *antconf);
719 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
720 void (*spectral_scan_config)(struct ath_hw *ah, 724 void (*spectral_scan_config)(struct ath_hw *ah,
721 struct ath_spec_scan *param); 725 struct ath_spec_scan *param);
722 void (*spectral_scan_trigger)(struct ath_hw *ah); 726 void (*spectral_scan_trigger)(struct ath_hw *ah);
723 void (*spectral_scan_wait)(struct ath_hw *ah); 727 void (*spectral_scan_wait)(struct ath_hw *ah);
728
729#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
730 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
731#endif
724}; 732};
725 733
726struct ath_nf_limits { 734struct ath_nf_limits {
@@ -765,7 +773,6 @@ struct ath_hw {
765 bool aspm_enabled; 773 bool aspm_enabled;
766 bool is_monitoring; 774 bool is_monitoring;
767 bool need_an_top2_fixup; 775 bool need_an_top2_fixup;
768 bool shared_chain_lnadiv;
769 u16 tx_trig_level; 776 u16 tx_trig_level;
770 777
771 u32 nf_regs[6]; 778 u32 nf_regs[6];
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 026a2a067b46..9a1f349f9260 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -53,9 +53,9 @@ static int ath9k_btcoex_enable;
53module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); 53module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
54MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); 54MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
55 55
56static int ath9k_enable_diversity; 56static int ath9k_bt_ant_diversity;
57module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444); 57module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
58MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565"); 58MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
59 59
60bool is_ath9k_unloaded; 60bool is_ath9k_unloaded;
61/* We use the hw_value as an index into our private channel structure */ 61/* We use the hw_value as an index into our private channel structure */
@@ -146,14 +146,22 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
146 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), 146 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
147 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), 147 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
148 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), 148 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
149 RATE(60, 0x0b, 0), 149 RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
150 RATE(90, 0x0f, 0), 150 IEEE80211_RATE_SUPPORTS_10MHZ)),
151 RATE(120, 0x0a, 0), 151 RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
152 RATE(180, 0x0e, 0), 152 IEEE80211_RATE_SUPPORTS_10MHZ)),
153 RATE(240, 0x09, 0), 153 RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
154 RATE(360, 0x0d, 0), 154 IEEE80211_RATE_SUPPORTS_10MHZ)),
155 RATE(480, 0x08, 0), 155 RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
156 RATE(540, 0x0c, 0), 156 IEEE80211_RATE_SUPPORTS_10MHZ)),
157 RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
158 IEEE80211_RATE_SUPPORTS_10MHZ)),
159 RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
160 IEEE80211_RATE_SUPPORTS_10MHZ)),
161 RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
162 IEEE80211_RATE_SUPPORTS_10MHZ)),
163 RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
164 IEEE80211_RATE_SUPPORTS_10MHZ)),
157}; 165};
158 166
159#ifdef CONFIG_MAC80211_LEDS 167#ifdef CONFIG_MAC80211_LEDS
@@ -516,6 +524,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
516static void ath9k_init_platform(struct ath_softc *sc) 524static void ath9k_init_platform(struct ath_softc *sc)
517{ 525{
518 struct ath_hw *ah = sc->sc_ah; 526 struct ath_hw *ah = sc->sc_ah;
527 struct ath9k_hw_capabilities *pCap = &ah->caps;
519 struct ath_common *common = ath9k_hw_common(ah); 528 struct ath_common *common = ath9k_hw_common(ah);
520 529
521 if (common->bus_ops->ath_bus_type != ATH_PCI) 530 if (common->bus_ops->ath_bus_type != ATH_PCI)
@@ -525,12 +534,27 @@ static void ath9k_init_platform(struct ath_softc *sc)
525 ATH9K_PCI_CUS230)) { 534 ATH9K_PCI_CUS230)) {
526 ah->config.xlna_gpio = 9; 535 ah->config.xlna_gpio = 9;
527 ah->config.xatten_margin_cfg = true; 536 ah->config.xatten_margin_cfg = true;
537 ah->config.alt_mingainidx = true;
538 ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
539 sc->ant_comb.low_rssi_thresh = 20;
540 sc->ant_comb.fast_div_bias = 3;
528 541
529 ath_info(common, "Set parameters for %s\n", 542 ath_info(common, "Set parameters for %s\n",
530 (sc->driver_data & ATH9K_PCI_CUS198) ? 543 (sc->driver_data & ATH9K_PCI_CUS198) ?
531 "CUS198" : "CUS230"); 544 "CUS198" : "CUS230");
532 } else if (sc->driver_data & ATH9K_PCI_CUS217) { 545 }
546
547 if (sc->driver_data & ATH9K_PCI_CUS217)
533 ath_info(common, "CUS217 card detected\n"); 548 ath_info(common, "CUS217 card detected\n");
549
550 if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
551 pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
552 ath_info(common, "Set BT/WLAN RX diversity capability\n");
553 }
554
555 if (sc->driver_data & ATH9K_PCI_D3_L1_WAR) {
556 ah->config.pcie_waen = 0x0040473b;
557 ath_info(common, "Enable WAR for ASPM D3/L1\n");
534 } 558 }
535} 559}
536 560
@@ -584,6 +608,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
584{ 608{
585 struct ath9k_platform_data *pdata = sc->dev->platform_data; 609 struct ath9k_platform_data *pdata = sc->dev->platform_data;
586 struct ath_hw *ah = NULL; 610 struct ath_hw *ah = NULL;
611 struct ath9k_hw_capabilities *pCap;
587 struct ath_common *common; 612 struct ath_common *common;
588 int ret = 0, i; 613 int ret = 0, i;
589 int csz = 0; 614 int csz = 0;
@@ -600,6 +625,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
600 ah->reg_ops.rmw = ath9k_reg_rmw; 625 ah->reg_ops.rmw = ath9k_reg_rmw;
601 atomic_set(&ah->intr_ref_cnt, -1); 626 atomic_set(&ah->intr_ref_cnt, -1);
602 sc->sc_ah = ah; 627 sc->sc_ah = ah;
628 pCap = &ah->caps;
603 629
604 sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET); 630 sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
605 631
@@ -631,11 +657,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
631 ath9k_init_platform(sc); 657 ath9k_init_platform(sc);
632 658
633 /* 659 /*
634 * Enable Antenna diversity only when BTCOEX is disabled 660 * Enable WLAN/BT RX Antenna diversity only when:
635 * and the user manually requests the feature. 661 *
662 * - BTCOEX is disabled.
663 * - the user manually requests the feature.
664 * - the HW cap is set using the platform data.
636 */ 665 */
637 if (!common->btcoex_enabled && ath9k_enable_diversity) 666 if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
638 common->antenna_diversity = 1; 667 (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
668 common->bt_ant_diversity = 1;
639 669
640 spin_lock_init(&common->cc_lock); 670 spin_lock_init(&common->cc_lock);
641 671
@@ -710,13 +740,15 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
710 struct ieee80211_supported_band *sband; 740 struct ieee80211_supported_band *sband;
711 struct ieee80211_channel *chan; 741 struct ieee80211_channel *chan;
712 struct ath_hw *ah = sc->sc_ah; 742 struct ath_hw *ah = sc->sc_ah;
743 struct cfg80211_chan_def chandef;
713 int i; 744 int i;
714 745
715 sband = &sc->sbands[band]; 746 sband = &sc->sbands[band];
716 for (i = 0; i < sband->n_channels; i++) { 747 for (i = 0; i < sband->n_channels; i++) {
717 chan = &sband->channels[i]; 748 chan = &sband->channels[i];
718 ah->curchan = &ah->channels[chan->hw_value]; 749 ah->curchan = &ah->channels[chan->hw_value];
719 ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20); 750 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
751 ath9k_cmn_update_ichannel(ah->curchan, &chandef);
720 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true); 752 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
721 } 753 }
722} 754}
@@ -835,6 +867,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
835 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 867 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
836 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 868 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
837 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 869 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
870 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
871 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
838 872
839#ifdef CONFIG_PM_SLEEP 873#ifdef CONFIG_PM_SLEEP
840 if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && 874 if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
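
Usage note for the renamed module parameter: it is declared with mode 0444,
so it can only be given at load time, e.g.

    modprobe ath9k bt_ant_diversity=1

and even then ath9k_init_softc() enables the feature only with BTCOEX
disabled and ATH9K_HW_CAP_BT_ANT_DIV set via the platform/PCI data.
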
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index fff5d3ccc663..2f831db396ac 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -41,7 +41,7 @@ void ath_tx_complete_poll_work(struct work_struct *work)
41 txq->axq_tx_inprogress = true; 41 txq->axq_tx_inprogress = true;
42 } 42 }
43 } 43 }
44 ath_txq_unlock_complete(sc, txq); 44 ath_txq_unlock(sc, txq);
45 } 45 }
46 46
47 if (needreset) { 47 if (needreset) {
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 2ef05ebffbcf..a3eff0986a3f 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -583,9 +583,9 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
583 rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate); 583 rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
584 rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; 584 rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
585 585
586 rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
586 rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; 587 rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
587 rs->rs_moreaggr = 588 rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
588 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
589 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); 589 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
590 590
591 /* directly mapped flags for ieee80211_rx_status */ 591 /* directly mapped flags for ieee80211_rx_status */
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b02dfce964b4..bfccaceed44e 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -140,6 +140,7 @@ struct ath_rx_status {
140 int8_t rs_rssi_ext1; 140 int8_t rs_rssi_ext1;
141 int8_t rs_rssi_ext2; 141 int8_t rs_rssi_ext2;
142 u8 rs_isaggr; 142 u8 rs_isaggr;
143 u8 rs_firstaggr;
143 u8 rs_moreaggr; 144 u8 rs_moreaggr;
144 u8 rs_num_delims; 145 u8 rs_num_delims;
145 u8 rs_flags; 146 u8 rs_flags;
@@ -569,6 +570,7 @@ struct ar5416_desc {
569#define AR_RxAggr 0x00020000 570#define AR_RxAggr 0x00020000
570#define AR_PostDelimCRCErr 0x00040000 571#define AR_PostDelimCRCErr 0x00040000
571#define AR_RxStatusRsvd71 0x3ff80000 572#define AR_RxStatusRsvd71 0x3ff80000
573#define AR_RxFirstAggr 0x20000000
572#define AR_DecryptBusyErr 0x40000000 574#define AR_DecryptBusyErr 0x40000000
573#define AR_KeyMiss 0x80000000 575#define AR_KeyMiss 0x80000000
574 576
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index cb5a65553ac7..e4f65900132d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -237,9 +237,6 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
237 ath_restart_work(sc); 237 ath_restart_work(sc);
238 } 238 }
239 239
240 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
241 ath_ant_comb_update(sc);
242
243 ieee80211_wake_queues(sc->hw); 240 ieee80211_wake_queues(sc->hw);
244 241
245 return true; 242 return true;
@@ -965,6 +962,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
965 struct ath_softc *sc = hw->priv; 962 struct ath_softc *sc = hw->priv;
966 struct ath_hw *ah = sc->sc_ah; 963 struct ath_hw *ah = sc->sc_ah;
967 struct ath_common *common = ath9k_hw_common(ah); 964 struct ath_common *common = ath9k_hw_common(ah);
965 struct ath_vif *avp = (void *)vif->drv_priv;
966 struct ath_node *an = &avp->mcast_node;
968 967
969 mutex_lock(&sc->mutex); 968 mutex_lock(&sc->mutex);
970 969
@@ -978,6 +977,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
978 if (ath9k_uses_beacons(vif->type)) 977 if (ath9k_uses_beacons(vif->type))
979 ath9k_beacon_assign_slot(sc, vif); 978 ath9k_beacon_assign_slot(sc, vif);
980 979
980 an->sc = sc;
981 an->sta = NULL;
982 an->vif = vif;
983 an->no_ps_filter = true;
984 ath_tx_node_init(sc, an);
985
981 mutex_unlock(&sc->mutex); 986 mutex_unlock(&sc->mutex);
982 return 0; 987 return 0;
983} 988}
@@ -1015,6 +1020,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1015{ 1020{
1016 struct ath_softc *sc = hw->priv; 1021 struct ath_softc *sc = hw->priv;
1017 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1022 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1023 struct ath_vif *avp = (void *)vif->drv_priv;
1018 1024
1019 ath_dbg(common, CONFIG, "Detach Interface\n"); 1025 ath_dbg(common, CONFIG, "Detach Interface\n");
1020 1026
@@ -1025,10 +1031,15 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1025 if (ath9k_uses_beacons(vif->type)) 1031 if (ath9k_uses_beacons(vif->type))
1026 ath9k_beacon_remove_slot(sc, vif); 1032 ath9k_beacon_remove_slot(sc, vif);
1027 1033
1034 if (sc->csa_vif == vif)
1035 sc->csa_vif = NULL;
1036
1028 ath9k_ps_wakeup(sc); 1037 ath9k_ps_wakeup(sc);
1029 ath9k_calculate_summary_state(hw, NULL); 1038 ath9k_calculate_summary_state(hw, NULL);
1030 ath9k_ps_restore(sc); 1039 ath9k_ps_restore(sc);
1031 1040
1041 ath_tx_node_cleanup(sc, &avp->mcast_node);
1042
1032 mutex_unlock(&sc->mutex); 1043 mutex_unlock(&sc->mutex);
1033} 1044}
1034 1045
@@ -1192,8 +1203,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1192 1203
1193 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) { 1204 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
1194 struct ieee80211_channel *curchan = hw->conf.chandef.chan; 1205 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
1195 enum nl80211_channel_type channel_type =
1196 cfg80211_get_chandef_type(&conf->chandef);
1197 int pos = curchan->hw_value; 1206 int pos = curchan->hw_value;
1198 int old_pos = -1; 1207 int old_pos = -1;
1199 unsigned long flags; 1208 unsigned long flags;
@@ -1201,8 +1210,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1201 if (ah->curchan) 1210 if (ah->curchan)
1202 old_pos = ah->curchan - &ah->channels[0]; 1211 old_pos = ah->curchan - &ah->channels[0];
1203 1212
1204 ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n", 1213 ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
1205 curchan->center_freq, channel_type); 1214 curchan->center_freq, hw->conf.chandef.width);
1206 1215
1207 /* update survey stats for the old channel before switching */ 1216 /* update survey stats for the old channel before switching */
1208 spin_lock_irqsave(&common->cc_lock, flags); 1217 spin_lock_irqsave(&common->cc_lock, flags);
@@ -1210,7 +1219,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1210 spin_unlock_irqrestore(&common->cc_lock, flags); 1219 spin_unlock_irqrestore(&common->cc_lock, flags);
1211 1220
1212 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], 1221 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
1213 curchan, channel_type); 1222 &conf->chandef);
1214 1223
1215 /* 1224 /*
1216 * If the operating channel changes, change the survey in-use flags 1225 * If the operating channel changes, change the survey in-use flags
@@ -1373,9 +1382,6 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
1373 struct ath_softc *sc = hw->priv; 1382 struct ath_softc *sc = hw->priv;
1374 struct ath_node *an = (struct ath_node *) sta->drv_priv; 1383 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1375 1384
1376 if (!sta->ht_cap.ht_supported)
1377 return;
1378
1379 switch (cmd) { 1385 switch (cmd) {
1380 case STA_NOTIFY_SLEEP: 1386 case STA_NOTIFY_SLEEP:
1381 an->sleeping = true; 1387 an->sleeping = true;
@@ -2093,7 +2099,7 @@ static void ath9k_wow_add_pattern(struct ath_softc *sc,
2093{ 2099{
2094 struct ath_hw *ah = sc->sc_ah; 2100 struct ath_hw *ah = sc->sc_ah;
2095 struct ath9k_wow_pattern *wow_pattern = NULL; 2101 struct ath9k_wow_pattern *wow_pattern = NULL;
2096 struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns; 2102 struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
2097 int mask_len; 2103 int mask_len;
2098 s8 i = 0; 2104 s8 i = 0;
2099 2105
@@ -2314,6 +2320,19 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2314 clear_bit(SC_OP_SCANNING, &sc->sc_flags); 2320 clear_bit(SC_OP_SCANNING, &sc->sc_flags);
2315} 2321}
2316 2322
2323static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
2324 struct ieee80211_vif *vif,
2325 struct cfg80211_chan_def *chandef)
2326{
2327 struct ath_softc *sc = hw->priv;
2328
2329 /* mac80211 does not support CSA in multi-if cases (yet) */
2330 if (WARN_ON(sc->csa_vif))
2331 return;
2332
2333 sc->csa_vif = vif;
2334}
2335
2317struct ieee80211_ops ath9k_ops = { 2336struct ieee80211_ops ath9k_ops = {
2318 .tx = ath9k_tx, 2337 .tx = ath9k_tx,
2319 .start = ath9k_start, 2338 .start = ath9k_start,
@@ -2358,8 +2377,8 @@ struct ieee80211_ops ath9k_ops = {
2358 2377
2359#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS) 2378#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
2360 .sta_add_debugfs = ath9k_sta_add_debugfs, 2379 .sta_add_debugfs = ath9k_sta_add_debugfs,
2361 .sta_remove_debugfs = ath9k_sta_remove_debugfs,
2362#endif 2380#endif
2363 .sw_scan_start = ath9k_sw_scan_start, 2381 .sw_scan_start = ath9k_sw_scan_start,
2364 .sw_scan_complete = ath9k_sw_scan_complete, 2382 .sw_scan_complete = ath9k_sw_scan_complete,
2383 .channel_switch_beacon = ath9k_channel_switch_beacon,
2365}; 2384};
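
For context on the new channel_switch_beacon hook:
ath9k_channel_switch_beacon() only arms sc->csa_vif; completing the switch
is left to the beacon path once the CSA countdown in the beacon template
expires, which mac80211 expects drivers to signal through
ieee80211_csa_is_complete()/ieee80211_csa_finish(). A hedged sketch of that
counterpart (function name illustrative; the real one is not in this
excerpt):

static void ath9k_csa_update_sketch(struct ath_softc *sc)
{
	if (!sc->csa_vif)
		return;

	if (ieee80211_csa_is_complete(sc->csa_vif)) {
		ieee80211_csa_finish(sc->csa_vif);
		sc->csa_vif = NULL;
	}
}
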
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index c585c9b35973..d089a7cf01c4 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -29,6 +29,60 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
29 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ 29 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
30 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ 30 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
31 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ 31 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
32
33 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
34 0x002A,
35 PCI_VENDOR_ID_AZWAVE,
36 0x1C71),
37 .driver_data = ATH9K_PCI_D3_L1_WAR },
38 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
39 0x002A,
40 PCI_VENDOR_ID_FOXCONN,
41 0xE01F),
42 .driver_data = ATH9K_PCI_D3_L1_WAR },
43 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
44 0x002A,
45 0x11AD, /* LITEON */
46 0x6632),
47 .driver_data = ATH9K_PCI_D3_L1_WAR },
48 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
49 0x002A,
50 0x11AD, /* LITEON */
51 0x6642),
52 .driver_data = ATH9K_PCI_D3_L1_WAR },
53 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
54 0x002A,
55 PCI_VENDOR_ID_QMI,
56 0x0306),
57 .driver_data = ATH9K_PCI_D3_L1_WAR },
58 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
59 0x002A,
60 0x185F, /* WNC */
61 0x309D),
62 .driver_data = ATH9K_PCI_D3_L1_WAR },
63 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
64 0x002A,
65 0x10CF, /* Fujitsu */
66 0x147C),
67 .driver_data = ATH9K_PCI_D3_L1_WAR },
68 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
69 0x002A,
70 0x10CF, /* Fujitsu */
71 0x147D),
72 .driver_data = ATH9K_PCI_D3_L1_WAR },
73 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
74 0x002A,
75 0x10CF, /* Fujitsu */
76 0x1536),
77 .driver_data = ATH9K_PCI_D3_L1_WAR },
78
79 /* AR9285 card for Asus */
80 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
81 0x002B,
82 PCI_VENDOR_ID_AZWAVE,
83 0x2C37),
84 .driver_data = ATH9K_PCI_BT_ANT_DIV },
85
32 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */ 86 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
33 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 87 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
34 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 88 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
@@ -40,29 +94,106 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
40 0x0032, 94 0x0032,
41 PCI_VENDOR_ID_AZWAVE, 95 PCI_VENDOR_ID_AZWAVE,
42 0x2086), 96 0x2086),
43 .driver_data = ATH9K_PCI_CUS198 }, 97 .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
44 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 98 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
45 0x0032, 99 0x0032,
46 PCI_VENDOR_ID_AZWAVE, 100 PCI_VENDOR_ID_AZWAVE,
47 0x1237), 101 0x1237),
48 .driver_data = ATH9K_PCI_CUS198 }, 102 .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
49 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 103 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
50 0x0032, 104 0x0032,
51 PCI_VENDOR_ID_AZWAVE, 105 PCI_VENDOR_ID_AZWAVE,
52 0x2126), 106 0x2126),
53 .driver_data = ATH9K_PCI_CUS198 }, 107 .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
108 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
109 0x0032,
110 PCI_VENDOR_ID_AZWAVE,
111 0x126A),
112 .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
54 113
55 /* PCI-E CUS230 */ 114 /* PCI-E CUS230 */
56 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 115 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
57 0x0032, 116 0x0032,
58 PCI_VENDOR_ID_AZWAVE, 117 PCI_VENDOR_ID_AZWAVE,
59 0x2152), 118 0x2152),
60 .driver_data = ATH9K_PCI_CUS230 }, 119 .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
61 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 120 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
62 0x0032, 121 0x0032,
63 PCI_VENDOR_ID_FOXCONN, 122 PCI_VENDOR_ID_FOXCONN,
64 0xE075), 123 0xE075),
65 .driver_data = ATH9K_PCI_CUS230 }, 124 .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
125
126 /* WB225 */
127 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
128 0x0032,
129 PCI_VENDOR_ID_ATHEROS,
130 0x3119),
131 .driver_data = ATH9K_PCI_BT_ANT_DIV },
132 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
133 0x0032,
134 PCI_VENDOR_ID_ATHEROS,
135 0x3122),
136 .driver_data = ATH9K_PCI_BT_ANT_DIV },
137 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
138 0x0032,
139 0x185F, /* WNC */
140 0x3119),
141 .driver_data = ATH9K_PCI_BT_ANT_DIV },
142 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
143 0x0032,
144 0x185F, /* WNC */
145 0x3027),
146 .driver_data = ATH9K_PCI_BT_ANT_DIV },
147 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
148 0x0032,
149 PCI_VENDOR_ID_SAMSUNG,
150 0x4105),
151 .driver_data = ATH9K_PCI_BT_ANT_DIV },
152 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
153 0x0032,
154 PCI_VENDOR_ID_SAMSUNG,
155 0x4106),
156 .driver_data = ATH9K_PCI_BT_ANT_DIV },
157 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
158 0x0032,
159 PCI_VENDOR_ID_SAMSUNG,
160 0x410D),
161 .driver_data = ATH9K_PCI_BT_ANT_DIV },
162 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
163 0x0032,
164 PCI_VENDOR_ID_SAMSUNG,
165 0x410E),
166 .driver_data = ATH9K_PCI_BT_ANT_DIV },
167 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
168 0x0032,
169 PCI_VENDOR_ID_SAMSUNG,
170 0x410F),
171 .driver_data = ATH9K_PCI_BT_ANT_DIV },
172 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
173 0x0032,
174 PCI_VENDOR_ID_SAMSUNG,
175 0xC706),
176 .driver_data = ATH9K_PCI_BT_ANT_DIV },
177 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
178 0x0032,
179 PCI_VENDOR_ID_SAMSUNG,
180 0xC680),
181 .driver_data = ATH9K_PCI_BT_ANT_DIV },
182 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
183 0x0032,
184 PCI_VENDOR_ID_SAMSUNG,
185 0xC708),
186 .driver_data = ATH9K_PCI_BT_ANT_DIV },
187 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
188 0x0032,
189 PCI_VENDOR_ID_LENOVO,
190 0x3218),
191 .driver_data = ATH9K_PCI_BT_ANT_DIV },
192 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
193 0x0032,
194 PCI_VENDOR_ID_LENOVO,
195 0x3219),
196 .driver_data = ATH9K_PCI_BT_ANT_DIV },
66 197
67 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ 198 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
68 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ 199 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
@@ -229,6 +360,22 @@ static void ath_pci_aspm_init(struct ath_common *common)
229 return; 360 return;
230 } 361 }
231 362
363 /*
364 * 0x70c - Ack Frequency Register.
365 *
366 * Bits 27:29 - DEFAULT_L1_ENTRANCE_LATENCY.
367 *
368 * 000 : 1 us
369 * 001 : 2 us
370 * 010 : 4 us
371 * 011 : 8 us
372 * 100 : 16 us
373 * 101 : 32 us
374 * 110/111 : 64 us
375 */
376 if (AR_SREV_9462(ah))
377 pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix);
378
232 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm); 379 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
233 if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) { 380 if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
234 ah->aspm_enabled = true; 381 ah->aspm_enabled = true;
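
To make the 0x70c comment above concrete: DEFAULT_L1_ENTRANCE_LATENCY is a
three-bit field at bits 27:29 of the value the hunk caches in
ah->config.aspm_l1_fix. An illustrative decoder following the table in the
comment:

static unsigned int l1_entrance_latency_us(u32 ack_freq)
{
	/* 000..101 map to 1..32 us; 110 and 111 both mean 64 us. */
	static const unsigned int us[8] = { 1, 2, 4, 8, 16, 32, 64, 64 };

	return us[(ack_freq >> 27) & 0x7];
}
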
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 8b380305b0fc..4a1b99238ec2 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -48,4 +48,11 @@
48#define AR_PHY_PLL_CONTROL 0x16180 48#define AR_PHY_PLL_CONTROL 0x16180
49#define AR_PHY_PLL_MODE 0x16184 49#define AR_PHY_PLL_MODE 0x16184
50 50
51enum ath9k_ant_div_comb_lna_conf {
52 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
53 ATH_ANT_DIV_COMB_LNA2,
54 ATH_ANT_DIV_COMB_LNA1,
55 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
56};
57
51#endif 58#endif
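
The enum above pairs with the eeprom_4k.c hunk earlier in this patch, which
programs MAIN=LNA1 and ALT=LNA2 into AR_PHY_MULTICHAIN_GAIN_CTL when
diversity combining is enabled. The same read-modify-write, condensed into
an illustrative helper using the field names from that hunk:

static u32 ant_div_initial_lnaconf(u32 regval)
{
	regval &= ~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
		    AR_PHY_9285_ANT_DIV_ALT_LNACONF);
	regval |= ATH_ANT_DIV_COMB_LNA1 << AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S;
	regval |= ATH_ANT_DIV_COMB_LNA2 << AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;

	return regval;
}
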
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 7eb1f4b458e4..d3d7c51fa6c8 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1275,15 +1275,21 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1275} 1275}
1276 1276
1277static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, 1277static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1278 struct cfg80211_chan_def *chandef,
1278 struct ieee80211_sta *sta, void *priv_sta) 1279 struct ieee80211_sta *sta, void *priv_sta)
1279{ 1280{
1280 struct ath_softc *sc = priv; 1281 struct ath_softc *sc = priv;
1281 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1282 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1282 struct ath_rate_priv *ath_rc_priv = priv_sta; 1283 struct ath_rate_priv *ath_rc_priv = priv_sta;
1283 int i, j = 0; 1284 int i, j = 0;
1285 u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
1284 1286
1285 for (i = 0; i < sband->n_bitrates; i++) { 1287 for (i = 0; i < sband->n_bitrates; i++) {
1286 if (sta->supp_rates[sband->band] & BIT(i)) { 1288 if (sta->supp_rates[sband->band] & BIT(i)) {
1289 if ((rate_flags & sband->bitrates[i].flags)
1290 != rate_flags)
1291 continue;
1292
1287 ath_rc_priv->neg_rates.rs_rates[j] 1293 ath_rc_priv->neg_rates.rs_rates[j]
1288 = (sband->bitrates[i].bitrate * 2) / 10; 1294 = (sband->bitrates[i].bitrate * 2) / 10;
1289 j++; 1295 j++;
@@ -1313,6 +1319,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1313} 1319}
1314 1320
1315static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, 1321static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1322 struct cfg80211_chan_def *chandef,
1316 struct ieee80211_sta *sta, void *priv_sta, 1323 struct ieee80211_sta *sta, void *priv_sta,
1317 u32 changed) 1324 u32 changed)
1318{ 1325{
@@ -1324,8 +1331,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1324 ath_rc_init(sc, priv_sta); 1331 ath_rc_init(sc, priv_sta);
1325 1332
1326 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, 1333 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
1327 "Operating HT Bandwidth changed to: %d\n", 1334 "Operating Bandwidth changed to: %d\n",
1328 cfg80211_get_chandef_type(&sc->hw->conf.chandef)); 1335 sc->hw->conf.chandef.width);
1329 } 1336 }
1330} 1337}
1331 1338
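
The ath_rate_init() hunk above filters the negotiated legacy rates against
the flags the current chandef demands (via ieee80211_chandef_rate_flags()),
so 5/10 MHz channels only keep rates marked with the matching
IEEE80211_RATE_SUPPORTS_* bits from the init.c rate table. The test reduces
to requiring every demanded flag (illustrative helper):

static bool rate_allowed(const struct ieee80211_rate *rate, u32 required)
{
	return (rate->flags & required) == required;
}
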
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 865e043e8aa6..4ee472a5a4e4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
42 struct ath_desc *ds; 42 struct ath_desc *ds;
43 struct sk_buff *skb; 43 struct sk_buff *skb;
44 44
45 ATH_RXBUF_RESET(bf);
46
47 ds = bf->bf_desc; 45 ds = bf->bf_desc;
48 ds->ds_link = 0; /* link to null */ 46 ds->ds_link = 0; /* link to null */
49 ds->ds_data = bf->bf_buf_addr; 47 ds->ds_data = bf->bf_buf_addr;
@@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
70 sc->rx.rxlink = &ds->ds_link; 68 sc->rx.rxlink = &ds->ds_link;
71} 69}
72 70
71static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
72{
73 if (sc->rx.buf_hold)
74 ath_rx_buf_link(sc, sc->rx.buf_hold);
75
76 sc->rx.buf_hold = bf;
77}
78
73static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 79static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
74{ 80{
75 /* XXX block beacon interrupts */ 81 /* XXX block beacon interrupts */
@@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
117 123
118 skb = bf->bf_mpdu; 124 skb = bf->bf_mpdu;
119 125
120 ATH_RXBUF_RESET(bf);
121 memset(skb->data, 0, ah->caps.rx_status_len); 126 memset(skb->data, 0, ah->caps.rx_status_len);
122 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 127 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
123 ah->caps.rx_status_len, DMA_TO_DEVICE); 128 ah->caps.rx_status_len, DMA_TO_DEVICE);
@@ -185,7 +190,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
185 190
186static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) 191static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
187{ 192{
188 skb_queue_head_init(&rx_edma->rx_fifo); 193 __skb_queue_head_init(&rx_edma->rx_fifo);
189 rx_edma->rx_fifo_hwsize = size; 194 rx_edma->rx_fifo_hwsize = size;
190} 195}
191 196
@@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
432 if (list_empty(&sc->rx.rxbuf)) 437 if (list_empty(&sc->rx.rxbuf))
433 goto start_recv; 438 goto start_recv;
434 439
440 sc->rx.buf_hold = NULL;
435 sc->rx.rxlink = NULL; 441 sc->rx.rxlink = NULL;
436 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 442 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
437 ath_rx_buf_link(sc, bf); 443 ath_rx_buf_link(sc, bf);
@@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
677 } 683 }
678 684
679 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 685 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
686 if (bf == sc->rx.buf_hold)
687 return NULL;
688
680 ds = bf->bf_desc; 689 ds = bf->bf_desc;
681 690
682 /* 691 /*
@@ -755,7 +764,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
755 bool is_mc, is_valid_tkip, strip_mic, mic_error; 764 bool is_mc, is_valid_tkip, strip_mic, mic_error;
756 struct ath_hw *ah = common->ah; 765 struct ath_hw *ah = common->ah;
757 __le16 fc; 766 __le16 fc;
758 u8 rx_status_len = ah->caps.rx_status_len;
759 767
760 fc = hdr->frame_control; 768 fc = hdr->frame_control;
761 769
@@ -777,25 +785,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
777 !test_bit(rx_stats->rs_keyix, common->ccmp_keymap)) 785 !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
778 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; 786 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
779 787
780 if (!rx_stats->rs_datalen) {
781 RX_STAT_INC(rx_len_err);
782 return false;
783 }
784
785 /*
786 * rs_status follows rs_datalen so if rs_datalen is too large
787 * we can take a hint that hardware corrupted it, so ignore
788 * those frames.
789 */
790 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
791 RX_STAT_INC(rx_len_err);
792 return false;
793 }
794
795 /* Only use error bits from the last fragment */
796 if (rx_stats->rs_more)
797 return true;
798
799 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) && 788 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
800 !ieee80211_has_morefrags(fc) && 789 !ieee80211_has_morefrags(fc) &&
801 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 790 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
@@ -814,8 +803,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
814 rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 803 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
815 mic_error = false; 804 mic_error = false;
816 } 805 }
817 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
818 return false;
819 806
820 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) || 807 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
821 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) { 808 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
@@ -865,6 +852,17 @@ static int ath9k_process_rate(struct ath_common *common,
865 band = hw->conf.chandef.chan->band; 852 band = hw->conf.chandef.chan->band;
866 sband = hw->wiphy->bands[band]; 853 sband = hw->wiphy->bands[band];
867 854
855 switch (hw->conf.chandef.width) {
856 case NL80211_CHAN_WIDTH_5:
857 rxs->flag |= RX_FLAG_5MHZ;
858 break;
859 case NL80211_CHAN_WIDTH_10:
860 rxs->flag |= RX_FLAG_10MHZ;
861 break;
862 default:
863 break;
864 }
865
868 if (rx_stats->rs_rate & 0x80) { 866 if (rx_stats->rs_rate & 0x80) {
869 /* HT rate */ 867 /* HT rate */
870 rxs->flag |= RX_FLAG_HT; 868 rxs->flag |= RX_FLAG_HT;
@@ -898,129 +896,65 @@ static int ath9k_process_rate(struct ath_common *common,
898 896
899static void ath9k_process_rssi(struct ath_common *common, 897static void ath9k_process_rssi(struct ath_common *common,
900 struct ieee80211_hw *hw, 898 struct ieee80211_hw *hw,
901 struct ieee80211_hdr *hdr, 899 struct ath_rx_status *rx_stats,
902 struct ath_rx_status *rx_stats) 900 struct ieee80211_rx_status *rxs)
903{ 901{
904 struct ath_softc *sc = hw->priv; 902 struct ath_softc *sc = hw->priv;
905 struct ath_hw *ah = common->ah; 903 struct ath_hw *ah = common->ah;
906 int last_rssi; 904 int last_rssi;
907 int rssi = rx_stats->rs_rssi; 905 int rssi = rx_stats->rs_rssi;
908 906
909 if (!rx_stats->is_mybeacon || 907 /*
910 ((ah->opmode != NL80211_IFTYPE_STATION) && 908 * RSSI is not available for subframes in an A-MPDU.
911 (ah->opmode != NL80211_IFTYPE_ADHOC))) 909 */
910 if (rx_stats->rs_moreaggr) {
911 rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
912 return; 912 return;
913 913 }
914 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
915 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
916
917 last_rssi = sc->last_rssi;
918 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
919 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
920 if (rssi < 0)
921 rssi = 0;
922
923 /* Update Beacon RSSI, this is used by ANI. */
924 ah->stats.avgbrssi = rssi;
925}
926
927/*
928 * For Decrypt or Demic errors, we only mark packet status here and always push
929 * up the frame up to let mac80211 handle the actual error case, be it no
930 * decryption key or real decryption error. This let us keep statistics there.
931 */
932static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
933 struct ieee80211_hdr *hdr,
934 struct ath_rx_status *rx_stats,
935 struct ieee80211_rx_status *rx_status,
936 bool *decrypt_error)
937{
938 struct ieee80211_hw *hw = sc->hw;
939 struct ath_hw *ah = sc->sc_ah;
940 struct ath_common *common = ath9k_hw_common(ah);
941 bool discard_current = sc->rx.discard_next;
942
943 sc->rx.discard_next = rx_stats->rs_more;
944 if (discard_current)
945 return -EINVAL;
946 914
947 /* 915 /*
948 * everything but the rate is checked here, the rate check is done 916 * Check if the RSSI for the last subframe in an A-MPDU
949 * separately to avoid doing two lookups for a rate for each frame. 917 * or an unaggregated frame is valid.
950 */ 918 */
951 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 919 if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
952 return -EINVAL; 920 rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
953 921 return;
954 /* Only use status info from the last fragment */ 922 }
955 if (rx_stats->rs_more)
956 return 0;
957 923
958 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 924 /*
959 return -EINVAL; 925 * Update Beacon RSSI, this is used by ANI.
926 */
927 if (rx_stats->is_mybeacon &&
928 ((ah->opmode == NL80211_IFTYPE_STATION) ||
929 (ah->opmode == NL80211_IFTYPE_ADHOC))) {
930 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
931 last_rssi = sc->last_rssi;
960 932
961 ath9k_process_rssi(common, hw, hdr, rx_stats); 933 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
934 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
935 if (rssi < 0)
936 rssi = 0;
962 937
963 rx_status->band = hw->conf.chandef.chan->band; 938 ah->stats.avgbrssi = rssi;
964 rx_status->freq = hw->conf.chandef.chan->center_freq; 939 }
965 rx_status->signal = ah->noise + rx_stats->rs_rssi;
966 rx_status->antenna = rx_stats->rs_antenna;
967 rx_status->flag |= RX_FLAG_MACTIME_END;
968 if (rx_stats->rs_moreaggr)
969 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
970 940
971 sc->rx.discard_next = false; 941 rxs->signal = ah->noise + rx_stats->rs_rssi;
972 return 0;
973} 942}
974 943
975static void ath9k_rx_skb_postprocess(struct ath_common *common, 944static void ath9k_process_tsf(struct ath_rx_status *rs,
976 struct sk_buff *skb, 945 struct ieee80211_rx_status *rxs,
977 struct ath_rx_status *rx_stats, 946 u64 tsf)
978 struct ieee80211_rx_status *rxs,
979 bool decrypt_error)
980{ 947{
981 struct ath_hw *ah = common->ah; 948 u32 tsf_lower = tsf & 0xffffffff;
982 struct ieee80211_hdr *hdr;
983 int hdrlen, padpos, padsize;
984 u8 keyix;
985 __le16 fc;
986
987 /* see if any padding is done by the hw and remove it */
988 hdr = (struct ieee80211_hdr *) skb->data;
989 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
990 fc = hdr->frame_control;
991 padpos = ieee80211_hdrlen(fc);
992
993 /* The MAC header is padded to have 32-bit boundary if the
994 * packet payload is non-zero. The general calculation for
995 * padsize would take into account odd header lengths:
996 * padsize = (4 - padpos % 4) % 4; However, since only
997 * even-length headers are used, padding can only be 0 or 2
998 * bytes and we can optimize this a bit. In addition, we must
999 * not try to remove padding from short control frames that do
1000 * not have payload. */
1001 padsize = padpos & 3;
1002 if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
1003 memmove(skb->data + padsize, skb->data, padpos);
1004 skb_pull(skb, padsize);
1005 }
1006 949
1007 keyix = rx_stats->rs_keyix; 950 rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
951 if (rs->rs_tstamp > tsf_lower &&
952 unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
953 rxs->mactime -= 0x100000000ULL;
1008 954
1009 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && 955 if (rs->rs_tstamp < tsf_lower &&
1010 ieee80211_has_protected(fc)) { 956 unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
1011 rxs->flag |= RX_FLAG_DECRYPTED; 957 rxs->mactime += 0x100000000ULL;
1012 } else if (ieee80211_has_protected(fc)
1013 && !decrypt_error && skb->len >= hdrlen + 4) {
1014 keyix = skb->data[hdrlen + 3] >> 6;
1015
1016 if (test_bit(keyix, common->keymap))
1017 rxs->flag |= RX_FLAG_DECRYPTED;
1018 }
1019 if (ah->sw_mgmt_crypto &&
1020 (rxs->flag & RX_FLAG_DECRYPTED) &&
1021 ieee80211_is_mgmt(fc))
1022 /* Use software decrypt for management frames. */
1023 rxs->flag &= ~RX_FLAG_DECRYPTED;
1024} 958}
1025 959
1026#ifdef CONFIG_ATH9K_DEBUGFS 960#ifdef CONFIG_ATH9K_DEBUGFS
@@ -1133,6 +1067,234 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
1133#endif 1067#endif
1134} 1068}
1135 1069
1070static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
1071{
1072 struct ath_hw *ah = sc->sc_ah;
1073 struct ath_common *common = ath9k_hw_common(ah);
1074
1075 if (ieee80211_is_beacon(hdr->frame_control)) {
1076 RX_STAT_INC(rx_beacons);
1077 if (!is_zero_ether_addr(common->curbssid) &&
1078 ether_addr_equal(hdr->addr3, common->curbssid))
1079 return true;
1080 }
1081
1082 return false;
1083}
1084
1085/*
1086 * For Decrypt or Demic errors, we only mark packet status here and always push
1087 * up the frame up to let mac80211 handle the actual error case, be it no
1088 * decryption key or real decryption error. This let us keep statistics there.
1089 */
1090static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
1091 struct sk_buff *skb,
1092 struct ath_rx_status *rx_stats,
1093 struct ieee80211_rx_status *rx_status,
1094 bool *decrypt_error, u64 tsf)
1095{
1096 struct ieee80211_hw *hw = sc->hw;
1097 struct ath_hw *ah = sc->sc_ah;
1098 struct ath_common *common = ath9k_hw_common(ah);
1099 struct ieee80211_hdr *hdr;
1100 bool discard_current = sc->rx.discard_next;
1101 int ret = 0;
1102
1103 /*
1104 * Discard corrupt descriptors which are marked in
1105 * ath_get_next_rx_buf().
1106 */
1107 sc->rx.discard_next = rx_stats->rs_more;
1108 if (discard_current)
1109 return -EINVAL;
1110
1111 /*
1112 * Discard zero-length packets.
1113 */
1114 if (!rx_stats->rs_datalen) {
1115 RX_STAT_INC(rx_len_err);
1116 return -EINVAL;
1117 }
1118
1119 /*
1120 * rs_status follows rs_datalen so if rs_datalen is too large
1121 * we can take a hint that hardware corrupted it, so ignore
1122 * those frames.
1123 */
1124 if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
1125 RX_STAT_INC(rx_len_err);
1126 return -EINVAL;
1127 }
1128
1129 /* Only use status info from the last fragment */
1130 if (rx_stats->rs_more)
1131 return 0;
1132
1133 /*
1134 * Return immediately if the RX descriptor has been marked
1135 * as corrupt based on the various error bits.
1136 *
1137 * This is different from the other corrupt descriptor
1138 * condition handled above.
1139 */
1140 if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
1141 ret = -EINVAL;
1142 goto exit;
1143 }
1144
1145 hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
1146
1147 ath9k_process_tsf(rx_stats, rx_status, tsf);
1148 ath_debug_stat_rx(sc, rx_stats);
1149
1150 /*
1151 * Process PHY errors and return so that the packet
1152 * can be dropped.
1153 */
1154 if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
1155 ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
1156 if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
1157 RX_STAT_INC(rx_spectral);
1158
1159 ret = -EINVAL;
1160 goto exit;
1161 }
1162
1163 /*
1164 * everything but the rate is checked here, the rate check is done
1165 * separately to avoid doing two lookups for a rate for each frame.
1166 */
1167 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
1168 ret = -EINVAL;
1169 goto exit;
1170 }
1171
1172 rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
1173 if (rx_stats->is_mybeacon) {
1174 sc->hw_busy_count = 0;
1175 ath_start_rx_poll(sc, 3);
1176 }
1177
1178 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
1179 ret =-EINVAL;
1180 goto exit;
1181 }
1182
1183 ath9k_process_rssi(common, hw, rx_stats, rx_status);
1184
1185 rx_status->band = hw->conf.chandef.chan->band;
1186 rx_status->freq = hw->conf.chandef.chan->center_freq;
1187 rx_status->antenna = rx_stats->rs_antenna;
1188 rx_status->flag |= RX_FLAG_MACTIME_END;
1189
1190#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1191 if (ieee80211_is_data_present(hdr->frame_control) &&
1192 !ieee80211_is_qos_nullfunc(hdr->frame_control))
1193 sc->rx.num_pkts++;
1194#endif
1195
1196exit:
1197 sc->rx.discard_next = false;
1198 return ret;
1199}
1200
1201static void ath9k_rx_skb_postprocess(struct ath_common *common,
1202 struct sk_buff *skb,
1203 struct ath_rx_status *rx_stats,
1204 struct ieee80211_rx_status *rxs,
1205 bool decrypt_error)
1206{
1207 struct ath_hw *ah = common->ah;
1208 struct ieee80211_hdr *hdr;
1209 int hdrlen, padpos, padsize;
1210 u8 keyix;
1211 __le16 fc;
1212
1213 /* see if any padding is done by the hw and remove it */
1214 hdr = (struct ieee80211_hdr *) skb->data;
1215 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1216 fc = hdr->frame_control;
1217 padpos = ieee80211_hdrlen(fc);
1218
1219 /* The MAC header is padded to have 32-bit boundary if the
1220 * packet payload is non-zero. The general calculation for
1221 * padsize would take into account odd header lengths:
1222 * padsize = (4 - padpos % 4) % 4; However, since only
1223 * even-length headers are used, padding can only be 0 or 2
1224 * bytes and we can optimize this a bit. In addition, we must
1225 * not try to remove padding from short control frames that do
1226 * not have payload. */
1227 padsize = padpos & 3;
1228 if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
1229 memmove(skb->data + padsize, skb->data, padpos);
1230 skb_pull(skb, padsize);
1231 }
1232
1233 keyix = rx_stats->rs_keyix;
1234
1235 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
1236 ieee80211_has_protected(fc)) {
1237 rxs->flag |= RX_FLAG_DECRYPTED;
1238 } else if (ieee80211_has_protected(fc)
1239 && !decrypt_error && skb->len >= hdrlen + 4) {
1240 keyix = skb->data[hdrlen + 3] >> 6;
1241
1242 if (test_bit(keyix, common->keymap))
1243 rxs->flag |= RX_FLAG_DECRYPTED;
1244 }
1245 if (ah->sw_mgmt_crypto &&
1246 (rxs->flag & RX_FLAG_DECRYPTED) &&
1247 ieee80211_is_mgmt(fc))
1248 /* Use software decrypt for management frames. */
1249 rxs->flag &= ~RX_FLAG_DECRYPTED;
1250}
1251
1252/*
1253 * Run the LNA combining algorithm only in these cases:
1254 *
1255 * Standalone WLAN cards with both LNA/Antenna diversity
1256 * enabled in the EEPROM.
1257 *
1258 * WLAN+BT cards which are in the supported card list
1259 * in ath_pci_id_table and the user has loaded the
1260 * driver with "bt_ant_diversity" set to true.
1261 */
1262static void ath9k_antenna_check(struct ath_softc *sc,
1263 struct ath_rx_status *rs)
1264{
1265 struct ath_hw *ah = sc->sc_ah;
1266 struct ath9k_hw_capabilities *pCap = &ah->caps;
1267 struct ath_common *common = ath9k_hw_common(ah);
1268
1269 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
1270 return;
1271
1272 /*
1273 * All MPDUs in an aggregate will use the same LNA
1274 * as the first MPDU.
1275 */
1276 if (rs->rs_isaggr && !rs->rs_firstaggr)
1277 return;
1278
1279 /*
1280 * Change the default rx antenna if rx diversity
1281 * chooses the other antenna 3 times in a row.
1282 */
1283 if (sc->rx.defant != rs->rs_antenna) {
1284 if (++sc->rx.rxotherant >= 3)
1285 ath_setdefantenna(sc, rs->rs_antenna);
1286 } else {
1287 sc->rx.rxotherant = 0;
1288 }
1289
1290 if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
1291 if (common->bt_ant_diversity)
1292 ath_ant_comb_scan(sc, rs);
1293 } else {
1294 ath_ant_comb_scan(sc, rs);
1295 }
1296}
1297
1136static void ath9k_apply_ampdu_details(struct ath_softc *sc, 1298static void ath9k_apply_ampdu_details(struct ath_softc *sc,
1137 struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) 1299 struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
1138{ 1300{
@@ -1159,15 +1321,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1159 struct ath_hw *ah = sc->sc_ah; 1321 struct ath_hw *ah = sc->sc_ah;
1160 struct ath_common *common = ath9k_hw_common(ah); 1322 struct ath_common *common = ath9k_hw_common(ah);
1161 struct ieee80211_hw *hw = sc->hw; 1323 struct ieee80211_hw *hw = sc->hw;
1162 struct ieee80211_hdr *hdr;
1163 int retval; 1324 int retval;
1164 struct ath_rx_status rs; 1325 struct ath_rx_status rs;
1165 enum ath9k_rx_qtype qtype; 1326 enum ath9k_rx_qtype qtype;
1166 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1327 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1167 int dma_type; 1328 int dma_type;
1168 u8 rx_status_len = ah->caps.rx_status_len;
1169 u64 tsf = 0; 1329 u64 tsf = 0;
1170 u32 tsf_lower = 0;
1171 unsigned long flags; 1330 unsigned long flags;
1172 dma_addr_t new_buf_addr; 1331 dma_addr_t new_buf_addr;
1173 1332
@@ -1179,7 +1338,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1179 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1338 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
1180 1339
1181 tsf = ath9k_hw_gettsf64(ah); 1340 tsf = ath9k_hw_gettsf64(ah);
1182 tsf_lower = tsf & 0xffffffff;
1183 1341
1184 do { 1342 do {
1185 bool decrypt_error = false; 1343 bool decrypt_error = false;
@@ -1206,55 +1364,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1206 else 1364 else
1207 hdr_skb = skb; 1365 hdr_skb = skb;
1208 1366
1209 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1210 rxs = IEEE80211_SKB_RXCB(hdr_skb); 1367 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1211 if (ieee80211_is_beacon(hdr->frame_control)) {
1212 RX_STAT_INC(rx_beacons);
1213 if (!is_zero_ether_addr(common->curbssid) &&
1214 ether_addr_equal(hdr->addr3, common->curbssid))
1215 rs.is_mybeacon = true;
1216 else
1217 rs.is_mybeacon = false;
1218 }
1219 else
1220 rs.is_mybeacon = false;
1221
1222 if (ieee80211_is_data_present(hdr->frame_control) &&
1223 !ieee80211_is_qos_nullfunc(hdr->frame_control))
1224 sc->rx.num_pkts++;
1225
1226 ath_debug_stat_rx(sc, &rs);
1227
1228 memset(rxs, 0, sizeof(struct ieee80211_rx_status)); 1368 memset(rxs, 0, sizeof(struct ieee80211_rx_status));
1229 1369
1230 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1370 retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
1231 if (rs.rs_tstamp > tsf_lower && 1371 &decrypt_error, tsf);
1232 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1233 rxs->mactime -= 0x100000000ULL;
1234
1235 if (rs.rs_tstamp < tsf_lower &&
1236 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1237 rxs->mactime += 0x100000000ULL;
1238
1239 if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
1240 ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);
1241
1242 if (rs.rs_status & ATH9K_RXERR_PHY) {
1243 if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
1244 RX_STAT_INC(rx_spectral);
1245 goto requeue_drop_frag;
1246 }
1247 }
1248
1249 retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
1250 &decrypt_error);
1251 if (retval) 1372 if (retval)
1252 goto requeue_drop_frag; 1373 goto requeue_drop_frag;
1253 1374
1254 if (rs.is_mybeacon) {
1255 sc->hw_busy_count = 0;
1256 ath_start_rx_poll(sc, 3);
1257 }
1258 /* Ensure we always have an skb to requeue once we are done 1375 /* Ensure we always have an skb to requeue once we are done
1259 * processing the current buffer's skb */ 1376 * processing the current buffer's skb */
1260 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1377 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1308,8 +1425,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1308 sc->rx.frag = skb; 1425 sc->rx.frag = skb;
1309 goto requeue; 1426 goto requeue;
1310 } 1427 }
1311 if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
1312 goto requeue_drop_frag;
1313 1428
1314 if (sc->rx.frag) { 1429 if (sc->rx.frag) {
1315 int space = skb->len - skb_tailroom(hdr_skb); 1430 int space = skb->len - skb_tailroom(hdr_skb);
@@ -1328,22 +1443,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1328 skb = hdr_skb; 1443 skb = hdr_skb;
1329 } 1444 }
1330 1445
1331
1332 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
1333
1334 /*
1335 * change the default rx antenna if rx diversity
1336 * chooses the other antenna 3 times in a row.
1337 */
1338 if (sc->rx.defant != rs.rs_antenna) {
1339 if (++sc->rx.rxotherant >= 3)
1340 ath_setdefantenna(sc, rs.rs_antenna);
1341 } else {
1342 sc->rx.rxotherant = 0;
1343 }
1344
1345 }
1346
1347 if (rxs->flag & RX_FLAG_MMIC_STRIPPED) 1446 if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
1348 skb_trim(skb, skb->len - 8); 1447 skb_trim(skb, skb->len - 8);
1349 1448
@@ -1355,8 +1454,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1355 ath_rx_ps(sc, skb, rs.is_mybeacon); 1454 ath_rx_ps(sc, skb, rs.is_mybeacon);
1356 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1455 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1357 1456
1358 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3) 1457 ath9k_antenna_check(sc, &rs);
1359 ath_ant_comb_scan(sc, &rs);
1360 1458
1361 ath9k_apply_ampdu_details(sc, &rs, rxs); 1459 ath9k_apply_ampdu_details(sc, &rs, rxs);
1362 1460
@@ -1375,7 +1473,7 @@ requeue:
1375 if (edma) { 1473 if (edma) {
1376 ath_rx_edma_buf_link(sc, qtype); 1474 ath_rx_edma_buf_link(sc, qtype);
1377 } else { 1475 } else {
1378 ath_rx_buf_link(sc, bf); 1476 ath_rx_buf_relink(sc, bf);
1379 ath9k_hw_rxena(ah); 1477 ath9k_hw_rxena(ah);
1380 } 1478 }
1381 } while (1); 1479 } while (1);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 5af97442ac37..a13b2d143d9e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -893,9 +893,9 @@
893 893
894#define AR_SREV_9485(_ah) \ 894#define AR_SREV_9485(_ah) \
895 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485)) 895 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
896#define AR_SREV_9485_11(_ah) \ 896#define AR_SREV_9485_11_OR_LATER(_ah) \
897 (AR_SREV_9485(_ah) && \ 897 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \
898 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11)) 898 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11))
899#define AR_SREV_9485_OR_LATER(_ah) \ 899#define AR_SREV_9485_OR_LATER(_ah) \
900 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485)) 900 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))
901 901
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 927992732620..35b515fe3ffa 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
135 135
136static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) 136static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
137{ 137{
138 if (!tid->an->sta)
139 return;
140
138 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, 141 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
139 seqno << IEEE80211_SEQ_SEQ_SHIFT); 142 seqno << IEEE80211_SEQ_SEQ_SHIFT);
140} 143}
@@ -168,6 +171,71 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
168 } 171 }
169} 172}
170 173
174static struct ath_atx_tid *
175ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
176{
177 struct ieee80211_hdr *hdr;
178 u8 tidno = 0;
179
180 hdr = (struct ieee80211_hdr *) skb->data;
181 if (ieee80211_is_data_qos(hdr->frame_control))
182 tidno = ieee80211_get_qos_ctl(hdr)[0];
183
184 tidno &= IEEE80211_QOS_CTL_TID_MASK;
185 return ATH_AN_2_TID(an, tidno);
186}
187
188static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
189{
190 return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
191}
192
193static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
194{
195 struct sk_buff *skb;
196
197 skb = __skb_dequeue(&tid->retry_q);
198 if (!skb)
199 skb = __skb_dequeue(&tid->buf_q);
200
201 return skb;
202}
203
204/*
205 * ath_tx_tid_change_state:
206 * - clears a-mpdu flag of previous session
207 * - force sequence number allocation to fix next BlockAck Window
208 */
209static void
210ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
211{
212 struct ath_txq *txq = tid->ac->txq;
213 struct ieee80211_tx_info *tx_info;
214 struct sk_buff *skb, *tskb;
215 struct ath_buf *bf;
216 struct ath_frame_info *fi;
217
218 skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
219 fi = get_frame_info(skb);
220 bf = fi->bf;
221
222 tx_info = IEEE80211_SKB_CB(skb);
223 tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
224
225 if (bf)
226 continue;
227
228 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
229 if (!bf) {
230 __skb_unlink(skb, &tid->buf_q);
231 ath_txq_skb_done(sc, txq, skb);
232 ieee80211_free_txskb(sc->hw, skb);
233 continue;
234 }
235 }
236
237}
238
171static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 239static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
172{ 240{
173 struct ath_txq *txq = tid->ac->txq; 241 struct ath_txq *txq = tid->ac->txq;
@@ -182,28 +250,22 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
182 250
183 memset(&ts, 0, sizeof(ts)); 251 memset(&ts, 0, sizeof(ts));
184 252
185 while ((skb = __skb_dequeue(&tid->buf_q))) { 253 while ((skb = __skb_dequeue(&tid->retry_q))) {
186 fi = get_frame_info(skb); 254 fi = get_frame_info(skb);
187 bf = fi->bf; 255 bf = fi->bf;
188
189 if (!bf) { 256 if (!bf) {
190 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 257 ath_txq_skb_done(sc, txq, skb);
191 if (!bf) { 258 ieee80211_free_txskb(sc->hw, skb);
192 ath_txq_skb_done(sc, txq, skb); 259 continue;
193 ieee80211_free_txskb(sc->hw, skb);
194 continue;
195 }
196 } 260 }
197 261
198 if (fi->retries) { 262 if (fi->baw_tracked) {
199 list_add_tail(&bf->list, &bf_head);
200 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 263 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
201 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
202 sendbar = true; 264 sendbar = true;
203 } else {
204 ath_set_rates(tid->an->vif, tid->an->sta, bf);
205 ath_tx_send_normal(sc, txq, NULL, skb);
206 } 265 }
266
267 list_add_tail(&bf->list, &bf_head);
268 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
207 } 269 }
208 270
209 if (sendbar) { 271 if (sendbar) {
@@ -232,13 +294,16 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
232} 294}
233 295
234static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 296static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
235 u16 seqno) 297 struct ath_buf *bf)
236{ 298{
299 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
300 u16 seqno = bf->bf_state.seqno;
237 int index, cindex; 301 int index, cindex;
238 302
239 index = ATH_BA_INDEX(tid->seq_start, seqno); 303 index = ATH_BA_INDEX(tid->seq_start, seqno);
240 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 304 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
241 __set_bit(cindex, tid->tx_buf); 305 __set_bit(cindex, tid->tx_buf);
306 fi->baw_tracked = 1;
242 307
243 if (index >= ((tid->baw_tail - tid->baw_head) & 308 if (index >= ((tid->baw_tail - tid->baw_head) &
244 (ATH_TID_MAX_BUFS - 1))) { 309 (ATH_TID_MAX_BUFS - 1))) {
@@ -247,12 +312,6 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
247 } 312 }
248} 313}
249 314
250/*
251 * TODO: For frame(s) that are in the retry state, we will reuse the
252 * sequence number(s) without setting the retry bit. The
253 * alternative is to give up on these and BAR the receiver's window
254 * forward.
255 */
256static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, 315static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
257 struct ath_atx_tid *tid) 316 struct ath_atx_tid *tid)
258 317
@@ -266,7 +325,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
266 memset(&ts, 0, sizeof(ts)); 325 memset(&ts, 0, sizeof(ts));
267 INIT_LIST_HEAD(&bf_head); 326 INIT_LIST_HEAD(&bf_head);
268 327
269 while ((skb = __skb_dequeue(&tid->buf_q))) { 328 while ((skb = ath_tid_dequeue(tid))) {
270 fi = get_frame_info(skb); 329 fi = get_frame_info(skb);
271 bf = fi->bf; 330 bf = fi->bf;
272 331
@@ -276,14 +335,8 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
276 } 335 }
277 336
278 list_add_tail(&bf->list, &bf_head); 337 list_add_tail(&bf->list, &bf_head);
279
280 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
281 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 338 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
282 } 339 }
283
284 tid->seq_next = tid->seq_start;
285 tid->baw_tail = tid->baw_head;
286 tid->bar_index = -1;
287} 340}
288 341
289static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 342static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
@@ -403,7 +456,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
403 struct ieee80211_tx_rate rates[4]; 456 struct ieee80211_tx_rate rates[4];
404 struct ath_frame_info *fi; 457 struct ath_frame_info *fi;
405 int nframes; 458 int nframes;
406 u8 tidno;
407 bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); 459 bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
408 int i, retries; 460 int i, retries;
409 int bar_index = -1; 461 int bar_index = -1;
@@ -429,7 +481,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
429 while (bf) { 481 while (bf) {
430 bf_next = bf->bf_next; 482 bf_next = bf->bf_next;
431 483
432 if (!bf->bf_stale || bf_next != NULL) 484 if (!bf->bf_state.stale || bf_next != NULL)
433 list_move_tail(&bf->list, &bf_head); 485 list_move_tail(&bf->list, &bf_head);
434 486
435 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0); 487 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
@@ -440,8 +492,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
440 } 492 }
441 493
442 an = (struct ath_node *)sta->drv_priv; 494 an = (struct ath_node *)sta->drv_priv;
443 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 495 tid = ath_get_skb_tid(sc, an, skb);
444 tid = ATH_AN_2_TID(an, tidno);
445 seq_first = tid->seq_start; 496 seq_first = tid->seq_start;
446 isba = ts->ts_flags & ATH9K_TX_BA; 497 isba = ts->ts_flags & ATH9K_TX_BA;
447 498
@@ -453,7 +504,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
453 * Only BlockAcks have a TID and therefore normal Acks cannot be 504 * Only BlockAcks have a TID and therefore normal Acks cannot be
454 * checked 505 * checked
455 */ 506 */
456 if (isba && tidno != ts->tid) 507 if (isba && tid->tidno != ts->tid)
457 txok = false; 508 txok = false;
458 509
459 isaggr = bf_isaggr(bf); 510 isaggr = bf_isaggr(bf);
@@ -489,7 +540,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
489 tx_info = IEEE80211_SKB_CB(skb); 540 tx_info = IEEE80211_SKB_CB(skb);
490 fi = get_frame_info(skb); 541 fi = get_frame_info(skb);
491 542
492 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { 543 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
544 !tid->active) {
493 /* 545 /*
494 * Outside of the current BlockAck window, 546 * Outside of the current BlockAck window,
495 * maybe part of a previous session 547 * maybe part of a previous session
@@ -522,7 +574,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
522 * not a holding desc. 574 * not a holding desc.
523 */ 575 */
524 INIT_LIST_HEAD(&bf_head); 576 INIT_LIST_HEAD(&bf_head);
525 if (bf_next != NULL || !bf_last->bf_stale) 577 if (bf_next != NULL || !bf_last->bf_state.stale)
526 list_move_tail(&bf->list, &bf_head); 578 list_move_tail(&bf->list, &bf_head);
527 579
528 if (!txpending) { 580 if (!txpending) {
@@ -546,7 +598,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
546 ieee80211_sta_eosp(sta); 598 ieee80211_sta_eosp(sta);
547 } 599 }
548 /* retry the un-acked ones */ 600 /* retry the un-acked ones */
549 if (bf->bf_next == NULL && bf_last->bf_stale) { 601 if (bf->bf_next == NULL && bf_last->bf_state.stale) {
550 struct ath_buf *tbf; 602 struct ath_buf *tbf;
551 603
552 tbf = ath_clone_txbuf(sc, bf_last); 604 tbf = ath_clone_txbuf(sc, bf_last);
@@ -583,7 +635,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
583 if (an->sleeping) 635 if (an->sleeping)
584 ieee80211_sta_set_buffered(sta, tid->tidno, true); 636 ieee80211_sta_set_buffered(sta, tid->tidno, true);
585 637
586 skb_queue_splice(&bf_pending, &tid->buf_q); 638 skb_queue_splice_tail(&bf_pending, &tid->retry_q);
587 if (!an->sleeping) { 639 if (!an->sleeping) {
588 ath_tx_queue_tid(txq, tid); 640 ath_tx_queue_tid(txq, tid);
589 641
@@ -641,7 +693,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
641 } else 693 } else
642 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); 694 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
643 695
644 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush) 696 if (!flush)
645 ath_txq_schedule(sc, txq); 697 ath_txq_schedule(sc, txq);
646} 698}
647 699
@@ -815,15 +867,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
815 867
816static struct ath_buf * 868static struct ath_buf *
817ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, 869ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
818 struct ath_atx_tid *tid) 870 struct ath_atx_tid *tid, struct sk_buff_head **q)
819{ 871{
872 struct ieee80211_tx_info *tx_info;
820 struct ath_frame_info *fi; 873 struct ath_frame_info *fi;
821 struct sk_buff *skb; 874 struct sk_buff *skb;
822 struct ath_buf *bf; 875 struct ath_buf *bf;
823 u16 seqno; 876 u16 seqno;
824 877
825 while (1) { 878 while (1) {
826 skb = skb_peek(&tid->buf_q); 879 *q = &tid->retry_q;
880 if (skb_queue_empty(*q))
881 *q = &tid->buf_q;
882
883 skb = skb_peek(*q);
827 if (!skb) 884 if (!skb)
828 break; 885 break;
829 886
@@ -831,14 +888,26 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
831 bf = fi->bf; 888 bf = fi->bf;
832 if (!fi->bf) 889 if (!fi->bf)
833 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 890 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
891 else
892 bf->bf_state.stale = false;
834 893
835 if (!bf) { 894 if (!bf) {
836 __skb_unlink(skb, &tid->buf_q); 895 __skb_unlink(skb, *q);
837 ath_txq_skb_done(sc, txq, skb); 896 ath_txq_skb_done(sc, txq, skb);
838 ieee80211_free_txskb(sc->hw, skb); 897 ieee80211_free_txskb(sc->hw, skb);
839 continue; 898 continue;
840 } 899 }
841 900
901 bf->bf_next = NULL;
902 bf->bf_lastbf = bf;
903
904 tx_info = IEEE80211_SKB_CB(skb);
905 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
906 if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
907 bf->bf_state.bf_type = 0;
908 return bf;
909 }
910
842 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; 911 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
843 seqno = bf->bf_state.seqno; 912 seqno = bf->bf_state.seqno;
844 913
@@ -852,73 +921,52 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
852 921
853 INIT_LIST_HEAD(&bf_head); 922 INIT_LIST_HEAD(&bf_head);
854 list_add(&bf->list, &bf_head); 923 list_add(&bf->list, &bf_head);
855 __skb_unlink(skb, &tid->buf_q); 924 __skb_unlink(skb, *q);
856 ath_tx_update_baw(sc, tid, seqno); 925 ath_tx_update_baw(sc, tid, seqno);
857 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 926 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
858 continue; 927 continue;
859 } 928 }
860 929
861 bf->bf_next = NULL;
862 bf->bf_lastbf = bf;
863 return bf; 930 return bf;
864 } 931 }
865 932
866 return NULL; 933 return NULL;
867} 934}
868 935
869static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 936static bool
870 struct ath_txq *txq, 937ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
871 struct ath_atx_tid *tid, 938 struct ath_atx_tid *tid, struct list_head *bf_q,
872 struct list_head *bf_q, 939 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
873 int *aggr_len) 940 int *aggr_len)
874{ 941{
875#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 942#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
876 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; 943 struct ath_buf *bf = bf_first, *bf_prev = NULL;
877 int rl = 0, nframes = 0, ndelim, prev_al = 0; 944 int nframes = 0, ndelim;
878 u16 aggr_limit = 0, al = 0, bpad = 0, 945 u16 aggr_limit = 0, al = 0, bpad = 0,
879 al_delta, h_baw = tid->baw_size / 2; 946 al_delta, h_baw = tid->baw_size / 2;
880 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
881 struct ieee80211_tx_info *tx_info; 947 struct ieee80211_tx_info *tx_info;
882 struct ath_frame_info *fi; 948 struct ath_frame_info *fi;
883 struct sk_buff *skb; 949 struct sk_buff *skb;
950 bool closed = false;
884 951
885 do { 952 bf = bf_first;
886 bf = ath_tx_get_tid_subframe(sc, txq, tid); 953 aggr_limit = ath_lookup_rate(sc, bf, tid);
887 if (!bf) {
888 status = ATH_AGGR_BAW_CLOSED;
889 break;
890 }
891 954
955 do {
892 skb = bf->bf_mpdu; 956 skb = bf->bf_mpdu;
893 fi = get_frame_info(skb); 957 fi = get_frame_info(skb);
894 958
895 if (!bf_first)
896 bf_first = bf;
897
898 if (!rl) {
899 ath_set_rates(tid->an->vif, tid->an->sta, bf);
900 aggr_limit = ath_lookup_rate(sc, bf, tid);
901 rl = 1;
902 }
903
904 /* do not exceed aggregation limit */ 959 /* do not exceed aggregation limit */
905 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; 960 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
961 if (nframes) {
962 if (aggr_limit < al + bpad + al_delta ||
963 ath_lookup_legacy(bf) || nframes >= h_baw)
964 break;
906 965
907 if (nframes && 966 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
908 ((aggr_limit < (al + bpad + al_delta + prev_al)) || 967 if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
909 ath_lookup_legacy(bf))) { 968 !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
910 status = ATH_AGGR_LIMITED; 969 break;
911 break;
912 }
913
914 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
915 if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
916 break;
917
918 /* do not exceed subframe limit */
919 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
920 status = ATH_AGGR_LIMITED;
921 break;
922 } 970 }
923 971
924 /* add padding for previous frame to aggregation length */ 972 /* add padding for previous frame to aggregation length */
@@ -936,22 +984,37 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
936 bf->bf_next = NULL; 984 bf->bf_next = NULL;
937 985
938 /* link buffers of this frame to the aggregate */ 986 /* link buffers of this frame to the aggregate */
939 if (!fi->retries) 987 if (!fi->baw_tracked)
940 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 988 ath_tx_addto_baw(sc, tid, bf);
941 bf->bf_state.ndelim = ndelim; 989 bf->bf_state.ndelim = ndelim;
942 990
943 __skb_unlink(skb, &tid->buf_q); 991 __skb_unlink(skb, tid_q);
944 list_add_tail(&bf->list, bf_q); 992 list_add_tail(&bf->list, bf_q);
945 if (bf_prev) 993 if (bf_prev)
946 bf_prev->bf_next = bf; 994 bf_prev->bf_next = bf;
947 995
948 bf_prev = bf; 996 bf_prev = bf;
949 997
950 } while (!skb_queue_empty(&tid->buf_q)); 998 bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
999 if (!bf) {
1000 closed = true;
1001 break;
1002 }
1003 } while (ath_tid_has_buffered(tid));
1004
1005 bf = bf_first;
1006 bf->bf_lastbf = bf_prev;
1007
1008 if (bf == bf_prev) {
1009 al = get_frame_info(bf->bf_mpdu)->framelen;
1010 bf->bf_state.bf_type = BUF_AMPDU;
1011 } else {
1012 TX_STAT_INC(txq->axq_qnum, a_aggr);
1013 }
951 1014
952 *aggr_len = al; 1015 *aggr_len = al;
953 1016
954 return status; 1017 return closed;
955#undef PADBYTES 1018#undef PADBYTES
956} 1019}
957 1020
@@ -1023,7 +1086,7 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
1023} 1086}
1024 1087
1025static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, 1088static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1026 struct ath_tx_info *info, int len) 1089 struct ath_tx_info *info, int len, bool rts)
1027{ 1090{
1028 struct ath_hw *ah = sc->sc_ah; 1091 struct ath_hw *ah = sc->sc_ah;
1029 struct sk_buff *skb; 1092 struct sk_buff *skb;
@@ -1032,6 +1095,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1032 const struct ieee80211_rate *rate; 1095 const struct ieee80211_rate *rate;
1033 struct ieee80211_hdr *hdr; 1096 struct ieee80211_hdr *hdr;
1034 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 1097 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
1098 u32 rts_thresh = sc->hw->wiphy->rts_threshold;
1035 int i; 1099 int i;
1036 u8 rix = 0; 1100 u8 rix = 0;
1037 1101
@@ -1054,7 +1118,17 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1054 rix = rates[i].idx; 1118 rix = rates[i].idx;
1055 info->rates[i].Tries = rates[i].count; 1119 info->rates[i].Tries = rates[i].count;
1056 1120
1057 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 1121 /*
1122 * Handle RTS threshold for unaggregated HT frames.
1123 */
1124 if (bf_isampdu(bf) && !bf_isaggr(bf) &&
1125 (rates[i].flags & IEEE80211_TX_RC_MCS) &&
1126 unlikely(rts_thresh != (u32) -1)) {
1127 if (!rts_thresh || (len > rts_thresh))
1128 rts = true;
1129 }
1130
1131 if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1058 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1132 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1059 info->flags |= ATH9K_TXDESC_RTSENA; 1133 info->flags |= ATH9K_TXDESC_RTSENA;
1060 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 1134 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -1147,6 +1221,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1147 struct ath_hw *ah = sc->sc_ah; 1221 struct ath_hw *ah = sc->sc_ah;
1148 struct ath_buf *bf_first = NULL; 1222 struct ath_buf *bf_first = NULL;
1149 struct ath_tx_info info; 1223 struct ath_tx_info info;
1224 u32 rts_thresh = sc->hw->wiphy->rts_threshold;
1225 bool rts = false;
1150 1226
1151 memset(&info, 0, sizeof(info)); 1227 memset(&info, 0, sizeof(info));
1152 info.is_first = true; 1228 info.is_first = true;
@@ -1183,7 +1259,22 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1183 info.flags |= (u32) bf->bf_state.bfs_paprd << 1259 info.flags |= (u32) bf->bf_state.bfs_paprd <<
1184 ATH9K_TXDESC_PAPRD_S; 1260 ATH9K_TXDESC_PAPRD_S;
1185 1261
1186 ath_buf_set_rate(sc, bf, &info, len); 1262 /*
1263 * mac80211 doesn't handle RTS threshold for HT because
1264 * the decision has to be taken based on AMPDU length
1265 * and aggregation is done entirely inside ath9k.
1266 * Set the RTS/CTS flag for the first subframe based
1267 * on the threshold.
1268 */
1269 if (aggr && (bf == bf_first) &&
1270 unlikely(rts_thresh != (u32) -1)) {
1271 /*
1272 * "len" is the size of the entire AMPDU.
1273 */
1274 if (!rts_thresh || (len > rts_thresh))
1275 rts = true;
1276 }
1277 ath_buf_set_rate(sc, bf, &info, len, rts);
1187 } 1278 }
1188 1279
1189 info.buf_addr[0] = bf->bf_buf_addr; 1280 info.buf_addr[0] = bf->bf_buf_addr;
@@ -1212,53 +1303,86 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1212 } 1303 }
1213} 1304}
1214 1305
1215static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, 1306static void
1216 struct ath_atx_tid *tid) 1307ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
1308 struct ath_atx_tid *tid, struct list_head *bf_q,
1309 struct ath_buf *bf_first, struct sk_buff_head *tid_q)
1217{ 1310{
1218 struct ath_buf *bf; 1311 struct ath_buf *bf = bf_first, *bf_prev = NULL;
1219 enum ATH_AGGR_STATUS status; 1312 struct sk_buff *skb;
1220 struct ieee80211_tx_info *tx_info; 1313 int nframes = 0;
1221 struct list_head bf_q;
1222 int aggr_len;
1223 1314
1224 do { 1315 do {
1225 if (skb_queue_empty(&tid->buf_q)) 1316 struct ieee80211_tx_info *tx_info;
1226 return; 1317 skb = bf->bf_mpdu;
1227 1318
1228 INIT_LIST_HEAD(&bf_q); 1319 nframes++;
1320 __skb_unlink(skb, tid_q);
1321 list_add_tail(&bf->list, bf_q);
1322 if (bf_prev)
1323 bf_prev->bf_next = bf;
1324 bf_prev = bf;
1229 1325
1230 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len); 1326 if (nframes >= 2)
1327 break;
1231 1328
1232 /* 1329 bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
1233 * no frames picked up to be aggregated; 1330 if (!bf)
1234 * block-ack window is not open.
1235 */
1236 if (list_empty(&bf_q))
1237 break; 1331 break;
1238 1332
1239 bf = list_first_entry(&bf_q, struct ath_buf, list);
1240 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
1241 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1333 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1334 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
1335 break;
1242 1336
1243 if (tid->ac->clear_ps_filter) { 1337 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1244 tid->ac->clear_ps_filter = false; 1338 } while (1);
1245 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1339}
1246 } else {
1247 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
1248 }
1249 1340
1250 /* if only one frame, send as non-aggregate */ 1341static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1251 if (bf == bf->bf_lastbf) { 1342 struct ath_atx_tid *tid, bool *stop)
1252 aggr_len = get_frame_info(bf->bf_mpdu)->framelen; 1343{
1253 bf->bf_state.bf_type = BUF_AMPDU; 1344 struct ath_buf *bf;
1254 } else { 1345 struct ieee80211_tx_info *tx_info;
1255 TX_STAT_INC(txq->axq_qnum, a_aggr); 1346 struct sk_buff_head *tid_q;
1256 } 1347 struct list_head bf_q;
1348 int aggr_len = 0;
1349 bool aggr, last = true;
1350
1351 if (!ath_tid_has_buffered(tid))
1352 return false;
1353
1354 INIT_LIST_HEAD(&bf_q);
1355
1356 bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
1357 if (!bf)
1358 return false;
1359
1360 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1361 aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
1362 if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
1363 (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
1364 *stop = true;
1365 return false;
1366 }
1257 1367
1258 ath_tx_fill_desc(sc, bf, txq, aggr_len); 1368 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1259 ath_tx_txqaddbuf(sc, txq, &bf_q, false); 1369 if (aggr)
1260 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && 1370 last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
1261 status != ATH_AGGR_BAW_CLOSED); 1371 tid_q, &aggr_len);
1372 else
1373 ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
1374
1375 if (list_empty(&bf_q))
1376 return false;
1377
1378 if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
1379 tid->ac->clear_ps_filter = false;
1380 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1381 }
1382
1383 ath_tx_fill_desc(sc, bf, txq, aggr_len);
1384 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1385 return true;
1262} 1386}
1263 1387
1264int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 1388int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1282,6 +1406,9 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1282 an->mpdudensity = density; 1406 an->mpdudensity = density;
1283 } 1407 }
1284 1408
1409 /* force sequence number allocation for pending frames */
1410 ath_tx_tid_change_state(sc, txtid);
1411
1285 txtid->active = true; 1412 txtid->active = true;
1286 txtid->paused = true; 1413 txtid->paused = true;
1287 *ssn = txtid->seq_start = txtid->seq_next; 1414 *ssn = txtid->seq_start = txtid->seq_next;
@@ -1301,8 +1428,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1301 1428
1302 ath_txq_lock(sc, txq); 1429 ath_txq_lock(sc, txq);
1303 txtid->active = false; 1430 txtid->active = false;
1304 txtid->paused = true; 1431 txtid->paused = false;
1305 ath_tx_flush_tid(sc, txtid); 1432 ath_tx_flush_tid(sc, txtid);
1433 ath_tx_tid_change_state(sc, txtid);
1306 ath_txq_unlock_complete(sc, txq); 1434 ath_txq_unlock_complete(sc, txq);
1307} 1435}
1308 1436
@@ -1326,7 +1454,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1326 1454
1327 ath_txq_lock(sc, txq); 1455 ath_txq_lock(sc, txq);
1328 1456
1329 buffered = !skb_queue_empty(&tid->buf_q); 1457 buffered = ath_tid_has_buffered(tid);
1330 1458
1331 tid->sched = false; 1459 tid->sched = false;
1332 list_del(&tid->list); 1460 list_del(&tid->list);
@@ -1358,7 +1486,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1358 ath_txq_lock(sc, txq); 1486 ath_txq_lock(sc, txq);
1359 ac->clear_ps_filter = true; 1487 ac->clear_ps_filter = true;
1360 1488
1361 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { 1489 if (!tid->paused && ath_tid_has_buffered(tid)) {
1362 ath_tx_queue_tid(txq, tid); 1490 ath_tx_queue_tid(txq, tid);
1363 ath_txq_schedule(sc, txq); 1491 ath_txq_schedule(sc, txq);
1364 } 1492 }
@@ -1383,7 +1511,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
1383 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1511 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1384 tid->paused = false; 1512 tid->paused = false;
1385 1513
1386 if (!skb_queue_empty(&tid->buf_q)) { 1514 if (ath_tid_has_buffered(tid)) {
1387 ath_tx_queue_tid(txq, tid); 1515 ath_tx_queue_tid(txq, tid);
1388 ath_txq_schedule(sc, txq); 1516 ath_txq_schedule(sc, txq);
1389 } 1517 }
@@ -1403,6 +1531,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1403 struct ieee80211_tx_info *info; 1531 struct ieee80211_tx_info *info;
1404 struct list_head bf_q; 1532 struct list_head bf_q;
1405 struct ath_buf *bf_tail = NULL, *bf; 1533 struct ath_buf *bf_tail = NULL, *bf;
1534 struct sk_buff_head *tid_q;
1406 int sent = 0; 1535 int sent = 0;
1407 int i; 1536 int i;
1408 1537
@@ -1418,15 +1547,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1418 continue; 1547 continue;
1419 1548
1420 ath_txq_lock(sc, tid->ac->txq); 1549 ath_txq_lock(sc, tid->ac->txq);
1421 while (!skb_queue_empty(&tid->buf_q) && nframes > 0) { 1550 while (nframes > 0) {
1422 bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid); 1551 bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
1423 if (!bf) 1552 if (!bf)
1424 break; 1553 break;
1425 1554
1426 __skb_unlink(bf->bf_mpdu, &tid->buf_q); 1555 __skb_unlink(bf->bf_mpdu, tid_q);
1427 list_add_tail(&bf->list, &bf_q); 1556 list_add_tail(&bf->list, &bf_q);
1428 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1557 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1429 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 1558 ath_tx_addto_baw(sc, tid, bf);
1430 bf->bf_state.bf_type &= ~BUF_AGGR; 1559 bf->bf_state.bf_type &= ~BUF_AGGR;
1431 if (bf_tail) 1560 if (bf_tail)
1432 bf_tail->bf_next = bf; 1561 bf_tail->bf_next = bf;
@@ -1436,7 +1565,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1436 sent++; 1565 sent++;
1437 TX_STAT_INC(txq->axq_qnum, a_queued_hw); 1566 TX_STAT_INC(txq->axq_qnum, a_queued_hw);
1438 1567
1439 if (skb_queue_empty(&tid->buf_q)) 1568 if (an->sta && !ath_tid_has_buffered(tid))
1440 ieee80211_sta_set_buffered(an->sta, i, false); 1569 ieee80211_sta_set_buffered(an->sta, i, false);
1441 } 1570 }
1442 ath_txq_unlock_complete(sc, tid->ac->txq); 1571 ath_txq_unlock_complete(sc, tid->ac->txq);
@@ -1595,7 +1724,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1595 while (!list_empty(list)) { 1724 while (!list_empty(list)) {
1596 bf = list_first_entry(list, struct ath_buf, list); 1725 bf = list_first_entry(list, struct ath_buf, list);
1597 1726
1598 if (bf->bf_stale) { 1727 if (bf->bf_state.stale) {
1599 list_del(&bf->list); 1728 list_del(&bf->list);
1600 1729
1601 ath_tx_return_buffer(sc, bf); 1730 ath_tx_return_buffer(sc, bf);
@@ -1689,25 +1818,27 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1689 */ 1818 */
1690void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1819void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1691{ 1820{
1692 struct ath_atx_ac *ac, *ac_tmp, *last_ac; 1821 struct ath_atx_ac *ac, *last_ac;
1693 struct ath_atx_tid *tid, *last_tid; 1822 struct ath_atx_tid *tid, *last_tid;
1823 bool sent = false;
1694 1824
1695 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) || 1825 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
1696 list_empty(&txq->axq_acq) || 1826 list_empty(&txq->axq_acq))
1697 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1698 return; 1827 return;
1699 1828
1700 rcu_read_lock(); 1829 rcu_read_lock();
1701 1830
1702 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1703 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1831 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1832 while (!list_empty(&txq->axq_acq)) {
1833 bool stop = false;
1704 1834
1705 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 1835 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1706 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); 1836 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1707 list_del(&ac->list); 1837 list_del(&ac->list);
1708 ac->sched = false; 1838 ac->sched = false;
1709 1839
1710 while (!list_empty(&ac->tid_q)) { 1840 while (!list_empty(&ac->tid_q)) {
1841
1711 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, 1842 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1712 list); 1843 list);
1713 list_del(&tid->list); 1844 list_del(&tid->list);
@@ -1716,17 +1847,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1716 if (tid->paused) 1847 if (tid->paused)
1717 continue; 1848 continue;
1718 1849
1719 ath_tx_sched_aggr(sc, txq, tid); 1850 if (ath_tx_sched_aggr(sc, txq, tid, &stop))
1851 sent = true;
1720 1852
1721 /* 1853 /*
1722 * add tid to round-robin queue if more frames 1854 * add tid to round-robin queue if more frames
1723 * are pending for the tid 1855 * are pending for the tid
1724 */ 1856 */
1725 if (!skb_queue_empty(&tid->buf_q)) 1857 if (ath_tid_has_buffered(tid))
1726 ath_tx_queue_tid(txq, tid); 1858 ath_tx_queue_tid(txq, tid);
1727 1859
1728 if (tid == last_tid || 1860 if (stop || tid == last_tid)
1729 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1730 break; 1861 break;
1731 } 1862 }
1732 1863
@@ -1735,9 +1866,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1735 list_add_tail(&ac->list, &txq->axq_acq); 1866 list_add_tail(&ac->list, &txq->axq_acq);
1736 } 1867 }
1737 1868
1738 if (ac == last_ac || 1869 if (stop)
1739 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1740 break; 1870 break;
1871
1872 if (ac == last_ac) {
1873 if (!sent)
1874 break;
1875
1876 sent = false;
1877 last_ac = list_entry(txq->axq_acq.prev,
1878 struct ath_atx_ac, list);
1879 }
1741 } 1880 }
1742 1881
1743 rcu_read_unlock(); 1882 rcu_read_unlock();
@@ -1816,58 +1955,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1816 } 1955 }
1817} 1956}
1818 1957
1819static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
1820 struct ath_atx_tid *tid, struct sk_buff *skb,
1821 struct ath_tx_control *txctl)
1822{
1823 struct ath_frame_info *fi = get_frame_info(skb);
1824 struct list_head bf_head;
1825 struct ath_buf *bf;
1826
1827 /*
1828 * Do not queue to h/w when any of the following conditions is true:
1829 * - there are pending frames in software queue
1830 * - the TID is currently paused for ADDBA/BAR request
1831 * - seqno is not within block-ack window
1832 * - h/w queue depth exceeds low water mark
1833 */
1834 if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
1835 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
1836 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
1837 txq != sc->tx.uapsdq) {
1838 /*
1839 * Add this frame to software queue for scheduling later
1840 * for aggregation.
1841 */
1842 TX_STAT_INC(txq->axq_qnum, a_queued_sw);
1843 __skb_queue_tail(&tid->buf_q, skb);
1844 if (!txctl->an || !txctl->an->sleeping)
1845 ath_tx_queue_tid(txq, tid);
1846 return;
1847 }
1848
1849 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1850 if (!bf) {
1851 ath_txq_skb_done(sc, txq, skb);
1852 ieee80211_free_txskb(sc->hw, skb);
1853 return;
1854 }
1855
1856 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1857 bf->bf_state.bf_type = BUF_AMPDU;
1858 INIT_LIST_HEAD(&bf_head);
1859 list_add(&bf->list, &bf_head);
1860
1861 /* Add sub-frame to BAW */
1862 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
1863
1864 /* Queue to h/w without aggregation */
1865 TX_STAT_INC(txq->axq_qnum, a_queued_hw);
1866 bf->bf_lastbf = bf;
1867 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1868 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
1869}
1870
1871static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1958static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1872 struct ath_atx_tid *tid, struct sk_buff *skb) 1959 struct ath_atx_tid *tid, struct sk_buff *skb)
1873{ 1960{
@@ -2010,6 +2097,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
2010 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2097 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2011 struct ieee80211_sta *sta = txctl->sta; 2098 struct ieee80211_sta *sta = txctl->sta;
2012 struct ieee80211_vif *vif = info->control.vif; 2099 struct ieee80211_vif *vif = info->control.vif;
2100 struct ath_vif *avp;
2013 struct ath_softc *sc = hw->priv; 2101 struct ath_softc *sc = hw->priv;
2014 int frmlen = skb->len + FCS_LEN; 2102 int frmlen = skb->len + FCS_LEN;
2015 int padpos, padsize; 2103 int padpos, padsize;
@@ -2017,6 +2105,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
2017 /* NOTE: sta can be NULL according to net/mac80211.h */ 2105 /* NOTE: sta can be NULL according to net/mac80211.h */
2018 if (sta) 2106 if (sta)
2019 txctl->an = (struct ath_node *)sta->drv_priv; 2107 txctl->an = (struct ath_node *)sta->drv_priv;
2108 else if (vif && ieee80211_is_data(hdr->frame_control)) {
2109 avp = (void *)vif->drv_priv;
2110 txctl->an = &avp->mcast_node;
2111 }
2020 2112
2021 if (info->control.hw_key) 2113 if (info->control.hw_key)
2022 frmlen += info->control.hw_key->icv_len; 2114 frmlen += info->control.hw_key->icv_len;
@@ -2066,7 +2158,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2066 struct ath_txq *txq = txctl->txq; 2158 struct ath_txq *txq = txctl->txq;
2067 struct ath_atx_tid *tid = NULL; 2159 struct ath_atx_tid *tid = NULL;
2068 struct ath_buf *bf; 2160 struct ath_buf *bf;
2069 u8 tidno;
2070 int q; 2161 int q;
2071 int ret; 2162 int ret;
2072 2163
@@ -2094,22 +2185,25 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2094 ath_txq_unlock(sc, txq); 2185 ath_txq_unlock(sc, txq);
2095 txq = sc->tx.uapsdq; 2186 txq = sc->tx.uapsdq;
2096 ath_txq_lock(sc, txq); 2187 ath_txq_lock(sc, txq);
2097 } 2188 } else if (txctl->an &&
2098 2189 ieee80211_is_data_present(hdr->frame_control)) {
2099 if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { 2190 tid = ath_get_skb_tid(sc, txctl->an, skb);
2100 tidno = ieee80211_get_qos_ctl(hdr)[0] &
2101 IEEE80211_QOS_CTL_TID_MASK;
2102 tid = ATH_AN_2_TID(txctl->an, tidno);
2103 2191
2104 WARN_ON(tid->ac->txq != txctl->txq); 2192 WARN_ON(tid->ac->txq != txctl->txq);
2105 }
2106 2193
2107 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) { 2194 if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
2195 tid->ac->clear_ps_filter = true;
2196
2108 /* 2197 /*
2109 * Try aggregation if it's a unicast data frame 2198 * Add this frame to software queue for scheduling later
2110 * and the destination is HT capable. 2199 * for aggregation.
2111 */ 2200 */
2112 ath_tx_send_ampdu(sc, txq, tid, skb, txctl); 2201 TX_STAT_INC(txq->axq_qnum, a_queued_sw);
2202 __skb_queue_tail(&tid->buf_q, skb);
2203 if (!txctl->an->sleeping)
2204 ath_tx_queue_tid(txq, tid);
2205
2206 ath_txq_schedule(sc, txq);
2113 goto out; 2207 goto out;
2114 } 2208 }
2115 2209
@@ -2168,7 +2262,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2168 2262
2169 bf->bf_lastbf = bf; 2263 bf->bf_lastbf = bf;
2170 ath_set_rates(vif, NULL, bf); 2264 ath_set_rates(vif, NULL, bf);
2171 ath_buf_set_rate(sc, bf, &info, fi->framelen); 2265 ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2172 duration += info.rates[0].PktDuration; 2266 duration += info.rates[0].PktDuration;
2173 if (bf_tail) 2267 if (bf_tail)
2174 bf_tail->bf_next = bf; 2268 bf_tail->bf_next = bf;
@@ -2372,8 +2466,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2372 2466
2373 if (list_empty(&txq->axq_q)) { 2467 if (list_empty(&txq->axq_q)) {
2374 txq->axq_link = NULL; 2468 txq->axq_link = NULL;
2375 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 2469 ath_txq_schedule(sc, txq);
2376 ath_txq_schedule(sc, txq);
2377 break; 2470 break;
2378 } 2471 }
2379 bf = list_first_entry(&txq->axq_q, struct ath_buf, list); 2472 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
@@ -2387,7 +2480,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2387 * it with the STALE flag. 2480 * it with the STALE flag.
2388 */ 2481 */
2389 bf_held = NULL; 2482 bf_held = NULL;
2390 if (bf->bf_stale) { 2483 if (bf->bf_state.stale) {
2391 bf_held = bf; 2484 bf_held = bf;
2392 if (list_is_last(&bf_held->list, &txq->axq_q)) 2485 if (list_is_last(&bf_held->list, &txq->axq_q))
2393 break; 2486 break;
@@ -2411,7 +2504,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2411 * however leave the last descriptor back as the holding 2504 * however leave the last descriptor back as the holding
2412 * descriptor for hw. 2505 * descriptor for hw.
2413 */ 2506 */
2414 lastbf->bf_stale = true; 2507 lastbf->bf_state.stale = true;
2415 INIT_LIST_HEAD(&bf_head); 2508 INIT_LIST_HEAD(&bf_head);
2416 if (!list_is_singular(&lastbf->list)) 2509 if (!list_is_singular(&lastbf->list))
2417 list_cut_position(&bf_head, 2510 list_cut_position(&bf_head,
@@ -2466,6 +2559,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2466 if (ts.qid == sc->beacon.beaconq) { 2559 if (ts.qid == sc->beacon.beaconq) {
2467 sc->beacon.tx_processed = true; 2560 sc->beacon.tx_processed = true;
2468 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); 2561 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2562
2563 ath9k_csa_is_finished(sc);
2469 continue; 2564 continue;
2470 } 2565 }
2471 2566
@@ -2482,7 +2577,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2482 } 2577 }
2483 2578
2484 bf = list_first_entry(fifo_list, struct ath_buf, list); 2579 bf = list_first_entry(fifo_list, struct ath_buf, list);
2485 if (bf->bf_stale) { 2580 if (bf->bf_state.stale) {
2486 list_del(&bf->list); 2581 list_del(&bf->list);
2487 ath_tx_return_buffer(sc, bf); 2582 ath_tx_return_buffer(sc, bf);
2488 bf = list_first_entry(fifo_list, struct ath_buf, list); 2583 bf = list_first_entry(fifo_list, struct ath_buf, list);
@@ -2504,7 +2599,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2504 ath_tx_txqaddbuf(sc, txq, &bf_q, true); 2599 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2505 } 2600 }
2506 } else { 2601 } else {
2507 lastbf->bf_stale = true; 2602 lastbf->bf_state.stale = true;
2508 if (bf != lastbf) 2603 if (bf != lastbf)
2509 list_cut_position(&bf_head, fifo_list, 2604 list_cut_position(&bf_head, fifo_list,
2510 lastbf->list.prev); 2605 lastbf->list.prev);
@@ -2595,6 +2690,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2595 tid->paused = false; 2690 tid->paused = false;
2596 tid->active = false; 2691 tid->active = false;
2597 __skb_queue_head_init(&tid->buf_q); 2692 __skb_queue_head_init(&tid->buf_q);
2693 __skb_queue_head_init(&tid->retry_q);
2598 acno = TID_TO_WME_AC(tidno); 2694 acno = TID_TO_WME_AC(tidno);
2599 tid->ac = &an->ac[acno]; 2695 tid->ac = &an->ac[acno];
2600 } 2696 }
@@ -2602,6 +2698,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2602 for (acno = 0, ac = &an->ac[acno]; 2698 for (acno = 0, ac = &an->ac[acno];
2603 acno < IEEE80211_NUM_ACS; acno++, ac++) { 2699 acno < IEEE80211_NUM_ACS; acno++, ac++) {
2604 ac->sched = false; 2700 ac->sched = false;
2701 ac->clear_ps_filter = true;
2605 ac->txq = sc->tx.txq_map[acno]; 2702 ac->txq = sc->tx.txq_map[acno];
2606 INIT_LIST_HEAD(&ac->tid_q); 2703 INIT_LIST_HEAD(&ac->tid_q);
2607 } 2704 }
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 4684dd989496..e935f61c7fad 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -602,8 +602,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
602 602
603 if (bar->start_seq_num == entry_bar->start_seq_num && 603 if (bar->start_seq_num == entry_bar->start_seq_num &&
604 TID_CHECK(bar->control, entry_bar->control) && 604 TID_CHECK(bar->control, entry_bar->control) &&
605 compare_ether_addr(bar->ra, entry_bar->ta) == 0 && 605 ether_addr_equal(bar->ra, entry_bar->ta) &&
606 compare_ether_addr(bar->ta, entry_bar->ra) == 0) { 606 ether_addr_equal(bar->ta, entry_bar->ra)) {
607 struct ieee80211_tx_info *tx_info; 607 struct ieee80211_tx_info *tx_info;
608 608
609 tx_info = IEEE80211_SKB_CB(entry_skb); 609 tx_info = IEEE80211_SKB_CB(entry_skb);
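The conversion also flips the test polarity: compare_ether_addr() had memcmp()-style semantics (0 on match), whereas ether_addr_equal() returns true on match, so the `== 0` comparisons drop out. Functionally it behaves like this sketch (not the optimized <linux/etherdevice.h> implementation):

	static inline bool ether_addr_equal_sketch(const u8 *addr1,
						   const u8 *addr2)
	{
		return memcmp(addr1, addr2, ETH_ALEN) == 0;	/* 6 bytes */
	}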
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index f891d514d881..990dd42ae79e 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -11,9 +11,6 @@ wil6210-y += txrx.o
11wil6210-y += debug.o 11wil6210-y += debug.o
12wil6210-$(CONFIG_WIL6210_TRACING) += trace.o 12wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
13 13
14ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
15 subdir-ccflags-y += -Werror
16endif
17# for tracing framework to find trace.h 14# for tracing framework to find trace.h
18CFLAGS_trace.o := -I$(src) 15CFLAGS_trace.o := -I$(src)
19 16
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ab636767fbde..1caa31992a7e 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -51,7 +51,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
51 if ((i % 64) == 0 && (i != 0)) 51 if ((i % 64) == 0 && (i != 0))
52 seq_printf(s, "\n"); 52 seq_printf(s, "\n");
53 seq_printf(s, "%s", (d->dma.status & BIT(0)) ? 53 seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
54 "S" : (vring->ctx[i] ? "H" : "h")); 54 "S" : (vring->ctx[i].skb ? "H" : "h"));
55 } 55 }
56 seq_printf(s, "\n"); 56 seq_printf(s, "\n");
57 } 57 }
@@ -406,7 +406,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
406 volatile struct vring_tx_desc *d = 406 volatile struct vring_tx_desc *d =
407 &(vring->va[dbg_txdesc_index].tx); 407 &(vring->va[dbg_txdesc_index].tx);
408 volatile u32 *u = (volatile u32 *)d; 408 volatile u32 *u = (volatile u32 *)d;
409 struct sk_buff *skb = vring->ctx[dbg_txdesc_index]; 409 struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
410 410
411 seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index); 411 seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
412 seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n", 412 seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 29dd1e58cb17..717178f09aa8 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -127,6 +127,8 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
127 127
128 ndev->netdev_ops = &wil_netdev_ops; 128 ndev->netdev_ops = &wil_netdev_ops;
129 ndev->ieee80211_ptr = wdev; 129 ndev->ieee80211_ptr = wdev;
130 ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
131 ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
130 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 132 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
131 wdev->netdev = ndev; 133 wdev->netdev = ndev;
132 134
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index eff1239be53a..e59239d22b94 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -37,36 +37,40 @@ static inline void trace_ ## name(proto) {}
37#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */ 37#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
38 38
39DECLARE_EVENT_CLASS(wil6210_wmi, 39DECLARE_EVENT_CLASS(wil6210_wmi,
40 TP_PROTO(u16 id, void *buf, u16 buf_len), 40 TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
41 41
42 TP_ARGS(id, buf, buf_len), 42 TP_ARGS(wmi, buf, buf_len),
43 43
44 TP_STRUCT__entry( 44 TP_STRUCT__entry(
45 __field(u8, mid)
45 __field(u16, id) 46 __field(u16, id)
47 __field(u32, timestamp)
46 __field(u16, buf_len) 48 __field(u16, buf_len)
47 __dynamic_array(u8, buf, buf_len) 49 __dynamic_array(u8, buf, buf_len)
48 ), 50 ),
49 51
50 TP_fast_assign( 52 TP_fast_assign(
51 __entry->id = id; 53 __entry->mid = wmi->mid;
54 __entry->id = le16_to_cpu(wmi->id);
55 __entry->timestamp = le32_to_cpu(wmi->timestamp);
52 __entry->buf_len = buf_len; 56 __entry->buf_len = buf_len;
53 memcpy(__get_dynamic_array(buf), buf, buf_len); 57 memcpy(__get_dynamic_array(buf), buf, buf_len);
54 ), 58 ),
55 59
56 TP_printk( 60 TP_printk(
57 "id 0x%04x len %d", 61 "MID %d id 0x%04x len %d timestamp %d",
58 __entry->id, __entry->buf_len 62 __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
59 ) 63 )
60); 64);
61 65
62DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd, 66DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
63 TP_PROTO(u16 id, void *buf, u16 buf_len), 67 TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
64 TP_ARGS(id, buf, buf_len) 68 TP_ARGS(wmi, buf, buf_len)
65); 69);
66 70
67DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event, 71DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
68 TP_PROTO(u16 id, void *buf, u16 buf_len), 72 TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
69 TP_ARGS(id, buf, buf_len) 73 TP_ARGS(wmi, buf, buf_len)
70); 74);
71 75
72#define WIL6210_MSG_MAX (200) 76#define WIL6210_MSG_MAX (200)
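Passing the whole WMI header into the event class lets TP_fast_assign() capture mid, id and timestamp in one go, with the le16/le32 conversions done at record time instead of at TP_printk() time. The call sites change to match, as the wmi.c hunks later in this diff show:

	trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
	trace_wil6210_wmi_event(wmi, &wmi[1], len - sizeof(*wmi));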
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d240b24e1ccf..d505b2676a73 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -18,6 +18,9 @@
18#include <net/ieee80211_radiotap.h> 18#include <net/ieee80211_radiotap.h>
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/ip.h>
22#include <linux/ipv6.h>
23#include <net/ipv6.h>
21 24
22#include "wil6210.h" 25#include "wil6210.h"
23#include "wmi.h" 26#include "wmi.h"
@@ -70,7 +73,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
70 73
71 vring->swhead = 0; 74 vring->swhead = 0;
72 vring->swtail = 0; 75 vring->swtail = 0;
73 vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL); 76 vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
74 if (!vring->ctx) { 77 if (!vring->ctx) {
75 vring->va = NULL; 78 vring->va = NULL;
76 return -ENOMEM; 79 return -ENOMEM;
@@ -108,39 +111,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
108 111
109 while (!wil_vring_is_empty(vring)) { 112 while (!wil_vring_is_empty(vring)) {
110 dma_addr_t pa; 113 dma_addr_t pa;
111 struct sk_buff *skb;
112 u16 dmalen; 114 u16 dmalen;
115 struct wil_ctx *ctx;
113 116
114 if (tx) { 117 if (tx) {
115 struct vring_tx_desc dd, *d = &dd; 118 struct vring_tx_desc dd, *d = &dd;
116 volatile struct vring_tx_desc *_d = 119 volatile struct vring_tx_desc *_d =
117 &vring->va[vring->swtail].tx; 120 &vring->va[vring->swtail].tx;
118 121
122 ctx = &vring->ctx[vring->swtail];
119 *d = *_d; 123 *d = *_d;
120 pa = wil_desc_addr(&d->dma.addr); 124 pa = wil_desc_addr(&d->dma.addr);
121 dmalen = le16_to_cpu(d->dma.length); 125 dmalen = le16_to_cpu(d->dma.length);
122 skb = vring->ctx[vring->swtail]; 126 if (vring->ctx[vring->swtail].mapped_as_page) {
123 if (skb) {
124 dma_unmap_single(dev, pa, dmalen,
125 DMA_TO_DEVICE);
126 dev_kfree_skb_any(skb);
127 vring->ctx[vring->swtail] = NULL;
128 } else {
129 dma_unmap_page(dev, pa, dmalen, 127 dma_unmap_page(dev, pa, dmalen,
130 DMA_TO_DEVICE); 128 DMA_TO_DEVICE);
129 } else {
130 dma_unmap_single(dev, pa, dmalen,
131 DMA_TO_DEVICE);
131 } 132 }
133 if (ctx->skb)
134 dev_kfree_skb_any(ctx->skb);
132 vring->swtail = wil_vring_next_tail(vring); 135 vring->swtail = wil_vring_next_tail(vring);
133 } else { /* rx */ 136 } else { /* rx */
134 struct vring_rx_desc dd, *d = &dd; 137 struct vring_rx_desc dd, *d = &dd;
135 volatile struct vring_rx_desc *_d = 138 volatile struct vring_rx_desc *_d =
136 &vring->va[vring->swtail].rx; 139 &vring->va[vring->swhead].rx;
137 140
141 ctx = &vring->ctx[vring->swhead];
138 *d = *_d; 142 *d = *_d;
139 pa = wil_desc_addr(&d->dma.addr); 143 pa = wil_desc_addr(&d->dma.addr);
140 dmalen = le16_to_cpu(d->dma.length); 144 dmalen = le16_to_cpu(d->dma.length);
141 skb = vring->ctx[vring->swhead];
142 dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); 145 dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
143 kfree_skb(skb); 146 kfree_skb(ctx->skb);
144 wil_vring_advance_head(vring, 1); 147 wil_vring_advance_head(vring, 1);
145 } 148 }
146 } 149 }
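The rx branch of this hunk appears to be a fix as well as a conversion: the old code read the descriptor at swtail while freeing the skb stored at swhead; both lookups now use swhead. In isolation:

	/* rx side of wil_vring_free(), indices now consistent */
	volatile struct vring_rx_desc *_d = &vring->va[vring->swhead].rx;
	ctx = &vring->ctx[vring->swhead];  /* was: desc at swtail, skb at swhead */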
@@ -187,7 +190,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
187 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ 190 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
188 d->dma.length = cpu_to_le16(sz); 191 d->dma.length = cpu_to_le16(sz);
189 *_d = *d; 192 *_d = *d;
190 vring->ctx[i] = skb; 193 vring->ctx[i].skb = skb;
191 194
192 return 0; 195 return 0;
193} 196}
@@ -352,11 +355,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
352 return NULL; 355 return NULL;
353 } 356 }
354 357
355 skb = vring->ctx[vring->swhead]; 358 skb = vring->ctx[vring->swhead].skb;
356 d = wil_skb_rxdesc(skb); 359 d = wil_skb_rxdesc(skb);
357 *d = *_d; 360 *d = *_d;
358 pa = wil_desc_addr(&d->dma.addr); 361 pa = wil_desc_addr(&d->dma.addr);
359 vring->ctx[vring->swhead] = NULL; 362 vring->ctx[vring->swhead].skb = NULL;
360 wil_vring_advance_head(vring, 1); 363 wil_vring_advance_head(vring, 1);
361 364
362 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); 365 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
@@ -407,6 +410,21 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
407 return NULL; 410 return NULL;
408 } 411 }
409 412
 413	/* L4 IDENT is set when HW has calculated the checksum; check the
 414	 * status and, in case of error, drop the packet - higher stack
 415	 * layers will handle retransmission (if required)
416 */
417 if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
418 /* L4 protocol identified, csum calculated */
419 if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
420 skb->ip_summed = CHECKSUM_UNNECESSARY;
 421		/* If HW reports a bad checksum, let the IP stack re-check it.
 422		 * For example, HW doesn't understand the Microsoft IP stack,
 423		 * which mis-calculates the TCP checksum - if it should be 0x0,
 424		 * it writes 0xffff in violation of RFC 1624
425 */
426 }
427
410 ds_bits = wil_rxdesc_ds_bits(d); 428 ds_bits = wil_rxdesc_ds_bits(d);
411 if (ds_bits == 1) { 429 if (ds_bits == 1) {
412 /* 430 /*
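The resulting rx checksum policy, condensed (the status and error bits are defined in the txrx.h hunk further below; a freshly received skb defaults to CHECKSUM_NONE):

	/*
	 *  L4_IDENT  L4_ERR  skb->ip_summed        stack action
	 *  --------  ------  --------------------  ----------------------
	 *     0        -     CHECKSUM_NONE         verify in software
	 *     1        0     CHECKSUM_UNNECESSARY  skip verification
	 *     1        1     CHECKSUM_NONE         re-verify, drop if bad
	 */
	if ((d->dma.status & RX_DMA_STATUS_L4_IDENT) &&
	    !(d->dma.error & RX_DMA_ERROR_L4_ERR))
		skb->ip_summed = CHECKSUM_UNNECESSARY;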
@@ -646,6 +664,53 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
646 return 0; 664 return 0;
647} 665}
648 666
667static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
668 struct vring_tx_desc *d,
669 struct sk_buff *skb)
670{
671 int protocol;
672
673 if (skb->ip_summed != CHECKSUM_PARTIAL)
674 return 0;
675
676 switch (skb->protocol) {
677 case cpu_to_be16(ETH_P_IP):
678 protocol = ip_hdr(skb)->protocol;
679 break;
680 case cpu_to_be16(ETH_P_IPV6):
681 protocol = ipv6_hdr(skb)->nexthdr;
682 break;
683 default:
684 return -EINVAL;
685 }
686
687 switch (protocol) {
688 case IPPROTO_TCP:
689 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
690 /* L4 header len: TCP header length */
691 d->dma.d0 |=
692 (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
693 break;
694 case IPPROTO_UDP:
695 /* L4 header len: UDP header length */
696 d->dma.d0 |=
697 (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
698 break;
699 default:
700 return -EINVAL;
701 }
702
703 d->dma.ip_length = skb_network_header_len(skb);
704 d->dma.b11 = ETH_HLEN; /* MAC header length */
705 d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
706 /* Enable TCP/UDP checksum */
707 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
708 /* Calculate pseudo-header */
709 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
710
711 return 0;
712}
713
649static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 714static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
650 struct sk_buff *skb) 715 struct sk_buff *skb)
651{ 716{
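For a CHECKSUM_PARTIAL skb, wil_tx_desc_offload_cksum_set() programs the Tx descriptor so the hardware computes the L4 checksum, pseudo-header included. A worked example for TCP over IPv4 (field encodings per the txrx.h hunk further below):

	/*
	 * TCP/IPv4 with ip_summed == CHECKSUM_PARTIAL:
	 *   d0         L4_TYPE = 2 (TCP), L4_LENGTH = tcp_hdrlen(skb),
	 *              TCP_UDP_CHECKSUM_EN = 1, PSEUDO_HEADER_CALC_EN = 1
	 *   ip_length  skb_network_header_len(skb), the IP header size
	 *   b11        ETH_HLEN (MAC header length) | L3T_IPV4 bit
	 *
	 * Anything that is not TCP or UDP over IPv4/IPv6 returns -EINVAL,
	 * and the caller drops the packet rather than send a bogus csum.
	 */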
@@ -655,7 +720,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
655 u32 swhead = vring->swhead; 720 u32 swhead = vring->swhead;
656 int avail = wil_vring_avail_tx(vring); 721 int avail = wil_vring_avail_tx(vring);
657 int nr_frags = skb_shinfo(skb)->nr_frags; 722 int nr_frags = skb_shinfo(skb)->nr_frags;
658 uint f; 723 uint f = 0;
659 int vring_index = vring - wil->vring_tx; 724 int vring_index = vring - wil->vring_tx;
660 uint i = swhead; 725 uint i = swhead;
661 dma_addr_t pa; 726 dma_addr_t pa;
@@ -686,13 +751,20 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
686 return -EINVAL; 751 return -EINVAL;
687 /* 1-st segment */ 752 /* 1-st segment */
688 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); 753 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
754 /* Process TCP/UDP checksum offloading */
755 if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
756 wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
757 vring_index);
758 goto dma_error;
759 }
760
689 d->mac.d[2] |= ((nr_frags + 1) << 761 d->mac.d[2] |= ((nr_frags + 1) <<
690 MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); 762 MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
691 if (nr_frags) 763 if (nr_frags)
692 *_d = *d; 764 *_d = *d;
693 765
694 /* middle segments */ 766 /* middle segments */
695 for (f = 0; f < nr_frags; f++) { 767 for (; f < nr_frags; f++) {
696 const struct skb_frag_struct *frag = 768 const struct skb_frag_struct *frag =
697 &skb_shinfo(skb)->frags[f]; 769 &skb_shinfo(skb)->frags[f];
698 int len = skb_frag_size(frag); 770 int len = skb_frag_size(frag);
@@ -703,7 +775,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
703 if (unlikely(dma_mapping_error(dev, pa))) 775 if (unlikely(dma_mapping_error(dev, pa)))
704 goto dma_error; 776 goto dma_error;
705 wil_tx_desc_map(d, pa, len, vring_index); 777 wil_tx_desc_map(d, pa, len, vring_index);
706 vring->ctx[i] = NULL; 778 vring->ctx[i].mapped_as_page = 1;
707 *_d = *d; 779 *_d = *d;
708 } 780 }
709 /* for the last seg only */ 781 /* for the last seg only */
@@ -712,6 +784,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
712 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); 784 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
713 *_d = *d; 785 *_d = *d;
714 786
787 /* hold reference to skb
788 * to prevent skb release before accounting
789 * in case of immediate "tx done"
790 */
791 vring->ctx[i].skb = skb_get(skb);
792
715 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4, 793 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
716 (const void *)d, sizeof(*d), false); 794 (const void *)d, sizeof(*d), false);
717 795
@@ -720,29 +798,31 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
720 wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); 798 wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
721 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); 799 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
722 iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); 800 iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
723 /* hold reference to skb
724 * to prevent skb release before accounting
725 * in case of immediate "tx done"
726 */
727 vring->ctx[i] = skb_get(skb);
728 801
729 return 0; 802 return 0;
730 dma_error: 803 dma_error:
731 /* unmap what we have mapped */ 804 /* unmap what we have mapped */
732 /* Note: increment @f to operate with positive index */ 805 nr_frags = f + 1; /* frags mapped + one for skb head */
733 for (f++; f > 0; f--) { 806 for (f = 0; f < nr_frags; f++) {
734 u16 dmalen; 807 u16 dmalen;
808 struct wil_ctx *ctx;
735 809
736 i = (swhead + f) % vring->size; 810 i = (swhead + f) % vring->size;
811 ctx = &vring->ctx[i];
737 _d = &(vring->va[i].tx); 812 _d = &(vring->va[i].tx);
738 *d = *_d; 813 *d = *_d;
739 _d->dma.status = TX_DMA_STATUS_DU; 814 _d->dma.status = TX_DMA_STATUS_DU;
740 pa = wil_desc_addr(&d->dma.addr); 815 pa = wil_desc_addr(&d->dma.addr);
741 dmalen = le16_to_cpu(d->dma.length); 816 dmalen = le16_to_cpu(d->dma.length);
742 if (vring->ctx[i]) 817 if (ctx->mapped_as_page)
743 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
744 else
745 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); 818 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
819 else
820 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
821
822 if (ctx->skb)
823 dev_kfree_skb_any(ctx->skb);
824
825 memset(ctx, 0, sizeof(*ctx));
746 } 826 }
747 827
748 return -EINVAL; 828 return -EINVAL;
@@ -821,8 +901,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
821 &vring->va[vring->swtail].tx; 901 &vring->va[vring->swtail].tx;
822 struct vring_tx_desc dd, *d = &dd; 902 struct vring_tx_desc dd, *d = &dd;
823 dma_addr_t pa; 903 dma_addr_t pa;
824 struct sk_buff *skb;
825 u16 dmalen; 904 u16 dmalen;
905 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
906 struct sk_buff *skb = ctx->skb;
826 907
827 *d = *_d; 908 *d = *_d;
828 909
@@ -840,7 +921,11 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
840 (const void *)d, sizeof(*d), false); 921 (const void *)d, sizeof(*d), false);
841 922
842 pa = wil_desc_addr(&d->dma.addr); 923 pa = wil_desc_addr(&d->dma.addr);
843 skb = vring->ctx[vring->swtail]; 924 if (ctx->mapped_as_page)
925 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
926 else
927 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
928
844 if (skb) { 929 if (skb) {
845 if (d->dma.error == 0) { 930 if (d->dma.error == 0) {
846 ndev->stats.tx_packets++; 931 ndev->stats.tx_packets++;
@@ -849,16 +934,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
849 ndev->stats.tx_errors++; 934 ndev->stats.tx_errors++;
850 } 935 }
851 936
852 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
853 dev_kfree_skb_any(skb); 937 dev_kfree_skb_any(skb);
854 vring->ctx[vring->swtail] = NULL;
855 } else {
856 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
857 } 938 }
858 d->dma.addr.addr_low = 0; 939 memset(ctx, 0, sizeof(*ctx));
859 d->dma.addr.addr_high = 0; 940 /*
860 d->dma.length = 0; 941 * There is no need to touch HW descriptor:
 861			d->dma.status = TX_DMA_STATUS_DU;	 942			 * - status bit TX_DMA_STATUS_DU is set by design,
							 943			 *   so hardware will not try to process this descriptor,
944 * - rest of descriptor will be initialized on Tx.
945 */
862 vring->swtail = wil_vring_next_tail(vring); 946 vring->swtail = wil_vring_next_tail(vring);
863 done++; 947 done++;
864 } 948 }
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 859aea68a1fa..b3828279204c 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -235,7 +235,16 @@ struct vring_tx_mac {
235 235
236#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30 236#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
237#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2 237#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
238#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 238#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */
239
240
241#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0
242#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7
243#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */
244
245#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7
246#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1
247#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */
239 248
240 249
241#define TX_DMA_STATUS_DU BIT(0) 250#define TX_DMA_STATUS_DU BIT(0)
@@ -334,8 +343,17 @@ struct vring_rx_mac {
334 343
335#define RX_DMA_D0_CMD_DMA_IT BIT(10) 344#define RX_DMA_D0_CMD_DMA_IT BIT(10)
336 345
346/* Error field, offload bits */
347#define RX_DMA_ERROR_L3_ERR BIT(4)
348#define RX_DMA_ERROR_L4_ERR BIT(5)
349
350
351/* Status field */
337#define RX_DMA_STATUS_DU BIT(0) 352#define RX_DMA_STATUS_DU BIT(0)
338#define RX_DMA_STATUS_ERROR BIT(2) 353#define RX_DMA_STATUS_ERROR BIT(2)
354
355#define RX_DMA_STATUS_L3_IDENT BIT(4)
356#define RX_DMA_STATUS_L4_IDENT BIT(5)
339#define RX_DMA_STATUS_PHY_INFO BIT(6) 357#define RX_DMA_STATUS_PHY_INFO BIT(6)
340 358
341struct vring_rx_dma { 359struct vring_rx_dma {
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 44fdab51de7e..c4a51638736a 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -156,11 +156,22 @@ struct wil6210_mbox_hdr {
156/* max. value for wil6210_mbox_hdr.len */ 156/* max. value for wil6210_mbox_hdr.len */
157#define MAX_MBOXITEM_SIZE (240) 157#define MAX_MBOXITEM_SIZE (240)
158 158
159/**
160 * struct wil6210_mbox_hdr_wmi - WMI header
161 *
162 * @mid: MAC ID
163 * 00 - default, created by FW
164 * 01..0f - WiFi ports, driver to create
165 * 10..fe - debug
166 * ff - broadcast
167 * @id: command/event ID
168 * @timestamp: FW fills for events, free-running msec timer
169 */
159struct wil6210_mbox_hdr_wmi { 170struct wil6210_mbox_hdr_wmi {
160 u8 reserved0[2]; 171 u8 mid;
172 u8 reserved;
161 __le16 id; 173 __le16 id;
162 __le16 info1; /* bits [0..3] - device_id, rest - unused */ 174 __le32 timestamp;
163 u8 reserved1[2];
164} __packed; 175} __packed;
165 176
166struct pending_wmi_event { 177struct pending_wmi_event {
@@ -172,6 +183,14 @@ struct pending_wmi_event {
172 } __packed event; 183 } __packed event;
173}; 184};
174 185
186/**
187 * struct wil_ctx - software context for Vring descriptor
188 */
189struct wil_ctx {
190 struct sk_buff *skb;
191 u8 mapped_as_page:1;
192};
193
175union vring_desc; 194union vring_desc;
176 195
177struct vring { 196struct vring {
@@ -181,7 +200,7 @@ struct vring {
181 u32 swtail; 200 u32 swtail;
182 u32 swhead; 201 u32 swhead;
183 u32 hwtail; /* write here to inform hw */ 202 u32 hwtail; /* write here to inform hw */
184 void **ctx; /* void *ctx[size] - software context */ 203 struct wil_ctx *ctx; /* ctx[size] - software context */
185}; 204};
186 205
187enum { /* for wil6210_priv.status */ 206enum { /* for wil6210_priv.status */
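Replacing the void * slot context with struct wil_ctx makes the unmap choice explicit: the old code inferred dma_unmap_page() vs dma_unmap_single() from whether the stored skb pointer was NULL, which the mapped_as_page bit now states directly. The cleanup paths in txrx.c all reduce to this pattern (sketch; the helper name is hypothetical):

	static void wil_ctx_unmap(struct device *dev, struct wil_ctx *ctx,
				  dma_addr_t pa, u16 dmalen)
	{
		if (ctx->mapped_as_page)
			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		else
			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);

		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		memset(ctx, 0, sizeof(*ctx));
	}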
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index dc8059ad4bab..063963ee422a 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -172,8 +172,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
172 .len = cpu_to_le16(sizeof(cmd.wmi) + len), 172 .len = cpu_to_le16(sizeof(cmd.wmi) + len),
173 }, 173 },
174 .wmi = { 174 .wmi = {
175 .mid = 0,
175 .id = cpu_to_le16(cmdid), 176 .id = cpu_to_le16(cmdid),
176 .info1 = 0,
177 }, 177 },
178 }; 178 };
179 struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; 179 struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -248,7 +248,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
248 iowrite32(r->head = next_head, wil->csr + HOST_MBOX + 248 iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
249 offsetof(struct wil6210_mbox_ctl, tx.head)); 249 offsetof(struct wil6210_mbox_ctl, tx.head));
250 250
251 trace_wil6210_wmi_cmd(cmdid, buf, len); 251 trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
252 252
253 /* interrupt to FW */ 253 /* interrupt to FW */
254 iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); 254 iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
@@ -339,7 +339,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
339 } 339 }
340 } else { 340 } else {
341 cfg80211_rx_mgmt(wil->wdev, freq, signal, 341 cfg80211_rx_mgmt(wil->wdev, freq, signal,
342 (void *)rx_mgmt_frame, d_len, GFP_KERNEL); 342 (void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL);
343 } 343 }
344} 344}
345 345
@@ -640,9 +640,13 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
640 hdr.flags); 640 hdr.flags);
641 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && 641 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
642 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 642 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
643 u16 id = le16_to_cpu(evt->event.wmi.id); 643 struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
644 wil_dbg_wmi(wil, "WMI event 0x%04x\n", id); 644 u16 id = le16_to_cpu(wmi->id);
645 trace_wil6210_wmi_event(id, &evt->event.wmi, len); 645 u32 tstamp = le32_to_cpu(wmi->timestamp);
646 wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
647 id, wmi->mid, tstamp);
648 trace_wil6210_wmi_event(wmi, &wmi[1],
649 len - sizeof(*wmi));
646 } 650 }
647 wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, 651 wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
648 &evt->event.hdr, sizeof(hdr) + len, true); 652 &evt->event.hdr, sizeof(hdr) + len, true);
@@ -920,6 +924,12 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
920 cmd.sniffer_cfg.phy_support = 924 cmd.sniffer_cfg.phy_support =
921 cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) 925 cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
922 ? WMI_SNIFFER_CP : WMI_SNIFFER_DP); 926 ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
927 } else {
928 /* Initialize offload (in non-sniffer mode).
 929		 * Linux IP stack always calculates the IP checksum;
 930		 * HW always calculates the TCP/UDP checksum.
931 */
932 cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
923 } 933 }
924 /* typical time for secure PCP is 840ms */ 934 /* typical time for secure PCP is 840ms */
925 rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), 935 rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index f7c70b3a6ea9..c51d2dc489e4 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? 431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; 432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
433 433
434 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 434 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
435 ring_mem_size, &(ring->dmabase), 435 ring_mem_size, &(ring->dmabase),
436 GFP_KERNEL | __GFP_ZERO); 436 GFP_KERNEL);
437 if (!ring->descbase) 437 if (!ring->descbase)
438 return -ENOMEM; 438 return -ENOMEM;
439 439
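dma_zalloc_coherent() (also adopted in b43legacy below) is, at this point in the tree, a thin wrapper, so this is an idiom cleanup rather than a behavioral change. Roughly:

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle,
						gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}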
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 0e933bb71543..ccd24f0acb8d 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4645,6 +4645,19 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4645 b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN, 4645 b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN,
4646 B43_MACCTL_PSM_JMP0); 4646 B43_MACCTL_PSM_JMP0);
4647 4647
4648 switch (dev->dev->bus_type) {
4649#ifdef CONFIG_B43_BCMA
4650 case B43_BUS_BCMA:
4651 bcma_core_pci_down(dev->dev->bdev->bus);
4652 break;
4653#endif
4654#ifdef CONFIG_B43_SSB
4655 case B43_BUS_SSB:
4656 /* TODO */
4657 break;
4658#endif
4659 }
4660
4648 b43_dma_free(dev); 4661 b43_dma_free(dev);
4649 b43_pio_free(dev); 4662 b43_pio_free(dev);
4650 b43_chip_exit(dev); 4663 b43_chip_exit(dev);
@@ -4684,6 +4697,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4684 case B43_BUS_BCMA: 4697 case B43_BUS_BCMA:
4685 bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0], 4698 bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
4686 dev->dev->bdev, true); 4699 dev->dev->bdev, true);
4700 bcma_core_pci_up(dev->dev->bdev->bus);
4687 break; 4701 break;
4688#endif 4702#endif
4689#ifdef CONFIG_B43_SSB 4703#ifdef CONFIG_B43_SSB
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index faeafe219c57..42eb26c99e11 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -331,10 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
331static int alloc_ringmemory(struct b43legacy_dmaring *ring) 331static int alloc_ringmemory(struct b43legacy_dmaring *ring)
332{ 332{
333 /* GFP flags must match the flags in free_ringmemory()! */ 333 /* GFP flags must match the flags in free_ringmemory()! */
334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 334 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
335 B43legacy_DMA_RINGMEMSIZE, 335 B43legacy_DMA_RINGMEMSIZE,
336 &(ring->dmabase), 336 &(ring->dmabase), GFP_KERNEL);
337 GFP_KERNEL | __GFP_ZERO);
338 if (!ring->descbase) 337 if (!ring->descbase)
339 return -ENOMEM; 338 return -ENOMEM;
340 339
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e3f3c48f86d4..e13b1a65c65f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -592,6 +592,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
592 uint flags, u8 *buf, uint nbytes) 592 uint flags, u8 *buf, uint nbytes)
593{ 593{
594 struct sk_buff *mypkt; 594 struct sk_buff *mypkt;
595 struct sk_buff_head pktq;
595 int err; 596 int err;
596 597
597 mypkt = brcmu_pkt_buf_get_skb(nbytes); 598 mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -602,7 +603,10 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
602 } 603 }
603 604
604 memcpy(mypkt->data, buf, nbytes); 605 memcpy(mypkt->data, buf, nbytes);
605 err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt); 606 __skb_queue_head_init(&pktq);
607 __skb_queue_tail(&pktq, mypkt);
608 err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
609 __skb_dequeue_tail(&pktq);
606 610
607 brcmu_pkt_buf_free_skb(mypkt); 611 brcmu_pkt_buf_free_skb(mypkt);
608 return err; 612 return err;
@@ -611,22 +615,18 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
611 615
612int 616int
613brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 617brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
614 uint flags, struct sk_buff *pkt) 618 uint flags, struct sk_buff_head *pktq)
615{ 619{
616 uint width; 620 uint width;
617 int err = 0; 621 int err = 0;
618 struct sk_buff_head pkt_list;
619 622
620 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", 623 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
621 fn, addr, pkt->len); 624 fn, addr, pktq->qlen);
622 625
623 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 626 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
624 brcmf_sdio_addrprep(sdiodev, width, &addr); 627 brcmf_sdio_addrprep(sdiodev, width, &addr);
625 628
626 skb_queue_head_init(&pkt_list); 629 err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
627 skb_queue_tail(&pkt_list, pkt);
628 err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
629 skb_dequeue_tail(&pkt_list);
630 630
631 return err; 631 return err;
632} 632}
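Since brcmf_sdcard_send_pkt() now takes a packet queue, the single-buffer wrapper builds a one-element sk_buff_head on the stack and unlinks the packet again afterwards, so it keeps ownership of (and frees) the skb. The pattern in isolation:

	struct sk_buff_head pktq;

	__skb_queue_head_init(&pktq);	/* lock-free init: queue is local */
	__skb_queue_tail(&pktq, mypkt);
	err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
	__skb_dequeue_tail(&pktq);	/* unlink before brcmu_pkt_buf_free_skb() */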
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 289e386f01f6..64f4a2bc8dde 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -350,7 +350,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
350 350
351 sdiodev->bus_if = bus_if; 351 sdiodev->bus_if = bus_if;
352 bus_if->bus_priv.sdio = sdiodev; 352 bus_if->bus_priv.sdio = sdiodev;
353 bus_if->align = BRCMF_SDALIGN;
354 dev_set_drvdata(&func->dev, bus_if); 353 dev_set_drvdata(&func->dev, bus_if);
355 dev_set_drvdata(&sdiodev->func[1]->dev, bus_if); 354 dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
356 sdiodev->dev = &sdiodev->func[1]->dev; 355 sdiodev->dev = &sdiodev->func[1]->dev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 86cbfe2c7c6c..2eb9e642c9bf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -194,6 +194,8 @@
194#define BRCMF_E_IF_DEL 2 194#define BRCMF_E_IF_DEL 2
195#define BRCMF_E_IF_CHANGE 3 195#define BRCMF_E_IF_CHANGE 3
196 196
197#define BRCMF_E_IF_FLAG_NOIF 1
198
197#define BRCMF_E_IF_ROLE_STA 0 199#define BRCMF_E_IF_ROLE_STA 0
198#define BRCMF_E_IF_ROLE_AP 1 200#define BRCMF_E_IF_ROLE_AP 1
199#define BRCMF_E_IF_ROLE_WDS 2 201#define BRCMF_E_IF_ROLE_WDS 2
@@ -209,6 +211,8 @@
209#define BRCMF_DCMD_MEDLEN 1536 211#define BRCMF_DCMD_MEDLEN 1536
210#define BRCMF_DCMD_MAXLEN 8192 212#define BRCMF_DCMD_MAXLEN 8192
211 213
214#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
215
212/* Pattern matching filter. Specifies an offset within received packets to 216/* Pattern matching filter. Specifies an offset within received packets to
213 * start matching, the pattern to match, the size of the pattern, and a bitmask 217 * start matching, the pattern to match, the size of the pattern, and a bitmask
214 * that indicates which bits within the pattern should be matched. 218 * that indicates which bits within the pattern should be matched.
@@ -505,6 +509,25 @@ struct brcmf_dcmd {
505 uint needed; /* bytes needed (optional) */ 509 uint needed; /* bytes needed (optional) */
506}; 510};
507 511
512/**
513 * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
514 *
515 * @pktslots: dynamic allocated array for ordering AMPDU packets.
516 * @flow_id: AMPDU flow identifier.
517 * @cur_idx: last AMPDU index from firmware.
518 * @exp_idx: expected next AMPDU index.
519 * @max_idx: maximum amount of packets per AMPDU.
520 * @pend_pkts: number of packets currently in @pktslots.
521 */
522struct brcmf_ampdu_rx_reorder {
523 struct sk_buff **pktslots;
524 u8 flow_id;
525 u8 cur_idx;
526 u8 exp_idx;
527 u8 max_idx;
528 u8 pend_pkts;
529};
530
508/* Forward decls for struct brcmf_pub (see below) */ 531/* Forward decls for struct brcmf_pub (see below) */
509struct brcmf_proto; /* device communication protocol info */ 532struct brcmf_proto; /* device communication protocol info */
510struct brcmf_cfg80211_dev; /* cfg80211 device info */ 533struct brcmf_cfg80211_dev; /* cfg80211 device info */
@@ -536,9 +559,10 @@ struct brcmf_pub {
536 559
537 struct brcmf_fweh_info fweh; 560 struct brcmf_fweh_info fweh;
538 561
539 bool fw_signals;
540 struct brcmf_fws_info *fws; 562 struct brcmf_fws_info *fws;
541 spinlock_t fws_spinlock; 563
564 struct brcmf_ampdu_rx_reorder
565 *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
542#ifdef DEBUG 566#ifdef DEBUG
543 struct dentry *dbgfs_dir; 567 struct dentry *dbgfs_dir;
544#endif 568#endif
@@ -604,6 +628,9 @@ struct brcmf_if {
604 wait_queue_head_t pend_8021x_wait; 628 wait_queue_head_t pend_8021x_wait;
605}; 629};
606 630
631struct brcmf_skb_reorder_data {
632 u8 *reorder;
633};
607 634
608extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 635extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
609 636
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 080395f49fa5..f7c1985844e4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -36,7 +36,11 @@ struct brcmf_bus_dcmd {
36 * 36 *
37 * @init: prepare for communication with dongle. 37 * @init: prepare for communication with dongle.
38 * @stop: clear pending frames, disable data flow. 38 * @stop: clear pending frames, disable data flow.
39 * @txdata: send a data frame to the dongle (callee disposes skb). 39 * @txdata: send a data frame to the dongle. When the data
40 * has been transferred, the common driver must be
41 * notified using brcmf_txcomplete(). The common
42 * driver calls this function with interrupts
43 * disabled.
40 * @txctl: transmit a control request message to dongle. 44 * @txctl: transmit a control request message to dongle.
41 * @rxctl: receive a control response message from dongle. 45 * @rxctl: receive a control response message from dongle.
42 * @gettxq: obtain a reference of bus transmit queue (optional). 46 * @gettxq: obtain a reference of bus transmit queue (optional).
@@ -65,7 +69,6 @@ struct brcmf_bus_ops {
65 * @maxctl: maximum size for rxctl request message. 69 * @maxctl: maximum size for rxctl request message.
66 * @tx_realloc: number of tx packets realloced for headroom. 70 * @tx_realloc: number of tx packets realloced for headroom.
67 * @dstats: dongle-based statistical data. 71 * @dstats: dongle-based statistical data.
68 * @align: alignment requirement for the bus.
69 * @dcmd_list: bus/device specific dongle initialization commands. 72 * @dcmd_list: bus/device specific dongle initialization commands.
70 * @chip: device identifier of the dongle chip. 73 * @chip: device identifier of the dongle chip.
71 * @chiprev: revision of the dongle chip. 74 * @chiprev: revision of the dongle chip.
@@ -80,7 +83,6 @@ struct brcmf_bus {
80 enum brcmf_bus_state state; 83 enum brcmf_bus_state state;
81 uint maxctl; 84 uint maxctl;
82 unsigned long tx_realloc; 85 unsigned long tx_realloc;
83 u8 align;
84 u32 chip; 86 u32 chip;
85 u32 chiprev; 87 u32 chiprev;
86 struct list_head dcmd_list; 88 struct list_head dcmd_list;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 80099016d21f..e067aec1fbf1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -38,6 +38,19 @@ MODULE_LICENSE("Dual BSD/GPL");
38 38
39#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */ 39#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
40 40
41/* AMPDU rx reordering definitions */
42#define BRCMF_RXREORDER_FLOWID_OFFSET 0
43#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
44#define BRCMF_RXREORDER_FLAGS_OFFSET 4
45#define BRCMF_RXREORDER_CURIDX_OFFSET 6
46#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
47
48#define BRCMF_RXREORDER_DEL_FLOW 0x01
49#define BRCMF_RXREORDER_FLUSH_ALL 0x02
50#define BRCMF_RXREORDER_CURIDX_VALID 0x04
51#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
52#define BRCMF_RXREORDER_NEW_HOLE 0x10
53
41/* Error bits */ 54/* Error bits */
42int brcmf_msg_level; 55int brcmf_msg_level;
43module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR); 56module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
@@ -265,17 +278,234 @@ void brcmf_txflowblock(struct device *dev, bool state)
265{ 278{
266 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 279 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
267 struct brcmf_pub *drvr = bus_if->drvr; 280 struct brcmf_pub *drvr = bus_if->drvr;
268 int i;
269 281
270 brcmf_dbg(TRACE, "Enter\n"); 282 brcmf_dbg(TRACE, "Enter\n");
271 283
272 if (brcmf_fws_fc_active(drvr->fws)) { 284 brcmf_fws_bus_blocked(drvr, state);
273 brcmf_fws_bus_blocked(drvr, state); 285}
286
287static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
288{
289 skb->dev = ifp->ndev;
290 skb->protocol = eth_type_trans(skb, skb->dev);
291
292 if (skb->pkt_type == PACKET_MULTICAST)
293 ifp->stats.multicast++;
294
295 /* Process special event packets */
296 brcmf_fweh_process_skb(ifp->drvr, skb);
297
298 if (!(ifp->ndev->flags & IFF_UP)) {
299 brcmu_pkt_buf_free_skb(skb);
300 return;
301 }
302
303 ifp->stats.rx_bytes += skb->len;
304 ifp->stats.rx_packets++;
305
306 brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
307 if (in_interrupt())
308 netif_rx(skb);
309 else
310 /* If the receive is not processed inside an ISR,
311 * the softirqd must be woken explicitly to service
312 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
313 */
314 netif_rx_ni(skb);
315}
316
317static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
318 u8 start, u8 end,
319 struct sk_buff_head *skb_list)
320{
321 /* initialize return list */
322 __skb_queue_head_init(skb_list);
323
324 if (rfi->pend_pkts == 0) {
325 brcmf_dbg(INFO, "no packets in reorder queue\n");
326 return;
327 }
328
329 do {
330 if (rfi->pktslots[start]) {
331 __skb_queue_tail(skb_list, rfi->pktslots[start]);
332 rfi->pktslots[start] = NULL;
333 }
334 start++;
335 if (start > rfi->max_idx)
336 start = 0;
337 } while (start != end);
338 rfi->pend_pkts -= skb_queue_len(skb_list);
339}
340
341static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
342 struct sk_buff *pkt)
343{
344 u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
345 struct brcmf_ampdu_rx_reorder *rfi;
346 struct sk_buff_head reorder_list;
347 struct sk_buff *pnext;
348 u8 flags;
349 u32 buf_size;
350
351 flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
352 flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
353
354 /* validate flags and flow id */
355 if (flags == 0xFF) {
356 brcmf_err("invalid flags...so ignore this packet\n");
357 brcmf_netif_rx(ifp, pkt);
358 return;
359 }
360
361 rfi = ifp->drvr->reorder_flows[flow_id];
362 if (flags & BRCMF_RXREORDER_DEL_FLOW) {
363 brcmf_dbg(INFO, "flow-%d: delete\n",
364 flow_id);
365
366 if (rfi == NULL) {
367 brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
368 flow_id);
369 brcmf_netif_rx(ifp, pkt);
370 return;
371 }
372
373 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
374 &reorder_list);
375 /* add the last packet */
376 __skb_queue_tail(&reorder_list, pkt);
377 kfree(rfi);
378 ifp->drvr->reorder_flows[flow_id] = NULL;
379 goto netif_rx;
380 }
381 /* from here on we need a flow reorder instance */
382 if (rfi == NULL) {
383 buf_size = sizeof(*rfi);
384 max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
385
386 buf_size += (max_idx + 1) * sizeof(pkt);
387
388 /* allocate space for flow reorder info */
389 brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
390 flow_id, max_idx);
391 rfi = kzalloc(buf_size, GFP_ATOMIC);
392 if (rfi == NULL) {
393 brcmf_err("failed to alloc buffer\n");
394 brcmf_netif_rx(ifp, pkt);
395 return;
396 }
397
398 ifp->drvr->reorder_flows[flow_id] = rfi;
399 rfi->pktslots = (struct sk_buff **)(rfi+1);
400 rfi->max_idx = max_idx;
401 }
402 if (flags & BRCMF_RXREORDER_NEW_HOLE) {
403 if (rfi->pend_pkts) {
404 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
405 rfi->exp_idx,
406 &reorder_list);
407 WARN_ON(rfi->pend_pkts);
408 } else {
409 __skb_queue_head_init(&reorder_list);
410 }
411 rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
412 rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
413 rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
414 rfi->pktslots[rfi->cur_idx] = pkt;
415 rfi->pend_pkts++;
416 brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
417 flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
418 } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
419 cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
420 exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
421
422 if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
423 /* still in the current hole */
424 /* enqueue the current on the buffer chain */
425 if (rfi->pktslots[cur_idx] != NULL) {
426 brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
427 brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
428 rfi->pktslots[cur_idx] = NULL;
429 }
430 rfi->pktslots[cur_idx] = pkt;
431 rfi->pend_pkts++;
432 rfi->cur_idx = cur_idx;
433 brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
434 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
435
436 /* can return now as there is no reorder
437 * list to process.
438 */
439 return;
440 }
441 if (rfi->exp_idx == cur_idx) {
442 if (rfi->pktslots[cur_idx] != NULL) {
443 brcmf_dbg(INFO, "error buffer pending..free it\n");
444 brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
445 rfi->pktslots[cur_idx] = NULL;
446 }
447 rfi->pktslots[cur_idx] = pkt;
448 rfi->pend_pkts++;
449
450 /* got the expected one. flush from current to expected
451 * and update expected
452 */
453 brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
454 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
455
456 rfi->cur_idx = cur_idx;
457 rfi->exp_idx = exp_idx;
458
459 brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
460 &reorder_list);
461 brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
462 flow_id, skb_queue_len(&reorder_list),
463 rfi->pend_pkts);
464 } else {
465 u8 end_idx;
466
467 brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
468 flow_id, flags, rfi->cur_idx, rfi->exp_idx,
469 cur_idx, exp_idx);
470 if (flags & BRCMF_RXREORDER_FLUSH_ALL)
471 end_idx = rfi->exp_idx;
472 else
473 end_idx = exp_idx;
474
475 /* flush pkts first */
476 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
477 &reorder_list);
478
479 if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
480 __skb_queue_tail(&reorder_list, pkt);
481 } else {
482 rfi->pktslots[cur_idx] = pkt;
483 rfi->pend_pkts++;
484 }
485 rfi->exp_idx = exp_idx;
486 rfi->cur_idx = cur_idx;
487 }
274 } else { 488 } else {
 275		for (i = 0; i < BRCMF_MAX_IFS; i++)			 489		/* explicit window move, updating the expected index */
276 brcmf_txflowblock_if(drvr->iflist[i], 490 exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
277 BRCMF_NETIF_STOP_REASON_BLOCK_BUS, 491
278 state); 492 brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
493 flow_id, flags, rfi->exp_idx, exp_idx);
494 if (flags & BRCMF_RXREORDER_FLUSH_ALL)
495 end_idx = rfi->exp_idx;
496 else
497 end_idx = exp_idx;
498
499 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
500 &reorder_list);
501 __skb_queue_tail(&reorder_list, pkt);
502 /* set the new expected idx */
503 rfi->exp_idx = exp_idx;
504 }
505netif_rx:
506 skb_queue_walk_safe(&reorder_list, pkt, pnext) {
507 __skb_unlink(pkt, &reorder_list);
508 brcmf_netif_rx(ifp, pkt);
279 } 509 }
280} 510}
281 511
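brcmf_rxreorder_get_skb_list() walks the slot array as a ring of max_idx + 1 entries, collecting the half-open range [start, end) modulo the ring size. For example, with max_idx = 7, start = 6 and end = 2, it visits slots 6, 7, 0, 1 and stops before 2:

	do {
		if (rfi->pktslots[start]) {	/* slots 6, 7, 0, 1 ... */
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)	/* wrap: 7 -> 0 */
			start = 0;
	} while (start != end);			/* end == 2 */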
@@ -285,16 +515,18 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
285 struct brcmf_if *ifp; 515 struct brcmf_if *ifp;
286 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 516 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
287 struct brcmf_pub *drvr = bus_if->drvr; 517 struct brcmf_pub *drvr = bus_if->drvr;
518 struct brcmf_skb_reorder_data *rd;
288 u8 ifidx; 519 u8 ifidx;
289 int ret; 520 int ret;
290 521
291 brcmf_dbg(DATA, "Enter\n"); 522 brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
523 skb_queue_len(skb_list));
292 524
293 skb_queue_walk_safe(skb_list, skb, pnext) { 525 skb_queue_walk_safe(skb_list, skb, pnext) {
294 skb_unlink(skb, skb_list); 526 skb_unlink(skb, skb_list);
295 527
296 /* process and remove protocol-specific header */ 528 /* process and remove protocol-specific header */
297 ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb); 529 ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
298 ifp = drvr->iflist[ifidx]; 530 ifp = drvr->iflist[ifidx];
299 531
300 if (ret || !ifp || !ifp->ndev) { 532 if (ret || !ifp || !ifp->ndev) {
@@ -304,31 +536,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
304 continue; 536 continue;
305 } 537 }
306 538
307 skb->dev = ifp->ndev; 539 rd = (struct brcmf_skb_reorder_data *)skb->cb;
308 skb->protocol = eth_type_trans(skb, skb->dev); 540 if (rd->reorder)
309 541 brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
310 if (skb->pkt_type == PACKET_MULTICAST)
311 ifp->stats.multicast++;
312
313 /* Process special event packets */
314 brcmf_fweh_process_skb(drvr, skb);
315
316 if (!(ifp->ndev->flags & IFF_UP)) {
317 brcmu_pkt_buf_free_skb(skb);
318 continue;
319 }
320
321 ifp->stats.rx_bytes += skb->len;
322 ifp->stats.rx_packets++;
323
324 if (in_interrupt())
325 netif_rx(skb);
326 else 542 else
327 /* If the receive is not processed inside an ISR, 543 brcmf_netif_rx(ifp, skb);
328 * the softirqd must be woken explicitly to service the
329 * NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
330 */
331 netif_rx_ni(skb);
332 } 544 }
333} 545}
334 546
@@ -889,7 +1101,6 @@ int brcmf_bus_start(struct device *dev)
889 if (ret < 0) 1101 if (ret < 0)
890 goto fail; 1102 goto fail;
891 1103
892 drvr->fw_signals = true;
893 ret = brcmf_fws_init(drvr); 1104 ret = brcmf_fws_init(drvr);
894 if (ret < 0) 1105 if (ret < 0)
895 goto fail; 1106 goto fail;
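The rx path above now hands per-packet reorder metadata around in skb->cb, the 48-byte scratch area every sk_buff carries, so no allocation is needed per frame. A minimal sketch of the pattern, assuming brcmf_skb_reorder_data holds just the TLV pointer (the layout is inferred from its use in this hunk, not quoted from the header):

/* hdrpull stores the reorder TLV pointer into skb->cb; the rx path
 * reads it back and dispatches accordingly.
 */
struct brcmf_skb_reorder_data {
        u8 *reorder;            /* NULL: deliver straight to netif */
};

static void rx_dispatch(struct brcmf_if *ifp, struct sk_buff *skb)
{
        struct brcmf_skb_reorder_data *rd;

        rd = (struct brcmf_skb_reorder_data *)skb->cb;
        if (rd->reorder)
                brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
        else
                brcmf_netif_rx(ifp, skb);
}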
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 264111968320..1aa75d5951b8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -201,13 +201,6 @@ struct rte_console {
201#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */ 201#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
202#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */ 202#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
203 203
204/* HW frame tag */
205#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
206
207/* Total length of frame header for dongle protocol */
208#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
209#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
210
211/* 204/*
212 * Software allocation of To SB Mailbox resources 205 * Software allocation of To SB Mailbox resources
213 */ 206 */
@@ -250,38 +243,6 @@ struct rte_console {
250/* Current protocol version */ 243/* Current protocol version */
251#define SDPCM_PROT_VERSION 4 244#define SDPCM_PROT_VERSION 4
252 245
253/* SW frame header */
254#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
255
256#define SDPCM_CHANNEL_MASK 0x00000f00
257#define SDPCM_CHANNEL_SHIFT 8
258#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
259
260#define SDPCM_NEXTLEN_OFFSET 2
261
262/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
263#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
264#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
265#define SDPCM_DOFFSET_MASK 0xff000000
266#define SDPCM_DOFFSET_SHIFT 24
267#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
268#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
269#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
270#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
271
272#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
273
274/* logical channel numbers */
275#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
276#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
277#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
278#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
279#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
280
281#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
282
283#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
284
285/* 246/*
286 * Shared structure between dongle and the host. 247 * Shared structure between dongle and the host.
287 * The structure contains pointers to trap or assert information. 248 * The structure contains pointers to trap or assert information.
@@ -396,8 +357,8 @@ struct sdpcm_shared_le {
396 __le32 brpt_addr; 357 __le32 brpt_addr;
397}; 358};
398 359
399/* SDIO read frame info */ 360/* dongle SDIO bus specific header info */
400struct brcmf_sdio_read { 361struct brcmf_sdio_hdrinfo {
401 u8 seq_num; 362 u8 seq_num;
402 u8 channel; 363 u8 channel;
403 u16 len; 364 u16 len;
@@ -431,7 +392,7 @@ struct brcmf_sdio {
431 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN]; 392 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
432 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ 393 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
433 u8 rx_seq; /* Receive sequence number (expected) */ 394 u8 rx_seq; /* Receive sequence number (expected) */
434 struct brcmf_sdio_read cur_read; 395 struct brcmf_sdio_hdrinfo cur_read;
435 /* info of current read frame */ 396 /* info of current read frame */
436 bool rxskip; /* Skip receive (awaiting NAK ACK) */ 397 bool rxskip; /* Skip receive (awaiting NAK ACK) */
437 bool rxpending; /* Data frame pending in dongle */ 398 bool rxpending; /* Data frame pending in dongle */
@@ -500,6 +461,8 @@ struct brcmf_sdio {
500 struct brcmf_sdio_count sdcnt; 461 struct brcmf_sdio_count sdcnt;
501 bool sr_enabled; /* SaveRestore enabled */ 462 bool sr_enabled; /* SaveRestore enabled */
502 bool sleeping; /* SDIO bus sleeping */ 463 bool sleeping; /* SDIO bus sleeping */
464
465 u8 tx_hdrlen; /* sdio bus header length for tx packet */
503}; 466};
504 467
505/* clkstate */ 468/* clkstate */
@@ -510,7 +473,6 @@ struct brcmf_sdio {
510 473
511#ifdef DEBUG 474#ifdef DEBUG
512static int qcount[NUMPRIO]; 475static int qcount[NUMPRIO];
513static int tx_packets[NUMPRIO];
514#endif /* DEBUG */ 476#endif /* DEBUG */
515 477
516#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */ 478#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
@@ -1043,18 +1005,63 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1043 } 1005 }
1044} 1006}
1045 1007
1046static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, 1008/**
1047 struct brcmf_sdio_read *rd, 1009 * brcmfmac sdio bus specific header
1048 enum brcmf_sdio_frmtype type) 1010 * This is the lowest-layer header wrapped around packets exchanged between
1011 * the host and the WiFi dongle; it carries information needed by the SDIO
1012 * core and the firmware.
1013 *
1014 * It consists of 2 parts: hw header and software header
1015 * hardware header (frame tag) - 4 bytes
1016 * Byte 0~1: Frame length
1017 * Byte 2~3: Checksum, bit-wise inverse of frame length
1018 * software header - 8 bytes
1019 * Byte 0: Rx/Tx sequence number
1020 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1021 * Byte 2: Length of next data frame, reserved for Tx
1022 * Byte 3: Data offset
1023 * Byte 4: Flow control bits, reserved for Tx
1024 * Byte 5: Maximum sequence number allowed by firmware for Tx; N/A in Tx packets
1025 * Byte 6~7: Reserved
1026 */
1027#define SDPCM_HWHDR_LEN 4
1028#define SDPCM_SWHDR_LEN 8
1029#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
1030/* software header */
1031#define SDPCM_SEQ_MASK 0x000000ff
1032#define SDPCM_SEQ_WRAP 256
1033#define SDPCM_CHANNEL_MASK 0x00000f00
1034#define SDPCM_CHANNEL_SHIFT 8
1035#define SDPCM_CONTROL_CHANNEL 0 /* Control */
1036#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication */
1037#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */
1038#define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */
1039#define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */
1040#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
1041#define SDPCM_NEXTLEN_MASK 0x00ff0000
1042#define SDPCM_NEXTLEN_SHIFT 16
1043#define SDPCM_DOFFSET_MASK 0xff000000
1044#define SDPCM_DOFFSET_SHIFT 24
1045#define SDPCM_FCMASK_MASK 0x000000ff
1046#define SDPCM_WINDOW_MASK 0x0000ff00
1047#define SDPCM_WINDOW_SHIFT 8
1048
1049static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
1050{
1051 u32 hdrvalue;
1052 hdrvalue = *(u32 *)swheader;
1053 return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
1054}
1055
1056static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1057 struct brcmf_sdio_hdrinfo *rd,
1058 enum brcmf_sdio_frmtype type)
1049{ 1059{
1050 u16 len, checksum; 1060 u16 len, checksum;
1051 u8 rx_seq, fc, tx_seq_max; 1061 u8 rx_seq, fc, tx_seq_max;
1062 u32 swheader;
1052 1063
1053 /* 1064 /* hw header */
1054 * 4 bytes hardware header (frame tag)
1055 * Byte 0~1: Frame length
1056 * Byte 2~3: Checksum, bit-wise inverse of frame length
1057 */
1058 len = get_unaligned_le16(header); 1065 len = get_unaligned_le16(header);
1059 checksum = get_unaligned_le16(header + sizeof(u16)); 1066 checksum = get_unaligned_le16(header + sizeof(u16));
1060 /* All zero means no more to read */ 1067 /* All zero means no more to read */
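The hardware frame tag is self-checking: bytes 2~3 hold the bit-wise inverse of the length in bytes 0~1, so a valid tag satisfies len ^ check == 0xffff, while an all-zero tag means there is nothing more to read. A standalone validation sketch (user-space C with the little-endian loads spelled out; illustrative, not driver code):

#include <stdint.h>

static uint16_t le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

/* > 0: frame length, 0: no more frames, -1: corrupt tag */
static int check_frametag(const uint8_t hdr[4])
{
        uint16_t len = le16(hdr);
        uint16_t chk = le16(hdr + 2);

        if (!(len | chk))
                return 0;               /* all zero: rx done */
        if ((uint16_t)~(len ^ chk))
                return -1;              /* inverse check failed */
        return len;
}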
@@ -1083,24 +1090,16 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1083 } 1090 }
1084 rd->len = len; 1091 rd->len = len;
1085 1092
1086 /* 1093 /* software header */
1087 * 8 bytes hardware header 1094 header += SDPCM_HWHDR_LEN;
1088 * Byte 0: Rx sequence number 1095 swheader = le32_to_cpu(*(__le32 *)header);
1089 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag 1096 if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
1090 * Byte 2: Length of next data frame
1091 * Byte 3: Data offset
1092 * Byte 4: Flow control bits
1093 * Byte 5: Maximum Sequence number allow for Tx
1094 * Byte 6~7: Reserved
1095 */
1096 if (type == BRCMF_SDIO_FT_SUPER &&
1097 SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
1098 brcmf_err("Glom descriptor found in superframe head\n"); 1097 brcmf_err("Glom descriptor found in superframe head\n");
1099 rd->len = 0; 1098 rd->len = 0;
1100 return -EINVAL; 1099 return -EINVAL;
1101 } 1100 }
1102 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]); 1101 rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
1103 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]); 1102 rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
1104 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL && 1103 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1105 type != BRCMF_SDIO_FT_SUPER) { 1104 type != BRCMF_SDIO_FT_SUPER) {
1106 brcmf_err("HW header length too long\n"); 1105 brcmf_err("HW header length too long\n");
@@ -1120,7 +1119,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1120 rd->len = 0; 1119 rd->len = 0;
1121 return -EINVAL; 1120 return -EINVAL;
1122 } 1121 }
1123 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]); 1122 rd->dat_offset = brcmf_sdio_getdatoffset(header);
1124 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) { 1123 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1125 brcmf_err("seq %d: bad data offset\n", rx_seq); 1124 brcmf_err("seq %d: bad data offset\n", rx_seq);
1126 bus->sdcnt.rx_badhdr++; 1125 bus->sdcnt.rx_badhdr++;
@@ -1137,14 +1136,15 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1137 /* no need to check the reset for subframe */ 1136 /* no need to check the reset for subframe */
1138 if (type == BRCMF_SDIO_FT_SUB) 1137 if (type == BRCMF_SDIO_FT_SUB)
1139 return 0; 1138 return 0;
1140 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; 1139 rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
1141 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) { 1140 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1142 /* only warn for non-glom packets */ 1141
1143 if (rd->channel != SDPCM_GLOM_CHANNEL) 1142 if (rd->channel != SDPCM_GLOM_CHANNEL)
1144 brcmf_err("seq %d: next length error\n", rx_seq); 1143 brcmf_err("seq %d: next length error\n", rx_seq);
1145 rd->len_nxtfrm = 0; 1144 rd->len_nxtfrm = 0;
1146 } 1145 }
1147 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]); 1146 swheader = le32_to_cpu(*(__le32 *)(header + 4));
1147 fc = swheader & SDPCM_FCMASK_MASK;
1148 if (bus->flowcontrol != fc) { 1148 if (bus->flowcontrol != fc) {
1149 if (~bus->flowcontrol & fc) 1149 if (~bus->flowcontrol & fc)
1150 bus->sdcnt.fc_xoff++; 1150 bus->sdcnt.fc_xoff++;
@@ -1153,7 +1153,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1153 bus->sdcnt.fc_rcvd++; 1153 bus->sdcnt.fc_rcvd++;
1154 bus->flowcontrol = fc; 1154 bus->flowcontrol = fc;
1155 } 1155 }
1156 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]); 1156 tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
1157 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) { 1157 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1158 brcmf_err("seq %d: max tx seq number error\n", rx_seq); 1158 brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1159 tx_seq_max = bus->tx_seq + 2; 1159 tx_seq_max = bus->tx_seq + 2;
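With the byte-indexed SDPCM_*_VALUE() macros gone, the parser loads each 32-bit software-header word once and masks the fields out of it. A compact sketch of the same decode, following the field layout documented above (illustrative; mask values copied from the new defines):

#include <stdint.h>

struct swhdr_fields {
        uint8_t seq, channel, doffset;  /* word 0: bytes 0, 1, 3 */
        uint8_t nextlen;                /* word 0: byte 2 */
        uint8_t fcmask, window;         /* word 1: bytes 4, 5 */
};

static struct swhdr_fields decode_swhdr(uint32_t w0, uint32_t w1)
{
        struct swhdr_fields f;

        f.seq     =  w0 & 0x000000ff;           /* SDPCM_SEQ_MASK */
        f.channel = (w0 & 0x00000f00) >> 8;     /* SDPCM_CHANNEL_* */
        f.nextlen = (w0 & 0x00ff0000) >> 16;    /* SDPCM_NEXTLEN_* */
        f.doffset = (w0 & 0xff000000) >> 24;    /* SDPCM_DOFFSET_* */
        f.fcmask  =  w1 & 0x000000ff;           /* SDPCM_FCMASK_MASK */
        f.window  = (w1 & 0x0000ff00) >> 8;     /* SDPCM_WINDOW_* */
        return f;
}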
@@ -1163,18 +1163,40 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1163 return 0; 1163 return 0;
1164} 1164}
1165 1165
1166static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
1167{
1168 *(__le16 *)header = cpu_to_le16(frm_length);
1169 *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
1170}
1171
1172static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1173 struct brcmf_sdio_hdrinfo *hd_info)
1174{
1175 u32 sw_header;
1176
1177 brcmf_sdio_update_hwhdr(header, hd_info->len);
1178
1179 sw_header = bus->tx_seq;
1180 sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
1181 SDPCM_CHANNEL_MASK;
1182 sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
1183 SDPCM_DOFFSET_MASK;
1184 *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
1185 *(((__le32 *)header) + 2) = 0;
1186}
1187
1166static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) 1188static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1167{ 1189{
1168 u16 dlen, totlen; 1190 u16 dlen, totlen;
1169 u8 *dptr, num = 0; 1191 u8 *dptr, num = 0;
1170 1192 u32 align = 0;
1171 u16 sublen; 1193 u16 sublen;
1172 struct sk_buff *pfirst, *pnext; 1194 struct sk_buff *pfirst, *pnext;
1173 1195
1174 int errcode; 1196 int errcode;
1175 u8 doff, sfdoff; 1197 u8 doff, sfdoff;
1176 1198
1177 struct brcmf_sdio_read rd_new; 1199 struct brcmf_sdio_hdrinfo rd_new;
1178 1200
1179 /* If packets, issue read(s) and send up packet chain */ 1201 /* If packets, issue read(s) and send up packet chain */
1180 /* Return sequence numbers consumed? */ 1202 /* Return sequence numbers consumed? */
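On the tx side, callers now describe a frame once in a brcmf_sdio_hdrinfo and let brcmf_sdio_hdpack() serialize the hardware tag and software word; both the data and control paths later in this patch follow the same shape. A sketch of the calling pattern (frame, frame_len and head_pad are placeholders):

struct brcmf_sdio_hdrinfo hd_info = {0};

hd_info.len = frame_len;                    /* length covered by the hw tag */
hd_info.channel = SDPCM_DATA_CHANNEL;       /* or SDPCM_CONTROL_CHANNEL */
hd_info.dat_offset = head_pad + bus->tx_hdrlen;
brcmf_sdio_hdpack(bus, frame, &hd_info);    /* writes len/~len + sw word */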
@@ -1182,6 +1204,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1182 brcmf_dbg(SDIO, "start: glomd %p glom %p\n", 1204 brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1183 bus->glomd, skb_peek(&bus->glom)); 1205 bus->glomd, skb_peek(&bus->glom));
1184 1206
1207 if (bus->sdiodev->pdata)
1208 align = bus->sdiodev->pdata->sd_sgentry_align;
1209 if (align < 4)
1210 align = 4;
1211
1185 /* If there's a descriptor, generate the packet chain */ 1212 /* If there's a descriptor, generate the packet chain */
1186 if (bus->glomd) { 1213 if (bus->glomd) {
1187 pfirst = pnext = NULL; 1214 pfirst = pnext = NULL;
@@ -1205,9 +1232,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1205 pnext = NULL; 1232 pnext = NULL;
1206 break; 1233 break;
1207 } 1234 }
1208 if (sublen % BRCMF_SDALIGN) { 1235 if (sublen % align) {
1209 brcmf_err("sublen %d not multiple of %d\n", 1236 brcmf_err("sublen %d not multiple of %d\n",
1210 sublen, BRCMF_SDALIGN); 1237 sublen, align);
1211 } 1238 }
1212 totlen += sublen; 1239 totlen += sublen;
1213 1240
@@ -1220,7 +1247,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1220 } 1247 }
1221 1248
1222 /* Allocate/chain packet for next subframe */ 1249 /* Allocate/chain packet for next subframe */
1223 pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN); 1250 pnext = brcmu_pkt_buf_get_skb(sublen + align);
1224 if (pnext == NULL) { 1251 if (pnext == NULL) {
1225 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n", 1252 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1226 num, sublen); 1253 num, sublen);
@@ -1229,7 +1256,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1229 skb_queue_tail(&bus->glom, pnext); 1256 skb_queue_tail(&bus->glom, pnext);
1230 1257
1231 /* Adhere to start alignment requirements */ 1258 /* Adhere to start alignment requirements */
1232 pkt_align(pnext, sublen, BRCMF_SDALIGN); 1259 pkt_align(pnext, sublen, align);
1233 } 1260 }
1234 1261
1235 /* If all allocations succeeded, save packet chain 1262 /* If all allocations succeeded, save packet chain
@@ -1305,8 +1332,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1305 rd_new.seq_num = rxseq; 1332 rd_new.seq_num = rxseq;
1306 rd_new.len = dlen; 1333 rd_new.len = dlen;
1307 sdio_claim_host(bus->sdiodev->func[1]); 1334 sdio_claim_host(bus->sdiodev->func[1]);
1308 errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new, 1335 errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
1309 BRCMF_SDIO_FT_SUPER); 1336 BRCMF_SDIO_FT_SUPER);
1310 sdio_release_host(bus->sdiodev->func[1]); 1337 sdio_release_host(bus->sdiodev->func[1]);
1311 bus->cur_read.len = rd_new.len_nxtfrm << 4; 1338 bus->cur_read.len = rd_new.len_nxtfrm << 4;
1312 1339
@@ -1324,8 +1351,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1324 rd_new.len = pnext->len; 1351 rd_new.len = pnext->len;
1325 rd_new.seq_num = rxseq++; 1352 rd_new.seq_num = rxseq++;
1326 sdio_claim_host(bus->sdiodev->func[1]); 1353 sdio_claim_host(bus->sdiodev->func[1]);
1327 errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new, 1354 errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
1328 BRCMF_SDIO_FT_SUB); 1355 BRCMF_SDIO_FT_SUB);
1329 sdio_release_host(bus->sdiodev->func[1]); 1356 sdio_release_host(bus->sdiodev->func[1]);
1330 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), 1357 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1331 pnext->data, 32, "subframe:\n"); 1358 pnext->data, 32, "subframe:\n");
@@ -1357,7 +1384,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1357 skb_queue_walk_safe(&bus->glom, pfirst, pnext) { 1384 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1358 dptr = (u8 *) (pfirst->data); 1385 dptr = (u8 *) (pfirst->data);
1359 sublen = get_unaligned_le16(dptr); 1386 sublen = get_unaligned_le16(dptr);
1360 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); 1387 doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
1361 1388
1362 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(), 1389 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1363 dptr, pfirst->len, 1390 dptr, pfirst->len,
@@ -1535,7 +1562,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1535 uint rxleft = 0; /* Remaining number of frames allowed */ 1562 uint rxleft = 0; /* Remaining number of frames allowed */
1536 int ret; /* Return code from calls */ 1563 int ret; /* Return code from calls */
1537 uint rxcount = 0; /* Total frames read */ 1564 uint rxcount = 0; /* Total frames read */
1538 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new; 1565 struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
1539 u8 head_read = 0; 1566 u8 head_read = 0;
1540 1567
1541 brcmf_dbg(TRACE, "Enter\n"); 1568 brcmf_dbg(TRACE, "Enter\n");
@@ -1583,8 +1610,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1583 bus->rxhdr, SDPCM_HDRLEN, 1610 bus->rxhdr, SDPCM_HDRLEN,
1584 "RxHdr:\n"); 1611 "RxHdr:\n");
1585 1612
1586 if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd, 1613 if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
1587 BRCMF_SDIO_FT_NORMAL)) { 1614 BRCMF_SDIO_FT_NORMAL)) {
1588 sdio_release_host(bus->sdiodev->func[1]); 1615 sdio_release_host(bus->sdiodev->func[1]);
1589 if (!bus->rxpending) 1616 if (!bus->rxpending)
1590 break; 1617 break;
@@ -1648,8 +1675,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1648 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN); 1675 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1649 rd_new.seq_num = rd->seq_num; 1676 rd_new.seq_num = rd->seq_num;
1650 sdio_claim_host(bus->sdiodev->func[1]); 1677 sdio_claim_host(bus->sdiodev->func[1]);
1651 if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new, 1678 if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
1652 BRCMF_SDIO_FT_NORMAL)) { 1679 BRCMF_SDIO_FT_NORMAL)) {
1653 rd->len = 0; 1680 rd->len = 0;
1654 brcmu_pkt_buf_free_skb(pkt); 1681 brcmu_pkt_buf_free_skb(pkt);
1655 } 1682 }
@@ -1693,7 +1720,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1693 1720
1694 /* Save superframe descriptor and allocate packet frame */ 1721 /* Save superframe descriptor and allocate packet frame */
1695 if (rd->channel == SDPCM_GLOM_CHANNEL) { 1722 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1696 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { 1723 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
1697 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n", 1724 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1698 rd->len); 1725 rd->len);
1699 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), 1726 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
@@ -1759,85 +1786,168 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1759 return; 1786 return;
1760} 1787}
1761 1788
1789/* flag marking a dummy skb added for DMA alignment requirement */
1790#define DUMMY_SKB_FLAG 0x10000
1791/* bit mask of data length chopped from the previous packet */
1792#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
1793/**
1794 * brcmf_sdio_txpkt_prep - packet preparation for transmit
1795 * @bus: brcmf_sdio structure pointer
1796 * @pktq: packet list pointer
1797 * @chan: virtual channel to transmit the packet
1798 *
1799 * Processing applied to the packet:
1800 * - Align data buffer pointer
1801 * - Align data buffer length
1802 * - Prepare header
1803 * Return: negative value on error
1804 */
1805static int
1806brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
1807 uint chan)
1808{
1809 u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
1810 int ntail;
1811 struct sk_buff *pkt_next, *pkt_new;
1812 u8 *dat_buf;
1813 unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
1814 struct brcmf_sdio_hdrinfo hd_info = {0};
1815
1816 /* SDIO ADMA requires at least 32 bit alignment */
1817 head_align = 4;
1818 sg_align = 4;
1819 if (bus->sdiodev->pdata) {
1820 head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
1821 bus->sdiodev->pdata->sd_head_align : 4;
1822 sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
1823 bus->sdiodev->pdata->sd_sgentry_align : 4;
1824 }
1825 /* sg entry alignment should be a divisor of block size */
1826 WARN_ON(blksize % sg_align);
1827
1828 pkt_next = pktq->next;
1829 dat_buf = (u8 *)(pkt_next->data);
1830
1831 /* Check head padding */
1832 head_pad = ((unsigned long)dat_buf % head_align);
1833 if (head_pad) {
1834 if (skb_headroom(pkt_next) < head_pad) {
1835 bus->sdiodev->bus_if->tx_realloc++;
1836 head_pad = 0;
1837 if (skb_cow(pkt_next, head_pad))
1838 return -ENOMEM;
1839 }
1840 skb_push(pkt_next, head_pad);
1841 dat_buf = (u8 *)(pkt_next->data);
1842 memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
1843 }
1844
1845 /* Check tail padding */
1846 pkt_new = NULL;
1847 tail_chop = pkt_next->len % sg_align;
1848 tail_pad = sg_align - tail_chop;
1849 tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
1850 if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
1851 pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
1852 if (pkt_new == NULL)
1853 return -ENOMEM;
1854 memcpy(pkt_new->data,
1855 pkt_next->data + pkt_next->len - tail_chop,
1856 tail_chop);
1857 *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
1858 skb_trim(pkt_next, pkt_next->len - tail_chop);
1859 __skb_queue_after(pktq, pkt_next, pkt_new);
1860 } else {
1861 ntail = pkt_next->data_len + tail_pad -
1862 (pkt_next->end - pkt_next->tail);
1863 if (skb_cloned(pkt_next) || ntail > 0)
1864 if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
1865 return -ENOMEM;
1866 if (skb_linearize(pkt_next))
1867 return -ENOMEM;
1868 dat_buf = (u8 *)(pkt_next->data);
1869 __skb_put(pkt_next, tail_pad);
1870 }
1871
1872 /* Now prep the header */
1873 if (pkt_new)
1874 hd_info.len = pkt_next->len + tail_chop;
1875 else
1876 hd_info.len = pkt_next->len - tail_pad;
1877 hd_info.channel = chan;
1878 hd_info.dat_offset = head_pad + bus->tx_hdrlen;
1879 brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
1880
1881 if (BRCMF_BYTES_ON() &&
1882 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
1883 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
1884 brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
1885 else if (BRCMF_HDRS_ON())
1886 brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
1887 "Tx Header:\n");
1888
1889 return 0;
1890}
1891
1892/**
1893 * brcmf_sdio_txpkt_postp - packet post processing for transmit
1894 * @bus: brcmf_sdio structure pointer
1895 * @pktq: packet list pointer
1896 *
1897 * Processing applied to the packet:
1898 * - Remove head padding
1899 * - Remove tail padding
1900 */
1901static void
1902brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
1903{
1904 u8 *hdr;
1905 u32 dat_offset;
1906 u32 dummy_flags, chop_len;
1907 struct sk_buff *pkt_next, *tmp, *pkt_prev;
1908
1909 skb_queue_walk_safe(pktq, pkt_next, tmp) {
1910 dummy_flags = *(u32 *)(pkt_next->cb);
1911 if (dummy_flags & DUMMY_SKB_FLAG) {
1912 chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
1913 if (chop_len) {
1914 pkt_prev = pkt_next->prev;
1915 memcpy(pkt_prev->data + pkt_prev->len,
1916 pkt_next->data, chop_len);
1917 skb_put(pkt_prev, chop_len);
1918 }
1919 __skb_unlink(pkt_next, pktq);
1920 brcmu_pkt_buf_free_skb(pkt_next);
1921 } else {
1922 hdr = pkt_next->data + SDPCM_HWHDR_LEN;
1923 dat_offset = le32_to_cpu(*(__le32 *)hdr);
1924 dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
1925 SDPCM_DOFFSET_SHIFT;
1926 skb_pull(pkt_next, dat_offset);
1927 }
1928 }
1929}
1930
1762/* Writes a HW/SW header into the packet and sends it. */ 1931/* Writes a HW/SW header into the packet and sends it. */
1763/* Assumes: (a) header space already there, (b) caller holds lock */ 1932/* Assumes: (a) header space already there, (b) caller holds lock */
1764static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, 1933static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1765 uint chan) 1934 uint chan)
1766{ 1935{
1767 int ret; 1936 int ret;
1768 u8 *frame;
1769 u16 len, pad = 0;
1770 u32 swheader;
1771 int i; 1937 int i;
1938 struct sk_buff_head localq;
1772 1939
1773 brcmf_dbg(TRACE, "Enter\n"); 1940 brcmf_dbg(TRACE, "Enter\n");
1774 1941
1775 frame = (u8 *) (pkt->data); 1942 __skb_queue_head_init(&localq);
1776 1943 __skb_queue_tail(&localq, pkt);
1777 /* Add alignment padding, allocate new packet if needed */ 1944 ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
1778 pad = ((unsigned long)frame % BRCMF_SDALIGN); 1945 if (ret)
1779 if (pad) { 1946 goto done;
1780 if (skb_headroom(pkt) < pad) {
1781 brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
1782 skb_headroom(pkt), pad);
1783 bus->sdiodev->bus_if->tx_realloc++;
1784 ret = skb_cow(pkt, BRCMF_SDALIGN);
1785 if (ret)
1786 goto done;
1787 pad = ((unsigned long)frame % BRCMF_SDALIGN);
1788 }
1789 skb_push(pkt, pad);
1790 frame = (u8 *) (pkt->data);
1791 memset(frame, 0, pad + SDPCM_HDRLEN);
1792 }
1793 /* precondition: pad < BRCMF_SDALIGN */
1794
1795 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
1796 len = (u16) (pkt->len);
1797 *(__le16 *) frame = cpu_to_le16(len);
1798 *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
1799
1800 /* Software tag: channel, sequence number, data offset */
1801 swheader =
1802 ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
1803 (((pad +
1804 SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
1805
1806 *(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
1807 *(((__le32 *) frame) + 2) = 0;
1808
1809#ifdef DEBUG
1810 tx_packets[pkt->priority]++;
1811#endif
1812
1813 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
1814 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
1815 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
1816 frame, len, "Tx Frame:\n");
1817 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1818 ((BRCMF_CTL_ON() &&
1819 chan == SDPCM_CONTROL_CHANNEL) ||
1820 (BRCMF_DATA_ON() &&
1821 chan != SDPCM_CONTROL_CHANNEL))) &&
1822 BRCMF_HDRS_ON(),
1823 frame, min_t(u16, len, 16), "TxHdr:\n");
1824
1825 /* Raise len to next SDIO block to eliminate tail command */
1826 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
1827 u16 pad = bus->blocksize - (len % bus->blocksize);
1828 if ((pad <= bus->roundup) && (pad < bus->blocksize))
1829 len += pad;
1830 } else if (len % BRCMF_SDALIGN) {
1831 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
1832 }
1833
1834 /* Some controllers have trouble with odd bytes -- round to even */
1835 if (len & (ALIGNMENT - 1))
1836 len = roundup(len, ALIGNMENT);
1837 1947
1838 sdio_claim_host(bus->sdiodev->func[1]); 1948 sdio_claim_host(bus->sdiodev->func[1]);
1839 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1949 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1840 SDIO_FUNC_2, F2SYNC, pkt); 1950 SDIO_FUNC_2, F2SYNC, &localq);
1841 bus->sdcnt.f2txdata++; 1951 bus->sdcnt.f2txdata++;
1842 1952
1843 if (ret < 0) { 1953 if (ret < 0) {
@@ -1865,11 +1975,11 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1865 } 1975 }
1866 sdio_release_host(bus->sdiodev->func[1]); 1976 sdio_release_host(bus->sdiodev->func[1]);
1867 if (ret == 0) 1977 if (ret == 0)
1868 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; 1978 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
1869 1979
1870done: 1980done:
1871 /* restore pkt buffer pointer before calling tx complete routine */ 1981 brcmf_sdio_txpkt_postp(bus, &localq);
1872 skb_pull(pkt, SDPCM_HDRLEN + pad); 1982 __skb_dequeue_tail(&localq);
1873 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0); 1983 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
1874 return ret; 1984 return ret;
1875} 1985}
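The tail-pad computation in brcmf_sdio_txpkt_prep() does two roundings in one go: first up to the scatter-gather entry alignment, then up to a whole SDIO block so no separate tail command is needed. A worked user-space sketch with example numbers (illustrative only):

#include <stdio.h>

int main(void)
{
        unsigned int len = 1502, sg_align = 4, blksize = 512;
        unsigned int tail_chop, tail_pad;

        tail_chop = len % sg_align;                 /* 2 bytes past alignment */
        tail_pad  = sg_align - tail_chop;           /* 1502 + 2 = 1504, aligned */
        tail_pad += blksize - (len + tail_pad) % blksize;

        /* prints: chop 2, pad 34, total 1536 (= 3 x 512-byte blocks) */
        printf("chop %u, pad %u, total %u\n",
               tail_chop, tail_pad, len + tail_pad);
        return 0;
}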
@@ -1880,7 +1990,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1880 u32 intstatus = 0; 1990 u32 intstatus = 0;
1881 int ret = 0, prec_out; 1991 int ret = 0, prec_out;
1882 uint cnt = 0; 1992 uint cnt = 0;
1883 uint datalen;
1884 u8 tx_prec_map; 1993 u8 tx_prec_map;
1885 1994
1886 brcmf_dbg(TRACE, "Enter\n"); 1995 brcmf_dbg(TRACE, "Enter\n");
@@ -1896,7 +2005,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1896 break; 2005 break;
1897 } 2006 }
1898 spin_unlock_bh(&bus->txqlock); 2007 spin_unlock_bh(&bus->txqlock);
1899 datalen = pkt->len - SDPCM_HDRLEN;
1900 2008
1901 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL); 2009 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
1902 2010
@@ -2221,7 +2329,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2221 } 2329 }
2222 2330
2223 } else { 2331 } else {
2224 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; 2332 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2225 } 2333 }
2226 sdio_release_host(bus->sdiodev->func[1]); 2334 sdio_release_host(bus->sdiodev->func[1]);
2227 bus->ctrl_frame_stat = false; 2335 bus->ctrl_frame_stat = false;
@@ -2276,13 +2384,14 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2276 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2384 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2277 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2385 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2278 struct brcmf_sdio *bus = sdiodev->bus; 2386 struct brcmf_sdio *bus = sdiodev->bus;
2387 ulong flags;
2279 2388
2280 brcmf_dbg(TRACE, "Enter\n"); 2389 brcmf_dbg(TRACE, "Enter\n");
2281 2390
2282 datalen = pkt->len; 2391 datalen = pkt->len;
2283 2392
2284 /* Add space for the header */ 2393 /* Add space for the header */
2285 skb_push(pkt, SDPCM_HDRLEN); 2394 skb_push(pkt, bus->tx_hdrlen);
2286 /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */ 2395 /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2287 2396
2288 prec = prio2prec((pkt->priority & PRIOMASK)); 2397 prec = prio2prec((pkt->priority & PRIOMASK));
@@ -2293,10 +2402,9 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2293 bus->sdcnt.fcqueued++; 2402 bus->sdcnt.fcqueued++;
2294 2403
2295 /* Priority based enq */ 2404 /* Priority based enq */
2296 spin_lock_bh(&bus->txqlock); 2405 spin_lock_irqsave(&bus->txqlock, flags);
2297 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) { 2406 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2298 skb_pull(pkt, SDPCM_HDRLEN); 2407 skb_pull(pkt, bus->tx_hdrlen);
2299 brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
2300 brcmf_err("out of bus->txq !!!\n"); 2408 brcmf_err("out of bus->txq !!!\n");
2301 ret = -ENOSR; 2409 ret = -ENOSR;
2302 } else { 2410 } else {
@@ -2307,7 +2415,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2307 bus->txoff = true; 2415 bus->txoff = true;
2308 brcmf_txflowblock(bus->sdiodev->dev, true); 2416 brcmf_txflowblock(bus->sdiodev->dev, true);
2309 } 2417 }
2310 spin_unlock_bh(&bus->txqlock); 2418 spin_unlock_irqrestore(&bus->txqlock, flags);
2311 2419
2312#ifdef DEBUG 2420#ifdef DEBUG
2313 if (pktq_plen(&bus->txq, prec) > qcount[prec]) 2421 if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2436,7 +2544,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2436 return ret; 2544 return ret;
2437 } 2545 }
2438 2546
2439 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; 2547 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2440 2548
2441 return ret; 2549 return ret;
2442} 2550}
@@ -2446,19 +2554,19 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2446{ 2554{
2447 u8 *frame; 2555 u8 *frame;
2448 u16 len; 2556 u16 len;
2449 u32 swheader;
2450 uint retries = 0; 2557 uint retries = 0;
2451 u8 doff = 0; 2558 u8 doff = 0;
2452 int ret = -1; 2559 int ret = -1;
2453 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2560 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2454 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2561 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2455 struct brcmf_sdio *bus = sdiodev->bus; 2562 struct brcmf_sdio *bus = sdiodev->bus;
2563 struct brcmf_sdio_hdrinfo hd_info = {0};
2456 2564
2457 brcmf_dbg(TRACE, "Enter\n"); 2565 brcmf_dbg(TRACE, "Enter\n");
2458 2566
2459 /* Back the pointer to make room for the bus header */ 2567
2460 frame = msg - SDPCM_HDRLEN; 2568 frame = msg - bus->tx_hdrlen;
2461 len = (msglen += SDPCM_HDRLEN); 2569 len = (msglen += bus->tx_hdrlen);
2462 2570
2463 /* Add alignment padding (optional for ctl frames) */ 2571 /* Add alignment padding (optional for ctl frames) */
2464 doff = ((unsigned long)frame % BRCMF_SDALIGN); 2572 doff = ((unsigned long)frame % BRCMF_SDALIGN);
@@ -2466,10 +2574,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2466 frame -= doff; 2574 frame -= doff;
2467 len += doff; 2575 len += doff;
2468 msglen += doff; 2576 msglen += doff;
2469 memset(frame, 0, doff + SDPCM_HDRLEN); 2577 memset(frame, 0, doff + bus->tx_hdrlen);
2470 } 2578 }
2471 /* precondition: doff < BRCMF_SDALIGN */ 2579 /* precondition: doff < BRCMF_SDALIGN */
2472 doff += SDPCM_HDRLEN; 2580 doff += bus->tx_hdrlen;
2473 2581
2474 /* Round send length to next SDIO block */ 2582 /* Round send length to next SDIO block */
2475 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { 2583 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
@@ -2491,18 +2599,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2491 brcmf_sdbrcm_bus_sleep(bus, false, false); 2599 brcmf_sdbrcm_bus_sleep(bus, false, false);
2492 sdio_release_host(bus->sdiodev->func[1]); 2600 sdio_release_host(bus->sdiodev->func[1]);
2493 2601
2494 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ 2602 hd_info.len = (u16)msglen;
2495 *(__le16 *) frame = cpu_to_le16((u16) msglen); 2603 hd_info.channel = SDPCM_CONTROL_CHANNEL;
2496 *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen); 2604 hd_info.dat_offset = doff;
2497 2605 brcmf_sdio_hdpack(bus, frame, &hd_info);
2498 /* Software tag: channel, sequence number, data offset */
2499 swheader =
2500 ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
2501 SDPCM_CHANNEL_MASK)
2502 | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
2503 SDPCM_DOFFSET_MASK);
2504 put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
2505 put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
2506 2606
2507 if (!data_ok(bus)) { 2607 if (!data_ok(bus)) {
2508 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n", 2608 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
@@ -3733,7 +3833,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3733 struct brcmf_sdio *bus; 3833 struct brcmf_sdio *bus;
3734 struct brcmf_bus_dcmd *dlst; 3834 struct brcmf_bus_dcmd *dlst;
3735 u32 dngl_txglom; 3835 u32 dngl_txglom;
3736 u32 dngl_txglomalign; 3836 u32 txglomalign = 0;
3737 u8 idx; 3837 u8 idx;
3738 3838
3739 brcmf_dbg(TRACE, "Enter\n"); 3839 brcmf_dbg(TRACE, "Enter\n");
@@ -3752,7 +3852,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3752 bus->txbound = BRCMF_TXBOUND; 3852 bus->txbound = BRCMF_TXBOUND;
3753 bus->rxbound = BRCMF_RXBOUND; 3853 bus->rxbound = BRCMF_RXBOUND;
3754 bus->txminmax = BRCMF_TXMINMAX; 3854 bus->txminmax = BRCMF_TXMINMAX;
3755 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; 3855 bus->tx_seq = SDPCM_SEQ_WRAP - 1;
3756 3856
3757 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker); 3857 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3758 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq"); 3858 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
@@ -3794,8 +3894,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3794 bus->sdiodev->bus_if->chip = bus->ci->chip; 3894 bus->sdiodev->bus_if->chip = bus->ci->chip;
3795 bus->sdiodev->bus_if->chiprev = bus->ci->chiprev; 3895 bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
3796 3896
3797 /* Attach to the brcmf/OS/network interface */ 3897 /* default sdio bus header length for tx packet */
3798 ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev); 3898 bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
3899
3900 /* Attach to the common layer, reserve hdr space */
3901 ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
3799 if (ret != 0) { 3902 if (ret != 0) {
3800 brcmf_err("brcmf_attach failed\n"); 3903 brcmf_err("brcmf_attach failed\n");
3801 goto fail; 3904 goto fail;
@@ -3827,9 +3930,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3827 dlst->param_len = sizeof(u32); 3930 dlst->param_len = sizeof(u32);
3828 } else { 3931 } else {
3829 /* otherwise, set txglomalign */ 3932 /* otherwise, set txglomalign */
3830 dngl_txglomalign = bus->sdiodev->bus_if->align; 3933 if (sdiodev->pdata)
3934 txglomalign = sdiodev->pdata->sd_sgentry_align;
3935 /* SDIO ADMA requires at least 32 bit alignment */
3936 if (txglomalign < 4)
3937 txglomalign = 4;
3831 dlst->name = "bus:txglomalign"; 3938 dlst->name = "bus:txglomalign";
3832 dlst->param = (char *)&dngl_txglomalign; 3939 dlst->param = (char *)&txglomalign;
3833 dlst->param_len = sizeof(u32); 3940 dlst->param_len = sizeof(u32);
3834 } 3941 }
3835 list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list); 3942 list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 83ee53a7c76e..fad77dd2a3a5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -185,6 +185,10 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
185 ifevent->action, ifevent->ifidx, ifevent->bssidx, 185 ifevent->action, ifevent->ifidx, ifevent->bssidx,
186 ifevent->flags, ifevent->role); 186 ifevent->flags, ifevent->role);
187 187
188 if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
189 brcmf_dbg(EVENT, "event can be ignored\n");
190 return;
191 }
188 if (ifevent->ifidx >= BRCMF_MAX_IFS) { 192 if (ifevent->ifidx >= BRCMF_MAX_IFS) {
189 brcmf_err("invalid interface index: %u\n", 193 brcmf_err("invalid interface index: %u\n",
190 ifevent->ifidx); 194 ifevent->ifidx);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 665ef69e974b..ecabb04f33c3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -69,4 +69,25 @@ struct brcmf_fil_bss_enable_le {
69 __le32 enable; 69 __le32 enable;
70}; 70};
71 71
72/**
73 * struct brcmf_tdls_iovar_le - common structure for TDLS iovars.
74 *
75 * @ea: ether address of peer station.
76 * @mode: mode value depending on specific tdls iovar.
77 * @chanspec: channel specification.
78 * @pad: unused (for future use).
79 */
80struct brcmf_tdls_iovar_le {
81 u8 ea[ETH_ALEN]; /* Station address */
82 u8 mode; /* mode: depends on iovar */
83 __le16 chanspec;
84 __le32 pad; /* future */
85};
86
87enum brcmf_tdls_manual_ep_ops {
88 BRCMF_TDLS_MANUAL_EP_CREATE = 1,
89 BRCMF_TDLS_MANUAL_EP_DELETE = 3,
90 BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
91};
92
72#endif /* FWIL_TYPES_H_ */ 93#endif /* FWIL_TYPES_H_ */
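A hypothetical sketch of how the new TDLS iovar might be filled for a manual-endpoint create; the iovar name "tdls_endpoint" and the brcmf_fil_iovar_data_set() call are assumptions in the driver's usual firmware-interface style, since the consuming code is not part of this hunk:

struct brcmf_tdls_iovar_le info;
int err;

memset(&info, 0, sizeof(info));
memcpy(info.ea, peer_mac, ETH_ALEN);        /* peer station address */
info.mode = BRCMF_TDLS_MANUAL_EP_CREATE;    /* from the enum above */
/* chanspec and pad stay zero for a plain create */

/* assumed iovar name -- not confirmed by this patch */
err = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint", &info, sizeof(info));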
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 29b1f24c2d0f..82f9140f3d35 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -422,9 +422,12 @@ struct brcmf_fws_macdesc_table {
422 422
423struct brcmf_fws_info { 423struct brcmf_fws_info {
424 struct brcmf_pub *drvr; 424 struct brcmf_pub *drvr;
425 spinlock_t spinlock;
426 ulong flags;
425 struct brcmf_fws_stats stats; 427 struct brcmf_fws_stats stats;
426 struct brcmf_fws_hanger hanger; 428 struct brcmf_fws_hanger hanger;
427 enum brcmf_fws_fcmode fcmode; 429 enum brcmf_fws_fcmode fcmode;
430 bool fw_signals;
428 bool bcmc_credit_check; 431 bool bcmc_credit_check;
429 struct brcmf_fws_macdesc_table desc; 432 struct brcmf_fws_macdesc_table desc;
430 struct workqueue_struct *fws_wq; 433 struct workqueue_struct *fws_wq;
@@ -483,6 +486,18 @@ static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
483} 486}
484#undef BRCMF_FWS_TLV_DEF 487#undef BRCMF_FWS_TLV_DEF
485 488
489static void brcmf_fws_lock(struct brcmf_fws_info *fws)
490 __acquires(&fws->spinlock)
491{
492 spin_lock_irqsave(&fws->spinlock, fws->flags);
493}
494
495static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
496 __releases(&fws->spinlock)
497{
498 spin_unlock_irqrestore(&fws->spinlock, fws->flags);
499}
500
486static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg) 501static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
487{ 502{
488 u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX); 503 u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
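The per-instance spinlock replaces the old drvr-level irqsave macros; stashing the saved flags in brcmf_fws_info keeps every call site to one line while the __acquires/__releases annotations keep sparse happy about the lock imbalance. The resulting critical-section shape used throughout the rest of this file (illustrative):

brcmf_fws_lock(fws);        /* spin_lock_irqsave, flags kept in fws->flags */
/* ... mutate fws state: credits, descriptors, queues ... */
brcmf_fws_unlock(fws);      /* spin_unlock_irqrestore from fws->flags */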
@@ -869,8 +884,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
869 skcb->state = BRCMF_FWS_SKBSTATE_TIM; 884 skcb->state = BRCMF_FWS_SKBSTATE_TIM;
870 bus = fws->drvr->bus_if; 885 bus = fws->drvr->bus_if;
871 err = brcmf_fws_hdrpush(fws, skb); 886 err = brcmf_fws_hdrpush(fws, skb);
872 if (err == 0) 887 if (err == 0) {
888 brcmf_fws_unlock(fws);
873 err = brcmf_bus_txdata(bus, skb); 889 err = brcmf_bus_txdata(bus, skb);
890 brcmf_fws_lock(fws);
891 }
874 if (err) 892 if (err)
875 brcmu_pkt_buf_free_skb(skb); 893 brcmu_pkt_buf_free_skb(skb);
876 return true; 894 return true;
@@ -905,26 +923,10 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
905 return 0; 923 return 0;
906} 924}
907 925
908/* using macro so sparse checking does not complain
909 * about locking imbalance.
910 */
911#define brcmf_fws_lock(drvr, flags) \
912do { \
913 flags = 0; \
914 spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
915} while (0)
916
917/* using macro so sparse checking does not complain
918 * about locking imbalance.
919 */
920#define brcmf_fws_unlock(drvr, flags) \
921 spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
922
923static 926static
924int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) 927int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
925{ 928{
926 struct brcmf_fws_mac_descriptor *entry, *existing; 929 struct brcmf_fws_mac_descriptor *entry, *existing;
927 ulong flags;
928 u8 mac_handle; 930 u8 mac_handle;
929 u8 ifidx; 931 u8 ifidx;
930 u8 *addr; 932 u8 *addr;
@@ -938,10 +940,10 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
938 if (entry->occupied) { 940 if (entry->occupied) {
939 brcmf_dbg(TRACE, "deleting %s mac %pM\n", 941 brcmf_dbg(TRACE, "deleting %s mac %pM\n",
940 entry->name, addr); 942 entry->name, addr);
941 brcmf_fws_lock(fws->drvr, flags); 943 brcmf_fws_lock(fws);
942 brcmf_fws_macdesc_cleanup(fws, entry, -1); 944 brcmf_fws_macdesc_cleanup(fws, entry, -1);
943 brcmf_fws_macdesc_deinit(entry); 945 brcmf_fws_macdesc_deinit(entry);
944 brcmf_fws_unlock(fws->drvr, flags); 946 brcmf_fws_unlock(fws);
945 } else 947 } else
946 fws->stats.mac_update_failed++; 948 fws->stats.mac_update_failed++;
947 return 0; 949 return 0;
@@ -950,13 +952,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
950 existing = brcmf_fws_macdesc_lookup(fws, addr); 952 existing = brcmf_fws_macdesc_lookup(fws, addr);
951 if (IS_ERR(existing)) { 953 if (IS_ERR(existing)) {
952 if (!entry->occupied) { 954 if (!entry->occupied) {
953 brcmf_fws_lock(fws->drvr, flags); 955 brcmf_fws_lock(fws);
954 entry->mac_handle = mac_handle; 956 entry->mac_handle = mac_handle;
955 brcmf_fws_macdesc_init(entry, addr, ifidx); 957 brcmf_fws_macdesc_init(entry, addr, ifidx);
956 brcmf_fws_macdesc_set_name(fws, entry); 958 brcmf_fws_macdesc_set_name(fws, entry);
957 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, 959 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
958 BRCMF_FWS_PSQ_LEN); 960 BRCMF_FWS_PSQ_LEN);
959 brcmf_fws_unlock(fws->drvr, flags); 961 brcmf_fws_unlock(fws);
960 brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr); 962 brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
961 } else { 963 } else {
962 fws->stats.mac_update_failed++; 964 fws->stats.mac_update_failed++;
@@ -964,13 +966,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
964 } else { 966 } else {
965 if (entry != existing) { 967 if (entry != existing) {
966 brcmf_dbg(TRACE, "copy mac %s\n", existing->name); 968 brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
967 brcmf_fws_lock(fws->drvr, flags); 969 brcmf_fws_lock(fws);
968 memcpy(entry, existing, 970 memcpy(entry, existing,
969 offsetof(struct brcmf_fws_mac_descriptor, psq)); 971 offsetof(struct brcmf_fws_mac_descriptor, psq));
970 entry->mac_handle = mac_handle; 972 entry->mac_handle = mac_handle;
971 brcmf_fws_macdesc_deinit(existing); 973 brcmf_fws_macdesc_deinit(existing);
972 brcmf_fws_macdesc_set_name(fws, entry); 974 brcmf_fws_macdesc_set_name(fws, entry);
973 brcmf_fws_unlock(fws->drvr, flags); 975 brcmf_fws_unlock(fws);
974 brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name, 976 brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
975 addr); 977 addr);
976 } else { 978 } else {
@@ -986,7 +988,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
986 u8 type, u8 *data) 988 u8 type, u8 *data)
987{ 989{
988 struct brcmf_fws_mac_descriptor *entry; 990 struct brcmf_fws_mac_descriptor *entry;
989 ulong flags;
990 u8 mac_handle; 991 u8 mac_handle;
991 int ret; 992 int ret;
992 993
@@ -996,7 +997,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
996 fws->stats.mac_ps_update_failed++; 997 fws->stats.mac_ps_update_failed++;
997 return -ESRCH; 998 return -ESRCH;
998 } 999 }
999 brcmf_fws_lock(fws->drvr, flags); 1000 brcmf_fws_lock(fws);
1000 /* a state update should wipe old credits */ 1001 /* a state update should wipe old credits */
1001 entry->requested_credit = 0; 1002 entry->requested_credit = 0;
1002 entry->requested_packet = 0; 1003 entry->requested_packet = 0;
@@ -1011,7 +1012,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
1011 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true); 1012 brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
1012 ret = BRCMF_FWS_RET_OK_NOSCHEDULE; 1013 ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
1013 } 1014 }
1014 brcmf_fws_unlock(fws->drvr, flags); 1015 brcmf_fws_unlock(fws);
1015 return ret; 1016 return ret;
1016} 1017}
1017 1018
@@ -1019,7 +1020,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
1019 u8 type, u8 *data) 1020 u8 type, u8 *data)
1020{ 1021{
1021 struct brcmf_fws_mac_descriptor *entry; 1022 struct brcmf_fws_mac_descriptor *entry;
1022 ulong flags;
1023 u8 ifidx; 1023 u8 ifidx;
1024 int ret; 1024 int ret;
1025 1025
@@ -1038,7 +1038,7 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
1038 1038
1039 brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type, 1039 brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
1040 entry->name); 1040 entry->name);
1041 brcmf_fws_lock(fws->drvr, flags); 1041 brcmf_fws_lock(fws);
1042 switch (type) { 1042 switch (type) {
1043 case BRCMF_FWS_TYPE_INTERFACE_OPEN: 1043 case BRCMF_FWS_TYPE_INTERFACE_OPEN:
1044 entry->state = BRCMF_FWS_STATE_OPEN; 1044 entry->state = BRCMF_FWS_STATE_OPEN;
@@ -1050,10 +1050,10 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
1050 break; 1050 break;
1051 default: 1051 default:
1052 ret = -EINVAL; 1052 ret = -EINVAL;
1053 brcmf_fws_unlock(fws->drvr, flags); 1053 brcmf_fws_unlock(fws);
1054 goto fail; 1054 goto fail;
1055 } 1055 }
1056 brcmf_fws_unlock(fws->drvr, flags); 1056 brcmf_fws_unlock(fws);
1057 return ret; 1057 return ret;
1058 1058
1059fail: 1059fail:
@@ -1065,7 +1065,6 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
1065 u8 *data) 1065 u8 *data)
1066{ 1066{
1067 struct brcmf_fws_mac_descriptor *entry; 1067 struct brcmf_fws_mac_descriptor *entry;
1068 ulong flags;
1069 1068
1070 entry = &fws->desc.nodes[data[1] & 0x1F]; 1069 entry = &fws->desc.nodes[data[1] & 0x1F];
1071 if (!entry->occupied) { 1070 if (!entry->occupied) {
@@ -1079,14 +1078,14 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
1079 brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n", 1078 brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
1080 brcmf_fws_get_tlv_name(type), type, entry->name, 1079 brcmf_fws_get_tlv_name(type), type, entry->name,
1081 data[0], data[2]); 1080 data[0], data[2]);
1082 brcmf_fws_lock(fws->drvr, flags); 1081 brcmf_fws_lock(fws);
1083 if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT) 1082 if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
1084 entry->requested_credit = data[0]; 1083 entry->requested_credit = data[0];
1085 else 1084 else
1086 entry->requested_packet = data[0]; 1085 entry->requested_packet = data[0];
1087 1086
1088 entry->ac_bitmap = data[2]; 1087 entry->ac_bitmap = data[2];
1089 brcmf_fws_unlock(fws->drvr, flags); 1088 brcmf_fws_unlock(fws);
1090 return BRCMF_FWS_RET_OK_SCHEDULE; 1089 return BRCMF_FWS_RET_OK_SCHEDULE;
1091} 1090}
1092 1091
@@ -1160,7 +1159,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
1160static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws) 1159static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
1161{ 1160{
1162 /* only schedule dequeue when there are credits for delayed traffic */ 1161 /* only schedule dequeue when there are credits for delayed traffic */
1163 if (fws->fifo_credit_map & fws->fifo_delay_map) 1162 if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
1163 (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
1164 queue_work(fws->fws_wq, &fws->fws_dequeue_work); 1164 queue_work(fws->fws_wq, &fws->fws_dequeue_work);
1165} 1165}
1166 1166
@@ -1383,7 +1383,6 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1383static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws, 1383static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
1384 u8 *data) 1384 u8 *data)
1385{ 1385{
1386 ulong flags;
1387 int i; 1386 int i;
1388 1387
1389 if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) { 1388 if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
@@ -1392,19 +1391,18 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
1392 } 1391 }
1393 1392
1394 brcmf_dbg(DATA, "enter: data %pM\n", data); 1393 brcmf_dbg(DATA, "enter: data %pM\n", data);
1395 brcmf_fws_lock(fws->drvr, flags); 1394 brcmf_fws_lock(fws);
1396 for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++) 1395 for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
1397 brcmf_fws_return_credits(fws, i, data[i]); 1396 brcmf_fws_return_credits(fws, i, data[i]);
1398 1397
1399 brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map, 1398 brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
1400 fws->fifo_delay_map); 1399 fws->fifo_delay_map);
1401 brcmf_fws_unlock(fws->drvr, flags); 1400 brcmf_fws_unlock(fws);
1402 return BRCMF_FWS_RET_OK_SCHEDULE; 1401 return BRCMF_FWS_RET_OK_SCHEDULE;
1403} 1402}
1404 1403
1405static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) 1404static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
1406{ 1405{
1407 ulong lflags;
1408 __le32 status_le; 1406 __le32 status_le;
1409 u32 status; 1407 u32 status;
1410 u32 hslot; 1408 u32 hslot;
@@ -1418,9 +1416,9 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
1418 hslot = brcmf_txstatus_get_field(status, HSLOT); 1416 hslot = brcmf_txstatus_get_field(status, HSLOT);
1419 genbit = brcmf_txstatus_get_field(status, GENERATION); 1417 genbit = brcmf_txstatus_get_field(status, GENERATION);
1420 1418
1421 brcmf_fws_lock(fws->drvr, lflags); 1419 brcmf_fws_lock(fws);
1422 brcmf_fws_txs_process(fws, flags, hslot, genbit); 1420 brcmf_fws_txs_process(fws, flags, hslot, genbit);
1423 brcmf_fws_unlock(fws->drvr, lflags); 1421 brcmf_fws_unlock(fws);
1424 return BRCMF_FWS_RET_OK_NOSCHEDULE; 1422 return BRCMF_FWS_RET_OK_NOSCHEDULE;
1425} 1423}
1426 1424
@@ -1440,7 +1438,6 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
1440{ 1438{
1441 struct brcmf_fws_info *fws = ifp->drvr->fws; 1439 struct brcmf_fws_info *fws = ifp->drvr->fws;
1442 int i; 1440 int i;
1443 ulong flags;
1444 u8 *credits = data; 1441 u8 *credits = data;
1445 1442
1446 if (e->datalen < BRCMF_FWS_FIFO_COUNT) { 1443 if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
@@ -1453,7 +1450,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
1453 fws->creditmap_received = true; 1450 fws->creditmap_received = true;
1454 1451
1455 brcmf_dbg(TRACE, "enter: credits %pM\n", credits); 1452 brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
1456 brcmf_fws_lock(ifp->drvr, flags); 1453 brcmf_fws_lock(fws);
1457 for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) { 1454 for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
1458 if (*credits) 1455 if (*credits)
1459 fws->fifo_credit_map |= 1 << i; 1456 fws->fifo_credit_map |= 1 << i;
@@ -1462,7 +1459,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
1462 fws->fifo_credit[i] = *credits++; 1459 fws->fifo_credit[i] = *credits++;
1463 } 1460 }
1464 brcmf_fws_schedule_deq(fws); 1461 brcmf_fws_schedule_deq(fws);
1465 brcmf_fws_unlock(ifp->drvr, flags); 1462 brcmf_fws_unlock(fws);
1466 return 0; 1463 return 0;
1467} 1464}
1468 1465
@@ -1471,18 +1468,18 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
1471 void *data) 1468 void *data)
1472{ 1469{
1473 struct brcmf_fws_info *fws = ifp->drvr->fws; 1470 struct brcmf_fws_info *fws = ifp->drvr->fws;
1474 ulong flags;
1475 1471
1476 brcmf_fws_lock(ifp->drvr, flags); 1472 brcmf_fws_lock(fws);
1477 if (fws) 1473 if (fws)
1478 fws->bcmc_credit_check = true; 1474 fws->bcmc_credit_check = true;
1479 brcmf_fws_unlock(ifp->drvr, flags); 1475 brcmf_fws_unlock(fws);
1480 return 0; 1476 return 0;
1481} 1477}
1482 1478
1483int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, 1479int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1484 struct sk_buff *skb) 1480 struct sk_buff *skb)
1485{ 1481{
1482 struct brcmf_skb_reorder_data *rd;
1486 struct brcmf_fws_info *fws = drvr->fws; 1483 struct brcmf_fws_info *fws = drvr->fws;
1487 u8 *signal_data; 1484 u8 *signal_data;
1488 s16 data_len; 1485 s16 data_len;
@@ -1497,8 +1494,10 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1497 1494
1498 WARN_ON(signal_len > skb->len); 1495 WARN_ON(signal_len > skb->len);
1499 1496
1497 if (!signal_len)
1498 return 0;
1500 /* if flow control disabled, skip to packet data and leave */ 1499 /* if flow control disabled, skip to packet data and leave */
1501 if (!signal_len || !drvr->fw_signals) { 1500 if (!fws->fw_signals) {
1502 skb_pull(skb, signal_len); 1501 skb_pull(skb, signal_len);
1503 return 0; 1502 return 0;
1504 } 1503 }
@@ -1536,9 +1535,12 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
1536 1535
1537 err = BRCMF_FWS_RET_OK_NOSCHEDULE; 1536 err = BRCMF_FWS_RET_OK_NOSCHEDULE;
1538 switch (type) { 1537 switch (type) {
1539 case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
1540 case BRCMF_FWS_TYPE_COMP_TXSTATUS: 1538 case BRCMF_FWS_TYPE_COMP_TXSTATUS:
1541 break; 1539 break;
1540 case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
1541 rd = (struct brcmf_skb_reorder_data *)skb->cb;
1542 rd->reorder = data;
1543 break;
1542 case BRCMF_FWS_TYPE_MACDESC_ADD: 1544 case BRCMF_FWS_TYPE_MACDESC_ADD:
1543 case BRCMF_FWS_TYPE_MACDESC_DEL: 1545 case BRCMF_FWS_TYPE_MACDESC_DEL:
1544 brcmf_fws_macdesc_indicate(fws, type, data); 1546 brcmf_fws_macdesc_indicate(fws, type, data);
@@ -1694,17 +1696,22 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
1694 return PTR_ERR(entry); 1696 return PTR_ERR(entry);
1695 1697
1696 brcmf_fws_precommit_skb(fws, fifo, skb); 1698 brcmf_fws_precommit_skb(fws, fifo, skb);
1699 entry->transit_count++;
1700 if (entry->suppressed)
1701 entry->suppr_transit_count++;
1702 brcmf_fws_unlock(fws);
1697 rc = brcmf_bus_txdata(bus, skb); 1703 rc = brcmf_bus_txdata(bus, skb);
1704 brcmf_fws_lock(fws);
1698 brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name, 1705 brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
1699 skcb->if_flags, skcb->htod, rc); 1706 skcb->if_flags, skcb->htod, rc);
1700 if (rc < 0) { 1707 if (rc < 0) {
1708 entry->transit_count--;
1709 if (entry->suppressed)
1710 entry->suppr_transit_count--;
1701 brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); 1711 brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
1702 goto rollback; 1712 goto rollback;
1703 } 1713 }
1704 1714
1705 entry->transit_count++;
1706 if (entry->suppressed)
1707 entry->suppr_transit_count++;
1708 fws->stats.pkt2bus++; 1715 fws->stats.pkt2bus++;
1709 fws->stats.send_pkts[fifo]++; 1716 fws->stats.send_pkts[fifo]++;
1710 if (brcmf_skb_if_flags_get_field(skb, REQUESTED)) 1717 if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
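[Note on the hunk above: brcmf_fws_commit_skb() now does its transit accounting *before* brcmf_bus_txdata(), drops the fws lock for the duration of the bus call, and rolls the counters back if the transmit fails. A minimal sketch of that pattern, assuming the single-argument lock helpers introduced elsewhere in this diff; fws_tx_one() is a hypothetical name, not a driver function:

	/* Sketch: account first, send unlocked, roll back on failure.
	 * fws_tx_one() is illustrative; only the helpers it calls
	 * appear in the driver.
	 */
	static int fws_tx_one(struct brcmf_fws_info *fws,
			      struct brcmf_fws_mac_descriptor *entry,
			      struct sk_buff *skb)
	{
		int rc;

		entry->transit_count++;		/* optimistic accounting */
		if (entry->suppressed)
			entry->suppr_transit_count++;
		brcmf_fws_unlock(fws);		/* bus call runs unlocked */
		rc = brcmf_bus_txdata(fws->drvr->bus_if, skb);
		brcmf_fws_lock(fws);
		if (rc < 0) {
			entry->transit_count--;	/* undo on failure */
			if (entry->suppressed)
				entry->suppr_transit_count--;
		}
		return rc;
	}

Dropping the lock around the bus call is what lets the SDIO/USB bus drivers run without holding the firmware-signalling spinlock; the up-front accounting keeps the counters consistent for anyone observing them while the lock is released.]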
@@ -1741,11 +1748,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1741 struct brcmf_fws_info *fws = drvr->fws; 1748 struct brcmf_fws_info *fws = drvr->fws;
1742 struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb); 1749 struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
1743 struct ethhdr *eh = (struct ethhdr *)(skb->data); 1750 struct ethhdr *eh = (struct ethhdr *)(skb->data);
1744 ulong flags;
1745 int fifo = BRCMF_FWS_FIFO_BCMC; 1751 int fifo = BRCMF_FWS_FIFO_BCMC;
1746 bool multicast = is_multicast_ether_addr(eh->h_dest); 1752 bool multicast = is_multicast_ether_addr(eh->h_dest);
1747 bool pae = eh->h_proto == htons(ETH_P_PAE); 1753 bool pae = eh->h_proto == htons(ETH_P_PAE);
1748 1754
1755 brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
1749 /* determine the priority */ 1756 /* determine the priority */
1750 if (!skb->priority) 1757 if (!skb->priority)
1751 skb->priority = cfg80211_classify8021d(skb); 1758 skb->priority = cfg80211_classify8021d(skb);
@@ -1754,14 +1761,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1754 if (pae) 1761 if (pae)
1755 atomic_inc(&ifp->pend_8021x_cnt); 1762 atomic_inc(&ifp->pend_8021x_cnt);
1756 1763
1757 if (!brcmf_fws_fc_active(fws)) {
1758 /* If the protocol uses a data header, apply it */
1759 brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
1760
1761 /* Use bus module to send data frame */
1762 return brcmf_bus_txdata(drvr->bus_if, skb);
1763 }
1764
1765 /* set control buffer information */ 1764 /* set control buffer information */
1766 skcb->if_flags = 0; 1765 skcb->if_flags = 0;
1767 skcb->state = BRCMF_FWS_SKBSTATE_NEW; 1766 skcb->state = BRCMF_FWS_SKBSTATE_NEW;
@@ -1769,7 +1768,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1769 if (!multicast) 1768 if (!multicast)
1770 fifo = brcmf_fws_prio2fifo[skb->priority]; 1769 fifo = brcmf_fws_prio2fifo[skb->priority];
1771 1770
1772 brcmf_fws_lock(drvr, flags); 1771 brcmf_fws_lock(fws);
1773 if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC) 1772 if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
1774 fws->borrow_defer_timestamp = jiffies + 1773 fws->borrow_defer_timestamp = jiffies +
1775 BRCMF_FWS_BORROW_DEFER_PERIOD; 1774 BRCMF_FWS_BORROW_DEFER_PERIOD;
@@ -1789,7 +1788,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1789 } 1788 }
1790 brcmu_pkt_buf_free_skb(skb); 1789 brcmu_pkt_buf_free_skb(skb);
1791 } 1790 }
1792 brcmf_fws_unlock(drvr, flags); 1791 brcmf_fws_unlock(fws);
1793 return 0; 1792 return 0;
1794} 1793}
1795 1794
@@ -1809,7 +1808,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
1809 struct brcmf_fws_info *fws = ifp->drvr->fws; 1808 struct brcmf_fws_info *fws = ifp->drvr->fws;
1810 struct brcmf_fws_mac_descriptor *entry; 1809 struct brcmf_fws_mac_descriptor *entry;
1811 1810
1812 if (!ifp->ndev || !ifp->drvr->fw_signals) 1811 if (!ifp->ndev)
1813 return; 1812 return;
1814 1813
1815 entry = &fws->desc.iface[ifp->ifidx]; 1814 entry = &fws->desc.iface[ifp->ifidx];
@@ -1824,31 +1823,54 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
1824void brcmf_fws_del_interface(struct brcmf_if *ifp) 1823void brcmf_fws_del_interface(struct brcmf_if *ifp)
1825{ 1824{
1826 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; 1825 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
1827 ulong flags;
1828 1826
1829 if (!entry) 1827 if (!entry)
1830 return; 1828 return;
1831 1829
1832 brcmf_fws_lock(ifp->drvr, flags); 1830 brcmf_fws_lock(ifp->drvr->fws);
1833 ifp->fws_desc = NULL; 1831 ifp->fws_desc = NULL;
1834 brcmf_dbg(TRACE, "deleting %s\n", entry->name); 1832 brcmf_dbg(TRACE, "deleting %s\n", entry->name);
1835 brcmf_fws_macdesc_deinit(entry); 1833 brcmf_fws_macdesc_deinit(entry);
1836 brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx); 1834 brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
1837 brcmf_fws_unlock(ifp->drvr, flags); 1835 brcmf_fws_unlock(ifp->drvr->fws);
1838} 1836}
1839 1837
1840static void brcmf_fws_dequeue_worker(struct work_struct *worker) 1838static void brcmf_fws_dequeue_worker(struct work_struct *worker)
1841{ 1839{
1842 struct brcmf_fws_info *fws; 1840 struct brcmf_fws_info *fws;
1841 struct brcmf_pub *drvr;
1843 struct sk_buff *skb; 1842 struct sk_buff *skb;
1844 ulong flags;
1845 int fifo; 1843 int fifo;
1844 u32 hslot;
1845 u32 ifidx;
1846 int ret;
1846 1847
1847 fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work); 1848 fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
1849 drvr = fws->drvr;
1848 1850
1849 brcmf_fws_lock(fws->drvr, flags); 1851 brcmf_fws_lock(fws);
1850 for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked; 1852 for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
1851 fifo--) { 1853 fifo--) {
1854 if (!brcmf_fws_fc_active(fws)) {
1855 while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
1856 hslot = brcmf_skb_htod_tag_get_field(skb,
1857 HSLOT);
1858 brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
1859 &skb, true);
1860 ifidx = brcmf_skb_if_flags_get_field(skb,
1861 INDEX);
1862 brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
1863 /* Use bus module to send data frame */
1864 brcmf_fws_unlock(fws);
1865 ret = brcmf_bus_txdata(drvr->bus_if, skb);
1866 brcmf_fws_lock(fws);
1867 if (ret < 0)
1868 brcmf_txfinalize(drvr, skb, false);
1869 if (fws->bus_flow_blocked)
1870 break;
1871 }
1872 continue;
1873 }
1852 while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) && 1874 while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
1853 (fifo == BRCMF_FWS_FIFO_BCMC))) { 1875 (fifo == BRCMF_FWS_FIFO_BCMC))) {
1854 skb = brcmf_fws_deq(fws, fifo); 1876 skb = brcmf_fws_deq(fws, fifo);
@@ -1876,42 +1898,43 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
1876 } 1898 }
1877 } 1899 }
1878 } 1900 }
1879 brcmf_fws_unlock(fws->drvr, flags); 1901 brcmf_fws_unlock(fws);
1880} 1902}
1881 1903
1882int brcmf_fws_init(struct brcmf_pub *drvr) 1904int brcmf_fws_init(struct brcmf_pub *drvr)
1883{ 1905{
1906 struct brcmf_fws_info *fws;
1884 u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS; 1907 u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
1885 int rc; 1908 int rc;
1886 1909
1887 if (!drvr->fw_signals)
1888 return 0;
1889
1890 spin_lock_init(&drvr->fws_spinlock);
1891
1892 drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL); 1910 drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
1893 if (!drvr->fws) { 1911 if (!drvr->fws) {
1894 rc = -ENOMEM; 1912 rc = -ENOMEM;
1895 goto fail; 1913 goto fail;
1896 } 1914 }
1897 1915
1916 fws = drvr->fws;
1917
1918 spin_lock_init(&fws->spinlock);
1919
1898 /* set linkage back */ 1920 /* set linkage back */
1899 drvr->fws->drvr = drvr; 1921 fws->drvr = drvr;
1900 drvr->fws->fcmode = fcmode; 1922 fws->fcmode = fcmode;
1901 1923
1902 drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); 1924 fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
1903 if (drvr->fws->fws_wq == NULL) { 1925 if (fws->fws_wq == NULL) {
1904 brcmf_err("workqueue creation failed\n"); 1926 brcmf_err("workqueue creation failed\n");
1905 rc = -EBADF; 1927 rc = -EBADF;
1906 goto fail; 1928 goto fail;
1907 } 1929 }
1908 INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker); 1930 INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
1909 1931
1910 /* enable firmware signalling if fcmode active */ 1932 /* enable firmware signalling if fcmode active */
1911 if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE) 1933 if (fws->fcmode != BRCMF_FWS_FCMODE_NONE)
1912 tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS | 1934 tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
1913 BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS | 1935 BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
1914 BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE; 1936 BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
1937 BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE;
1915 1938
1916 rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP, 1939 rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
1917 brcmf_fws_notify_credit_map); 1940 brcmf_fws_notify_credit_map);
@@ -1927,31 +1950,33 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
1927 goto fail; 1950 goto fail;
1928 } 1951 }
1929 1952
1930 /* setting the iovar may fail if feature is unsupported 1953 /* Setting the iovar may fail if feature is unsupported
1931 * so leave the rc as is so driver initialization can 1954 * so leave the rc as is so driver initialization can
1932 * continue. 1955 * continue. Set mode back to none indicating not enabled.
1933 */ 1956 */
1957 fws->fw_signals = true;
1934 if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) { 1958 if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
1935 brcmf_err("failed to set bdcv2 tlv signaling\n"); 1959 brcmf_err("failed to set bdcv2 tlv signaling\n");
1936 goto fail_event; 1960 fws->fcmode = BRCMF_FWS_FCMODE_NONE;
1961 fws->fw_signals = false;
1937 } 1962 }
1938 1963
1939 brcmf_fws_hanger_init(&drvr->fws->hanger); 1964 if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
1940 brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0); 1965 brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
1941 brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other); 1966
1942 brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, 1967 brcmf_fws_hanger_init(&fws->hanger);
1968 brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
1969 brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
1970 brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
1943 BRCMF_FWS_PSQ_LEN); 1971 BRCMF_FWS_PSQ_LEN);
1944 1972
1945 /* create debugfs file for statistics */ 1973 /* create debugfs file for statistics */
1946 brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats); 1974 brcmf_debugfs_create_fws_stats(drvr, &fws->stats);
1947 1975
1948 brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", 1976 brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
1949 drvr->fw_signals ? "enabled" : "disabled", tlv); 1977 fws->fw_signals ? "enabled" : "disabled", tlv);
1950 return 0; 1978 return 0;
1951 1979
1952fail_event:
1953 brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
1954 brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
1955fail: 1980fail:
1956 brcmf_fws_deinit(drvr); 1981 brcmf_fws_deinit(drvr);
1957 return rc; 1982 return rc;
@@ -1960,24 +1985,18 @@ fail:
1960void brcmf_fws_deinit(struct brcmf_pub *drvr) 1985void brcmf_fws_deinit(struct brcmf_pub *drvr)
1961{ 1986{
1962 struct brcmf_fws_info *fws = drvr->fws; 1987 struct brcmf_fws_info *fws = drvr->fws;
1963 ulong flags;
1964 1988
1965 if (!fws) 1989 if (!fws)
1966 return; 1990 return;
1967 1991
1968 /* disable firmware signalling entirely
1969 * to avoid using the workqueue.
1970 */
1971 drvr->fw_signals = false;
1972
1973 if (drvr->fws->fws_wq) 1992 if (drvr->fws->fws_wq)
1974 destroy_workqueue(drvr->fws->fws_wq); 1993 destroy_workqueue(drvr->fws->fws_wq);
1975 1994
1976 /* cleanup */ 1995 /* cleanup */
1977 brcmf_fws_lock(drvr, flags); 1996 brcmf_fws_lock(fws);
1978 brcmf_fws_cleanup(fws, -1); 1997 brcmf_fws_cleanup(fws, -1);
1979 drvr->fws = NULL; 1998 drvr->fws = NULL;
1980 brcmf_fws_unlock(drvr, flags); 1999 brcmf_fws_unlock(fws);
1981 2000
1982 /* free top structure */ 2001 /* free top structure */
1983 kfree(fws); 2002 kfree(fws);
@@ -1985,7 +2004,7 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr)
1985 2004
1986bool brcmf_fws_fc_active(struct brcmf_fws_info *fws) 2005bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
1987{ 2006{
1988 if (!fws) 2007 if (!fws->creditmap_received)
1989 return false; 2008 return false;
1990 2009
1991 return fws->fcmode != BRCMF_FWS_FCMODE_NONE; 2010 return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
@@ -1993,17 +2012,16 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
1993 2012
1994void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) 2013void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
1995{ 2014{
1996 ulong flags;
1997 u32 hslot; 2015 u32 hslot;
1998 2016
1999 if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) { 2017 if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
2000 brcmu_pkt_buf_free_skb(skb); 2018 brcmu_pkt_buf_free_skb(skb);
2001 return; 2019 return;
2002 } 2020 }
2003 brcmf_fws_lock(fws->drvr, flags); 2021 brcmf_fws_lock(fws);
2004 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); 2022 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
2005 brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0); 2023 brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
2006 brcmf_fws_unlock(fws->drvr, flags); 2024 brcmf_fws_unlock(fws);
2007} 2025}
2008 2026
2009void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked) 2027void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
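[Note: throughout fwsignal.c the call sites change from brcmf_fws_lock(drvr, flags) / brcmf_fws_unlock(drvr, flags) to a single-argument form, and brcmf_fws_init() above now runs spin_lock_init(&fws->spinlock) instead of initializing drvr->fws_spinlock. The new helpers are presumably self-contained, stashing the irqsave flags inside struct brcmf_fws_info; a hedged reconstruction (the `flags` member name is assumed, not visible in this diff):

	static void brcmf_fws_lock(struct brcmf_fws_info *fws)
			__acquires(&fws->spinlock)
	{
		spin_lock_irqsave(&fws->spinlock, fws->flags);
	}

	static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
			__releases(&fws->spinlock)
	{
		spin_unlock_irqrestore(&fws->spinlock, fws->flags);
	}

This also moves the lock's lifetime into the fws object itself, which is what allows brcmf_fws_deinit() above to lock/unlock via fws rather than via drvr.]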
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 79555f006d53..d7a974532909 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1430,7 +1430,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1430 IEEE80211_BAND_5GHZ); 1430 IEEE80211_BAND_5GHZ);
1431 1431
1432 wdev = &ifp->vif->wdev; 1432 wdev = &ifp->vif->wdev;
1433 cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 1433 cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0,
1434 GFP_ATOMIC); 1434 GFP_ATOMIC);
1435 1435
1436 kfree(mgmt_frame); 1436 kfree(mgmt_frame);
@@ -1895,7 +1895,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
1895 IEEE80211_BAND_2GHZ : 1895 IEEE80211_BAND_2GHZ :
1896 IEEE80211_BAND_5GHZ); 1896 IEEE80211_BAND_5GHZ);
1897 1897
1898 cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 1898 cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0,
1899 GFP_ATOMIC); 1899 GFP_ATOMIC);
1900 1900
1901 brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n", 1901 brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
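[Note: both p2p.c hunks only add a trailing 0 before GFP_ATOMIC, tracking a cfg80211 API change in which cfg80211_rx_mgmt() grew a u32 flags parameter this cycle. Assumed prototype after the change; 0 means no special rx-mgmt flags for these frames:

	bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq,
			      int sig_dbm, const u8 *buf, size_t len,
			      u32 flags, gfp_t gfp);
]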
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 09786a539950..2b5407f002e5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -208,7 +208,7 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
208 */ 208 */
209extern int 209extern int
210brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 210brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
211 uint flags, struct sk_buff *pkt); 211 uint flags, struct sk_buff_head *pktq);
212extern int 212extern int
213brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 213brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
214 uint flags, u8 *buf, uint nbytes); 214 uint flags, u8 *buf, uint nbytes);
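[Note: brcmf_sdcard_send_pkt() now takes a whole sk_buff_head rather than a single sk_buff, presumably so the SDIO layer can submit packet chains in one call. A hypothetical single-packet caller under the new prototype (not taken from the driver):

	struct sk_buff_head pktq;
	int err;

	__skb_queue_head_init(&pktq);		/* one-entry queue */
	__skb_queue_tail(&pktq, skb);
	err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
]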
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 322cadc51ded..39e01a7c8556 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -614,7 +614,6 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
614 return 0; 614 return 0;
615 615
616fail: 616fail:
617 brcmf_txcomplete(dev, skb, false);
618 return ret; 617 return ret;
619} 618}
620 619
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 7fa71f73cfe8..571f013cebbb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -3155,7 +3155,9 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
3155} 3155}
3156 3156
3157#ifdef CONFIG_NL80211_TESTMODE 3157#ifdef CONFIG_NL80211_TESTMODE
3158static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len) 3158static int brcmf_cfg80211_testmode(struct wiphy *wiphy,
3159 struct wireless_dev *wdev,
3160 void *data, int len)
3159{ 3161{
3160 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 3162 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3161 struct net_device *ndev = cfg_to_ndev(cfg); 3163 struct net_device *ndev = cfg_to_ndev(cfg);
@@ -4126,6 +4128,53 @@ static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
4126 clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); 4128 clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
4127} 4129}
4128 4130
4131static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
4132{
4133 int ret;
4134
4135 switch (oper) {
4136 case NL80211_TDLS_DISCOVERY_REQ:
4137 ret = BRCMF_TDLS_MANUAL_EP_DISCOVERY;
4138 break;
4139 case NL80211_TDLS_SETUP:
4140 ret = BRCMF_TDLS_MANUAL_EP_CREATE;
4141 break;
4142 case NL80211_TDLS_TEARDOWN:
4143 ret = BRCMF_TDLS_MANUAL_EP_DELETE;
4144 break;
4145 default:
4146 brcmf_err("unsupported operation: %d\n", oper);
4147 ret = -EOPNOTSUPP;
4148 }
4149 return ret;
4150}
4151
4152static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
4153 struct net_device *ndev, u8 *peer,
4154 enum nl80211_tdls_operation oper)
4155{
4156 struct brcmf_if *ifp;
4157 struct brcmf_tdls_iovar_le info;
4158 int ret = 0;
4159
4160 ret = brcmf_convert_nl80211_tdls_oper(oper);
4161 if (ret < 0)
4162 return ret;
4163
4164 ifp = netdev_priv(ndev);
4165 memset(&info, 0, sizeof(info));
4166 info.mode = (u8)ret;
4167 if (peer)
4168 memcpy(info.ea, peer, ETH_ALEN);
4169
4170 ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
4171 &info, sizeof(info));
4172 if (ret < 0)
4173 brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret);
4174
4175 return ret;
4176}
4177
4129static struct cfg80211_ops wl_cfg80211_ops = { 4178static struct cfg80211_ops wl_cfg80211_ops = {
4130 .add_virtual_intf = brcmf_cfg80211_add_iface, 4179 .add_virtual_intf = brcmf_cfg80211_add_iface,
4131 .del_virtual_intf = brcmf_cfg80211_del_iface, 4180 .del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -4164,9 +4213,8 @@ static struct cfg80211_ops wl_cfg80211_ops = {
4164 .stop_p2p_device = brcmf_p2p_stop_device, 4213 .stop_p2p_device = brcmf_p2p_stop_device,
4165 .crit_proto_start = brcmf_cfg80211_crit_proto_start, 4214 .crit_proto_start = brcmf_cfg80211_crit_proto_start,
4166 .crit_proto_stop = brcmf_cfg80211_crit_proto_stop, 4215 .crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
4167#ifdef CONFIG_NL80211_TESTMODE 4216 .tdls_oper = brcmf_cfg80211_tdls_oper,
4168 .testmode_cmd = brcmf_cfg80211_testmode 4217 CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
4169#endif
4170}; 4218};
4171 4219
4172static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type) 4220static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
@@ -4287,7 +4335,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
4287 wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); 4335 wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
4288 wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT | 4336 wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
4289 WIPHY_FLAG_OFFCHAN_TX | 4337 WIPHY_FLAG_OFFCHAN_TX |
4290 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 4338 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
4339 WIPHY_FLAG_SUPPORTS_TDLS;
4291 wiphy->mgmt_stypes = brcmf_txrx_stypes; 4340 wiphy->mgmt_stypes = brcmf_txrx_stypes;
4292 wiphy->max_remain_on_channel_duration = 5000; 4341 wiphy->max_remain_on_channel_duration = 5000;
4293 brcmf_wiphy_pno_params(wiphy); 4342 brcmf_wiphy_pno_params(wiphy);
@@ -4908,6 +4957,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
4908 goto cfg80211_p2p_attach_out; 4957 goto cfg80211_p2p_attach_out;
4909 } 4958 }
4910 4959
4960 err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
4961 if (err) {
4962 brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
4963 wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
4964 }
4965
4911 err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION, 4966 err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION,
4912 &io_type); 4967 &io_type);
4913 if (err) { 4968 if (err) {
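[Note: in the cfg80211_ops table above, the open-coded #ifdef CONFIG_NL80211_TESTMODE pair around .testmode_cmd is replaced by the CFG80211_TESTMODE_CMD() helper, which presumably expands to the member initializer when testmode is configured and to nothing otherwise, roughly:

	/* Presumed shape of the helper (net/cfg80211.h): */
	#ifdef CONFIG_NL80211_TESTMODE
	#define CFG80211_TESTMODE_CMD(cmd)	.testmode_cmd = (cmd),
	#else
	#define CFG80211_TESTMODE_CMD(cmd)
	#endif

This keeps the ops initializer free of preprocessor noise while producing the same struct either way.]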
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index e4fd1ee3d690..53365977bfd6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -679,27 +679,6 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
679 return mode == BCMA_CLKMODE_FAST; 679 return mode == BCMA_CLKMODE_FAST;
680} 680}
681 681
682void ai_pci_up(struct si_pub *sih)
683{
684 struct si_info *sii;
685
686 sii = container_of(sih, struct si_info, pub);
687
688 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
689 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
690}
691
692/* Unconfigure and/or apply various WARs when going down */
693void ai_pci_down(struct si_pub *sih)
694{
695 struct si_info *sii;
696
697 sii = container_of(sih, struct si_info, pub);
698
699 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
700 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
701}
702
703/* Enable BT-COEX & Ex-PA for 4313 */ 682/* Enable BT-COEX & Ex-PA for 4313 */
704void ai_epa_4313war(struct si_pub *sih) 683void ai_epa_4313war(struct si_pub *sih)
705{ 684{
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index 89562c1fbf49..a8a267b5b87a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -183,9 +183,6 @@ extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
183extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode); 183extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
184extern bool ai_deviceremoved(struct si_pub *sih); 184extern bool ai_deviceremoved(struct si_pub *sih);
185 185
186extern void ai_pci_down(struct si_pub *sih);
187extern void ai_pci_up(struct si_pub *sih);
188
189/* Enable Ex-PA for 4313 */ 186/* Enable Ex-PA for 4313 */
190extern void ai_epa_4313war(struct si_pub *sih); 187extern void ai_epa_4313war(struct si_pub *sih);
191 188
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index bd982856d385..fa391e4eb098 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -928,9 +928,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
928 } 928 }
929 } else if (txs->phyerr) { 929 } else if (txs->phyerr) {
930 update_rate = false; 930 update_rate = false;
931 brcms_err(wlc->hw->d11core, 931 brcms_dbg_ht(wlc->hw->d11core,
932 "%s: ampdu tx phy error (0x%x)\n", 932 "%s: ampdu tx phy error (0x%x)\n",
933 __func__, txs->phyerr); 933 __func__, txs->phyerr);
934 } 934 }
935 } 935 }
936 936
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 1860c572b3c4..4fb9635d3919 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
1015 1015
1016/* 1016/*
1017 * post receive buffers 1017 * post receive buffers
1018 * return false is refill failed completely and ring is empty this will stall 1018 * Return false if refill failed completely or dma mapping failed. The ring
1019 * the rx dma and user might want to call rxfill again asap. This unlikely 1019 * is empty, which will stall the rx dma and user might want to call rxfill
1020 * happens on memory-rich NIC, but often on memory-constrained dongle 1020 * again asap. This is unlikely to happen on a memory-rich NIC, but often on
1021 * memory-constrained dongle.
1021 */ 1022 */
1022bool dma_rxfill(struct dma_pub *pub) 1023bool dma_rxfill(struct dma_pub *pub)
1023{ 1024{
@@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
1078 1079
1079 pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, 1080 pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
1080 DMA_FROM_DEVICE); 1081 DMA_FROM_DEVICE);
1082 if (dma_mapping_error(di->dmadev, pa))
1083 return false;
1081 1084
1082 /* save the free packet pointer */ 1085 /* save the free packet pointer */
1083 di->rxp[rxout] = p; 1086 di->rxp[rxout] = p;
@@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
1284 1287
1285 /* get physical address of buffer start */ 1288 /* get physical address of buffer start */
1286 pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); 1289 pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
1287 1290 /* if mapping failed, free skb */
1291 if (dma_mapping_error(di->dmadev, pa)) {
1292 brcmu_pkt_buf_free_skb(p);
1293 return;
1294 }
1288 /* With a DMA segment list, Descriptor table is filled 1295 /* With a DMA segment list, Descriptor table is filled
1289 * using the segment list instead of looping over 1296 * using the segment list instead of looping over
1290 * buffers in multi-chain DMA. Therefore, EOF for SGLIST 1297 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
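[Note: both dma.c hunks add the dma_mapping_error() check that the DMA API requires after every dma_map_single(); only the recovery differs (rxfill bails out and returns false, txenq frees the skb). The generic pattern, as a self-contained sketch:

	#include <linux/dma-mapping.h>

	/* Sketch of the mandatory check: a mapping handle must be
	 * validated before the descriptor is armed; what to do on
	 * failure is caller-specific, as the two hunks above show.
	 */
	static int map_buf_for_dma(struct device *dev, void *data,
				   size_t len,
				   enum dma_data_direction dir,
				   dma_addr_t *pa)
	{
		*pa = dma_map_single(dev, data, len, dir);
		if (dma_mapping_error(dev, *pa))
			return -ENOMEM;	/* handle is unusable */
		return 0;
	}
]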
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9fd6f2fef11b..4608e0eb1493 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -882,8 +882,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
882 mcl = le16_to_cpu(txh->MacTxControlLow); 882 mcl = le16_to_cpu(txh->MacTxControlLow);
883 883
884 if (txs->phyerr) 884 if (txs->phyerr)
885 brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n", 885 brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
886 txs->phyerr, txh->MainRates); 886 txs->phyerr, txh->MainRates);
887 887
888 if (txs->frameid != le16_to_cpu(txh->TxFrameID)) { 888 if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
889 brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n"); 889 brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
@@ -4652,7 +4652,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4652 wlc->band->phyrev = wlc_hw->band->phyrev; 4652 wlc->band->phyrev = wlc_hw->band->phyrev;
4653 wlc->band->radioid = wlc_hw->band->radioid; 4653 wlc->band->radioid = wlc_hw->band->radioid;
4654 wlc->band->radiorev = wlc_hw->band->radiorev; 4654 wlc->band->radiorev = wlc_hw->band->radiorev;
4655 4655 brcms_dbg_info(core, "wl%d: phy %u/%u radio %x/%u\n", unit,
4656 wlc->band->phytype, wlc->band->phyrev,
4657 wlc->band->radioid, wlc->band->radiorev);
4656 /* default contention windows size limits */ 4658 /* default contention windows size limits */
4657 wlc_hw->band->CWmin = APHY_CWMIN; 4659 wlc_hw->band->CWmin = APHY_CWMIN;
4658 wlc_hw->band->CWmax = PHY_CWMAX; 4660 wlc_hw->band->CWmax = PHY_CWMAX;
@@ -4667,7 +4669,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4667 brcms_c_coredisable(wlc_hw); 4669 brcms_c_coredisable(wlc_hw);
4668 4670
4669 /* Match driver "down" state */ 4671 /* Match driver "down" state */
4670 ai_pci_down(wlc_hw->sih); 4672 bcma_core_pci_down(wlc_hw->d11core->bus);
4671 4673
4672 /* turn off pll and xtal to match driver "down" state */ 4674 /* turn off pll and xtal to match driver "down" state */
4673 brcms_b_xtal(wlc_hw, OFF); 4675 brcms_b_xtal(wlc_hw, OFF);
@@ -5010,12 +5012,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
5010 */ 5012 */
5011 if (brcms_b_radio_read_hwdisabled(wlc_hw)) { 5013 if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
5012 /* put SB PCI in down state again */ 5014 /* put SB PCI in down state again */
5013 ai_pci_down(wlc_hw->sih); 5015 bcma_core_pci_down(wlc_hw->d11core->bus);
5014 brcms_b_xtal(wlc_hw, OFF); 5016 brcms_b_xtal(wlc_hw, OFF);
5015 return -ENOMEDIUM; 5017 return -ENOMEDIUM;
5016 } 5018 }
5017 5019
5018 ai_pci_up(wlc_hw->sih); 5020 bcma_core_pci_up(wlc_hw->d11core->bus);
5019 5021
5020 /* reset the d11 core */ 5022 /* reset the d11 core */
5021 brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS); 5023 brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -5212,7 +5214,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
5212 5214
5213 /* turn off primary xtal and pll */ 5215 /* turn off primary xtal and pll */
5214 if (!wlc_hw->noreset) { 5216 if (!wlc_hw->noreset) {
5215 ai_pci_down(wlc_hw->sih); 5217 bcma_core_pci_down(wlc_hw->d11core->bus);
5216 brcms_b_xtal(wlc_hw, OFF); 5218 brcms_b_xtal(wlc_hw, OFF);
5217 } 5219 }
5218 } 5220 }
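[Note: per the deleted aiutils.c code earlier in this diff, the removed ai_pci_up()/ai_pci_down() wrappers only toggled the PCIe L1-timer extension on PCI hosts; main.c now calls the bcma layer directly on the d11 core's bus, which is assumed to subsume that host-type check. A sketch of what one call site reduces to:

	/* Assumed equivalence; bcma_core_pci_up() is taken to perform
	 * the host-type-aware L1-timer work inside the bcma layer.
	 */
	static void wlc_pci_up(struct brcms_hardware *wlc_hw)
	{
		bcma_core_pci_up(wlc_hw->d11core->bus);	/* was ai_pci_up(wlc_hw->sih) */
	}
]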
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 3d6b16ce4687..b2d6d6da3daf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
1137 gain0_15 = ((biq1 & 0xf) << 12) | 1137 gain0_15 = ((biq1 & 0xf) << 12) |
1138 ((tia & 0xf) << 8) | 1138 ((tia & 0xf) << 8) |
1139 ((lna2 & 0x3) << 6) | 1139 ((lna2 & 0x3) << 6) |
1140 ((lna2 & 1140 ((lna2 & 0x3) << 4) |
1141 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); 1141 ((lna1 & 0x3) << 2) |
1142 ((lna1 & 0x3) << 0);
1142 1143
1143 mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); 1144 mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
1144 mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); 1145 mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1328,6 +1329,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
1328 return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; 1329 return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
1329} 1330}
1330 1331
1332static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
1333 u16 tia_gain, u16 lna2_gain)
1334{
1335 u32 i_thresh_l, q_thresh_l;
1336 u32 i_thresh_h, q_thresh_h;
1337 struct lcnphy_iq_est iq_est_h, iq_est_l;
1338
1339 wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
1340 lna2_gain, 0);
1341
1342 wlc_lcnphy_rx_gain_override_enable(pi, true);
1343 wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
1344 udelay(500);
1345 write_radio_reg(pi, RADIO_2064_REG112, 0);
1346 if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
1347 return false;
1348
1349 wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
1350 udelay(500);
1351 write_radio_reg(pi, RADIO_2064_REG112, 0);
1352 if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
1353 return false;
1354
1355 i_thresh_l = (iq_est_l.i_pwr << 1);
1356 i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
1357
1358 q_thresh_l = (iq_est_l.q_pwr << 1);
1359 q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
1360 if ((iq_est_h.i_pwr > i_thresh_l) &&
1361 (iq_est_h.i_pwr < i_thresh_h) &&
1362 (iq_est_h.q_pwr > q_thresh_l) &&
1363 (iq_est_h.q_pwr < q_thresh_h))
1364 return true;
1365
1366 return false;
1367}
1368
1331static bool 1369static bool
1332wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, 1370wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
1333 const struct lcnphy_rx_iqcomp *iqcomp, 1371 const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1342,8 +1380,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
1342 RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, 1380 RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
1343 rfoverride3_old, rfoverride3val_old, rfoverride4_old, 1381 rfoverride3_old, rfoverride3val_old, rfoverride4_old,
1344 rfoverride4val_old, afectrlovr_old, afectrlovrval_old; 1382 rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
1345 int tia_gain; 1383 int tia_gain, lna2_gain, biq1_gain;
1346 u32 received_power, rx_pwr_threshold; 1384 bool set_gain;
1347 u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; 1385 u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
1348 u16 values_to_save[11]; 1386 u16 values_to_save[11];
1349 s16 *ptr; 1387 s16 *ptr;
@@ -1368,126 +1406,125 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
1368 goto cal_done; 1406 goto cal_done;
1369 } 1407 }
1370 1408
1371 if (module == 1) { 1409 WARN_ON(module != 1);
1410 tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
1411 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
1372 1412
1373 tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); 1413 for (i = 0; i < 11; i++)
1374 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); 1414 values_to_save[i] =
1415 read_radio_reg(pi, rxiq_cal_rf_reg[i]);
1416 Core1TxControl_old = read_phy_reg(pi, 0x631);
1417
1418 or_phy_reg(pi, 0x631, 0x0015);
1419
1420 RFOverride0_old = read_phy_reg(pi, 0x44c);
1421 RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
1422 rfoverride2_old = read_phy_reg(pi, 0x4b0);
1423 rfoverride2val_old = read_phy_reg(pi, 0x4b1);
1424 rfoverride3_old = read_phy_reg(pi, 0x4f9);
1425 rfoverride3val_old = read_phy_reg(pi, 0x4fa);
1426 rfoverride4_old = read_phy_reg(pi, 0x938);
1427 rfoverride4val_old = read_phy_reg(pi, 0x939);
1428 afectrlovr_old = read_phy_reg(pi, 0x43b);
1429 afectrlovrval_old = read_phy_reg(pi, 0x43c);
1430 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
1431 old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
1375 1432
1376 for (i = 0; i < 11; i++) 1433 tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
1377 values_to_save[i] = 1434 if (tx_gain_override_old) {
1378 read_radio_reg(pi, rxiq_cal_rf_reg[i]); 1435 wlc_lcnphy_get_tx_gain(pi, &old_gains);
1379 Core1TxControl_old = read_phy_reg(pi, 0x631); 1436 tx_gain_index_old = pi_lcn->lcnphy_current_index;
1380 1437 }
1381 or_phy_reg(pi, 0x631, 0x0015);
1382
1383 RFOverride0_old = read_phy_reg(pi, 0x44c);
1384 RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
1385 rfoverride2_old = read_phy_reg(pi, 0x4b0);
1386 rfoverride2val_old = read_phy_reg(pi, 0x4b1);
1387 rfoverride3_old = read_phy_reg(pi, 0x4f9);
1388 rfoverride3val_old = read_phy_reg(pi, 0x4fa);
1389 rfoverride4_old = read_phy_reg(pi, 0x938);
1390 rfoverride4val_old = read_phy_reg(pi, 0x939);
1391 afectrlovr_old = read_phy_reg(pi, 0x43b);
1392 afectrlovrval_old = read_phy_reg(pi, 0x43c);
1393 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
1394 old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
1395
1396 tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
1397 if (tx_gain_override_old) {
1398 wlc_lcnphy_get_tx_gain(pi, &old_gains);
1399 tx_gain_index_old = pi_lcn->lcnphy_current_index;
1400 }
1401 1438
1402 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); 1439 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
1403 1440
1404 mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); 1441 mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
1405 mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); 1442 mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
1406 1443
1407 mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); 1444 mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
1408 mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); 1445 mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
1409 1446
1410 write_radio_reg(pi, RADIO_2064_REG116, 0x06); 1447 write_radio_reg(pi, RADIO_2064_REG116, 0x06);
1411 write_radio_reg(pi, RADIO_2064_REG12C, 0x07); 1448 write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
1412 write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); 1449 write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
1413 write_radio_reg(pi, RADIO_2064_REG098, 0x03); 1450 write_radio_reg(pi, RADIO_2064_REG098, 0x03);
1414 write_radio_reg(pi, RADIO_2064_REG00B, 0x7); 1451 write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
1415 mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); 1452 mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
1416 write_radio_reg(pi, RADIO_2064_REG01D, 0x01); 1453 write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
1417 write_radio_reg(pi, RADIO_2064_REG114, 0x01); 1454 write_radio_reg(pi, RADIO_2064_REG114, 0x01);
1418 write_radio_reg(pi, RADIO_2064_REG02E, 0x10); 1455 write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
1419 write_radio_reg(pi, RADIO_2064_REG12A, 0x08); 1456 write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
1420 1457
1421 mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); 1458 mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
1422 mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); 1459 mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
1423 mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); 1460 mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
1424 mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); 1461 mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
1425 mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); 1462 mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
1426 mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); 1463 mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
1427 mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); 1464 mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
1428 mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); 1465 mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
1429 mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); 1466 mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
1430 mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); 1467 mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
1431
1432 mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
1433 mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
1434
1435 wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
1436 write_phy_reg(pi, 0x6da, 0xffff);
1437 or_phy_reg(pi, 0x6db, 0x3);
1438 wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
1439 wlc_lcnphy_rx_gain_override_enable(pi, true);
1440
1441 tia_gain = 8;
1442 rx_pwr_threshold = 950;
1443 while (tia_gain > 0) {
1444 tia_gain -= 1;
1445 wlc_lcnphy_set_rx_gain_by_distribution(pi,
1446 0, 0, 2, 2,
1447 (u16)
1448 tia_gain, 1, 0);
1449 udelay(500);
1450 1468
1451 received_power = 1469 mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
1452 wlc_lcnphy_measure_digital_power(pi, 2000); 1470 mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
1453 if (received_power < rx_pwr_threshold)
1454 break;
1455 }
1456 result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
1457 1471
1458 wlc_lcnphy_stop_tx_tone(pi); 1472 write_phy_reg(pi, 0x6da, 0xffff);
1473 or_phy_reg(pi, 0x6db, 0x3);
1474
1475 wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
1476 for (lna2_gain = 3; lna2_gain >= 0; lna2_gain--) {
1477 for (tia_gain = 4; tia_gain >= 0; tia_gain--) {
1478 for (biq1_gain = 6; biq1_gain >= 0; biq1_gain--) {
1479 set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
1480 (u16)
1481 biq1_gain,
1482 (u16)
1483 tia_gain,
1484 (u16)
1485 lna2_gain);
1486 if (!set_gain)
1487 continue;
1488
1489 result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
1490 goto stop_tone;
1491 }
1492 }
1493 }
1459 1494
1460 write_phy_reg(pi, 0x631, Core1TxControl_old); 1495stop_tone:
1496 wlc_lcnphy_stop_tx_tone(pi);
1461 1497
1462 write_phy_reg(pi, 0x44c, RFOverrideVal0_old); 1498 write_phy_reg(pi, 0x631, Core1TxControl_old);
1463 write_phy_reg(pi, 0x44d, RFOverrideVal0_old); 1499
1464 write_phy_reg(pi, 0x4b0, rfoverride2_old); 1500 write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
1465 write_phy_reg(pi, 0x4b1, rfoverride2val_old); 1501 write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
1466 write_phy_reg(pi, 0x4f9, rfoverride3_old); 1502 write_phy_reg(pi, 0x4b0, rfoverride2_old);
1467 write_phy_reg(pi, 0x4fa, rfoverride3val_old); 1503 write_phy_reg(pi, 0x4b1, rfoverride2val_old);
1468 write_phy_reg(pi, 0x938, rfoverride4_old); 1504 write_phy_reg(pi, 0x4f9, rfoverride3_old);
1469 write_phy_reg(pi, 0x939, rfoverride4val_old); 1505 write_phy_reg(pi, 0x4fa, rfoverride3val_old);
1470 write_phy_reg(pi, 0x43b, afectrlovr_old); 1506 write_phy_reg(pi, 0x938, rfoverride4_old);
1471 write_phy_reg(pi, 0x43c, afectrlovrval_old); 1507 write_phy_reg(pi, 0x939, rfoverride4val_old);
1472 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); 1508 write_phy_reg(pi, 0x43b, afectrlovr_old);
1473 write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); 1509 write_phy_reg(pi, 0x43c, afectrlovrval_old);
1510 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
1511 write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
1474 1512
1475 wlc_lcnphy_clear_trsw_override(pi); 1513 wlc_lcnphy_clear_trsw_override(pi);
1476 1514
1477 mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); 1515 mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
1478 1516
1479 for (i = 0; i < 11; i++) 1517 for (i = 0; i < 11; i++)
1480 write_radio_reg(pi, rxiq_cal_rf_reg[i], 1518 write_radio_reg(pi, rxiq_cal_rf_reg[i],
1481 values_to_save[i]); 1519 values_to_save[i]);
1482 1520
1483 if (tx_gain_override_old) 1521 if (tx_gain_override_old)
1484 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); 1522 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
1485 else 1523 else
1486 wlc_lcnphy_disable_tx_gain_override(pi); 1524 wlc_lcnphy_disable_tx_gain_override(pi);
1487 1525
1488 wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); 1526 wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
1489 wlc_lcnphy_rx_gain_override_enable(pi, false); 1527 wlc_lcnphy_rx_gain_override_enable(pi, false);
1490 }
1491 1528
1492cal_done: 1529cal_done:
1493 kfree(ptr); 1530 kfree(ptr);
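[Note: the rewritten calibration above (wlc_lcnphy_rx_iq_cal_gain() plus the lna2/tia/biq1 triple loop) replaces the old fixed rx-power-threshold TIA sweep with a linearity test: a gain combination is accepted only when the high-amplitude tone's measured I and Q power both fall strictly between 2x and 5x the low-amplitude estimate. The shift arithmetic, isolated for clarity:

	/* Threshold arithmetic from wlc_lcnphy_rx_iq_cal_gain(): */
	static bool iq_pwr_in_range(u32 pwr_l, u32 pwr_h)
	{
		u32 lo = pwr_l << 1;		/* 2 * pwr_l */
		u32 hi = (pwr_l << 2) + pwr_l;	/* 4x + 1x = 5 * pwr_l */

		return pwr_h > lo && pwr_h < hi;
	}
]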
@@ -1789,6 +1826,19 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
1789 write_radio_reg(pi, RADIO_2064_REG038, 3); 1826 write_radio_reg(pi, RADIO_2064_REG038, 3);
1790 write_radio_reg(pi, RADIO_2064_REG091, 7); 1827 write_radio_reg(pi, RADIO_2064_REG091, 7);
1791 } 1828 }
1829
1830 if (!(pi->sh->boardflags & BFL_FEM)) {
1831 static const u8 reg038[14] = {
1832 0xd, 0xe, 0xd, 0xd, 0xd, 0xc, 0xa,
1833 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0
1834 };
1835
1836 write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
1837 write_radio_reg(pi, RADIO_2064_REG091, 0x3);
1838 write_radio_reg(pi, RADIO_2064_REG038, 0x3);
1839
1840 write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
1841 }
1792} 1842}
1793 1843
1794static int 1844static int
@@ -1983,6 +2033,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
1983 } else { 2033 } else {
1984 mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); 2034 mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
1985 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); 2035 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
2036 mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
2037 mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
2038 mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
2039 mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
2040 mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
2041 mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
2042 mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
2043 mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
2044 mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
2045 mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
1986 } 2046 }
1987 } else { 2047 } else {
1988 mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); 2048 mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2069,13 +2129,23 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
2069 (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); 2129 (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
2070 2130
2071 mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); 2131 mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
2132 mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
2072} 2133}
2073 2134
2074static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) 2135static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2075{ 2136{
2076 struct phytbl_info tab; 2137 struct phytbl_info tab;
2077 u32 rfseq, ind; 2138 u32 rfseq, ind;
2139 enum lcnphy_tssi_mode mode;
2140 u8 tssi_sel;
2078 2141
2142 if (pi->sh->boardflags & BFL_FEM) {
2143 tssi_sel = 0x1;
2144 mode = LCNPHY_TSSI_EXT;
2145 } else {
2146 tssi_sel = 0xe;
2147 mode = LCNPHY_TSSI_POST_PA;
2148 }
2079 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; 2149 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
2080 tab.tbl_width = 32; 2150 tab.tbl_width = 32;
2081 tab.tbl_ptr = &ind; 2151 tab.tbl_ptr = &ind;
@@ -2096,7 +2166,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2096 2166
2097 mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); 2167 mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
2098 2168
2099 wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); 2169 wlc_lcnphy_set_tssi_mux(pi, mode);
2100 mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); 2170 mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
2101 2171
2102 mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); 2172 mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2132,9 +2202,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2132 mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); 2202 mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
2133 2203
2134 if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { 2204 if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
2135 mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); 2205 mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
2136 mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); 2206 mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
2137 } else { 2207 } else {
2208 mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
2138 mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); 2209 mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
2139 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); 2210 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
2140 } 2211 }
@@ -2181,6 +2252,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2181 2252
2182 mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); 2253 mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
2183 2254
2255 mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
2256 mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
2257 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
2258
2184 wlc_lcnphy_pwrctrl_rssiparams(pi); 2259 wlc_lcnphy_pwrctrl_rssiparams(pi);
2185} 2260}
2186 2261
@@ -2799,6 +2874,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2799 read_radio_reg(pi, RADIO_2064_REG007) & 1; 2874 read_radio_reg(pi, RADIO_2064_REG007) & 1;
2800 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; 2875 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
2801 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; 2876 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
2877 u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
2878
2802 idleTssi = read_phy_reg(pi, 0x4ab); 2879 idleTssi = read_phy_reg(pi, 0x4ab);
2803 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & 2880 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
2804 MCTL_EN_MAC)); 2881 MCTL_EN_MAC));
@@ -2816,6 +2893,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2816 mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); 2893 mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
2817 mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); 2894 mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
2818 wlc_lcnphy_tssi_setup(pi); 2895 wlc_lcnphy_tssi_setup(pi);
2896
2897 mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
2898 mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
2899
2900 wlc_lcnphy_set_bbmult(pi, 0x0);
2901
2819 wlc_phy_do_dummy_tx(pi, true, OFF); 2902 wlc_phy_do_dummy_tx(pi, true, OFF);
2820 idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) 2903 idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
2821 >> 0); 2904 >> 0);
@@ -2837,6 +2920,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2837 2920
2838 mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); 2921 mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
2839 2922
2923 wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
2840 wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); 2924 wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
2841 wlc_lcnphy_set_tx_gain(pi, &old_gains); 2925 wlc_lcnphy_set_tx_gain(pi, &old_gains);
2842 wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); 2926 wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3050,6 +3134,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
3050 wlc_lcnphy_write_table(pi, &tab); 3134 wlc_lcnphy_write_table(pi, &tab);
3051 tab.tbl_offset++; 3135 tab.tbl_offset++;
3052 } 3136 }
3137 mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
3138 mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
3139 mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
3140 mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
3141 mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
3053 3142
3054 mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); 3143 mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
3055 3144
@@ -3851,7 +3940,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
3851 target_gains.pad_gain = 21; 3940 target_gains.pad_gain = 21;
3852 target_gains.dac_gain = 0; 3941 target_gains.dac_gain = 0;
3853 wlc_lcnphy_set_tx_gain(pi, &target_gains); 3942 wlc_lcnphy_set_tx_gain(pi, &target_gains);
3854 wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
3855 3943
3856 if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { 3944 if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
3857 3945
@@ -3862,6 +3950,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
3862 lcnphy_recal ? LCNPHY_CAL_RECAL : 3950 lcnphy_recal ? LCNPHY_CAL_RECAL :
3863 LCNPHY_CAL_FULL), false); 3951 LCNPHY_CAL_FULL), false);
3864 } else { 3952 } else {
3953 wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
3865 wlc_lcnphy_tx_iqlo_soft_cal_full(pi); 3954 wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
3866 } 3955 }
3867 3956
@@ -4283,20 +4372,20 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
4283 u16 pa_gain; 4372 u16 pa_gain;
4284 u16 gm_gain; 4373 u16 gm_gain;
4285 4374
4286 if (CHSPEC_IS5G(pi->radio_chanspec))
4287 pa_gain = 0x70;
4288 else
4289 pa_gain = 0x70;
4290
4291 if (pi->sh->boardflags & BFL_FEM) 4375 if (pi->sh->boardflags & BFL_FEM)
4292 pa_gain = 0x10; 4376 pa_gain = 0x10;
4377 else
4378 pa_gain = 0x60;
4293 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; 4379 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
4294 tab.tbl_width = 32; 4380 tab.tbl_width = 32;
4295 tab.tbl_len = 1; 4381 tab.tbl_len = 1;
4296 tab.tbl_ptr = &val; 4382 tab.tbl_ptr = &val;
4297 4383
4384 /* fixed gm_gain value for iPA */
4385 gm_gain = 15;
4298 for (j = 0; j < 128; j++) { 4386 for (j = 0; j < 128; j++) {
4299 gm_gain = gain_table[j].gm; 4387 if (pi->sh->boardflags & BFL_FEM)
4388 gm_gain = gain_table[j].gm;
4300 val = (((u32) pa_gain << 24) | 4389 val = (((u32) pa_gain << 24) |
4301 (gain_table[j].pad << 16) | 4390 (gain_table[j].pad << 16) |
4302 (gain_table[j].pga << 8) | gm_gain); 4391 (gain_table[j].pga << 8) | gm_gain);
@@ -4507,7 +4596,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
4507 4596
4508 write_phy_reg(pi, 0x4ea, 0x4688); 4597 write_phy_reg(pi, 0x4ea, 0x4688);
4509 4598
4510 mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); 4599 if (pi->sh->boardflags & BFL_FEM)
4600 mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
4601 else
4602 mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
4511 4603
4512 mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); 4604 mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
4513 4605
@@ -4518,6 +4610,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
4518 wlc_lcnphy_rcal(pi); 4610 wlc_lcnphy_rcal(pi);
4519 4611
4520 wlc_lcnphy_rc_cal(pi); 4612 wlc_lcnphy_rc_cal(pi);
4613
4614 if (!(pi->sh->boardflags & BFL_FEM)) {
4615 write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
4616 write_radio_reg(pi, RADIO_2064_REG033, 0x19);
4617 write_radio_reg(pi, RADIO_2064_REG039, 0xe);
4618 }
4619
4521} 4620}
4522 4621
4523static void wlc_lcnphy_radio_init(struct brcms_phy *pi) 4622static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4530,6 +4629,7 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
4530 uint idx; 4629 uint idx;
4531 u8 phybw40; 4630 u8 phybw40;
4532 struct phytbl_info tab; 4631 struct phytbl_info tab;
4632 const struct phytbl_info *tb;
4533 u32 val; 4633 u32 val;
4534 4634
4535 phybw40 = CHSPEC_IS40(pi->radio_chanspec); 4635 phybw40 = CHSPEC_IS40(pi->radio_chanspec);
@@ -4547,22 +4647,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
4547 wlc_lcnphy_write_table(pi, &tab); 4647 wlc_lcnphy_write_table(pi, &tab);
4548 } 4648 }
4549 4649
4550 tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; 4650 if (!(pi->sh->boardflags & BFL_FEM)) {
4551 tab.tbl_width = 16; 4651 tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
4552 tab.tbl_ptr = &val; 4652 tab.tbl_width = 16;
4553 tab.tbl_len = 1; 4653 tab.tbl_ptr = &val;
4554 4654 tab.tbl_len = 1;
4555 val = 114;
4556 tab.tbl_offset = 0;
4557 wlc_lcnphy_write_table(pi, &tab);
4558 4655
4559 val = 130; 4656 val = 150;
4560 tab.tbl_offset = 1; 4657 tab.tbl_offset = 0;
4561 wlc_lcnphy_write_table(pi, &tab); 4658 wlc_lcnphy_write_table(pi, &tab);
4562 4659
4563 val = 6; 4660 val = 220;
4564 tab.tbl_offset = 8; 4661 tab.tbl_offset = 1;
4565 wlc_lcnphy_write_table(pi, &tab); 4662 wlc_lcnphy_write_table(pi, &tab);
4663 }
4566 4664
4567 if (CHSPEC_IS2G(pi->radio_chanspec)) { 4665 if (CHSPEC_IS2G(pi->radio_chanspec)) {
4568 if (pi->sh->boardflags & BFL_FEM) 4666 if (pi->sh->boardflags & BFL_FEM)
@@ -4576,7 +4674,6 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
4576 } 4674 }
4577 4675
4578 if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { 4676 if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
4579 const struct phytbl_info *tb;
4580 int l; 4677 int l;
4581 4678
4582 if (CHSPEC_IS2G(pi->radio_chanspec)) { 4679 if (CHSPEC_IS2G(pi->radio_chanspec)) {
@@ -4597,21 +4694,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
4597 wlc_lcnphy_write_table(pi, &tb[idx]); 4694 wlc_lcnphy_write_table(pi, &tb[idx]);
4598 } 4695 }
4599 4696
4600 if ((pi->sh->boardflags & BFL_FEM) 4697 if (pi->sh->boardflags & BFL_FEM) {
4601 && !(pi->sh->boardflags & BFL_FEM_BT)) 4698 if (pi->sh->boardflags & BFL_FEM_BT) {
4602 wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa); 4699 if (pi->sh->boardrev < 0x1250)
4603 else if (pi->sh->boardflags & BFL_FEM_BT) { 4700 tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
4604 if (pi->sh->boardrev < 0x1250) 4701 else
4605 wlc_lcnphy_write_table( 4702 tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
4606 pi, 4703 } else {
4607 &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa); 4704 tb = &dot11lcn_sw_ctrl_tbl_info_4313_epa;
4705 }
4706 } else {
4707 if (pi->sh->boardflags & BFL_FEM_BT)
4708 tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
4608 else 4709 else
4609 wlc_lcnphy_write_table( 4710 tb = &dot11lcn_sw_ctrl_tbl_info_4313;
4610 pi, 4711 }
4611 &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250); 4712 wlc_lcnphy_write_table(pi, tb);
4612 } else
4613 wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313);
4614
4615 wlc_lcnphy_load_rfpower(pi); 4713 wlc_lcnphy_load_rfpower(pi);
4616 4714
4617 wlc_lcnphy_clear_papd_comptable(pi); 4715 wlc_lcnphy_clear_papd_comptable(pi);
@@ -4955,6 +5053,8 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
4955 wlc_lcnphy_load_tx_iir_filter(pi, true, 3); 5053 wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
4956 5054
4957 mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); 5055 mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
5056 if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
5057 wlc_lcnphy_tssi_setup(pi);
4958} 5058}
4959 5059
4960void wlc_phy_detach_lcnphy(struct brcms_phy *pi) 5060void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -4993,8 +5093,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
4993 if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) 5093 if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
4994 return false; 5094 return false;
4995 5095
4996 if ((pi->sh->boardflags & BFL_FEM) && 5096 if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
4997 (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
4998 if (pi_lcn->lcnphy_tempsense_option == 3) { 5097 if (pi_lcn->lcnphy_tempsense_option == 3) {
4999 pi->hwpwrctrl = true; 5098 pi->hwpwrctrl = true;
5000 pi->hwpwrctrl_capable = true; 5099 pi->hwpwrctrl_capable = true;
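Note on the wlc_lcnphy_tbl_init() hunk above: instead of calling wlc_lcnphy_write_table() separately in every branch, the rework selects a single table pointer and writes once. Condensed, the selection logic reduces to the sketch below (table names and flags are taken from the hunk; the ternary form is illustrative only):

        const struct phytbl_info *tb;

        if (pi->sh->boardflags & BFL_FEM) {
                if (pi->sh->boardflags & BFL_FEM_BT)
                        tb = (pi->sh->boardrev < 0x1250) ?
                             &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa :
                             &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
                else
                        tb = &dot11lcn_sw_ctrl_tbl_info_4313_epa;
        } else {
                tb = (pi->sh->boardflags & BFL_FEM_BT) ?
                     &dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa :
                     &dot11lcn_sw_ctrl_tbl_info_4313;
        }
        wlc_lcnphy_write_table(pi, tb);

A behavioral fix rides along with the cleanup: the old chain reached the bt_epa tables whenever BFL_FEM_BT was set, even with BFL_FEM clear, whereas the new nesting keys everything off BFL_FEM first and routes the internal-PA BT combo to the newly added bt_ipa table.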
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index 622c01ca72c5..d7fa312214f3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1507,117 +1507,103 @@ static const u32 dot11lcn_gain_tbl_5G[] = {
1507 1507
1508const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = { 1508const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
1509 {&dot11lcn_gain_tbl_rev0, 1509 {&dot11lcn_gain_tbl_rev0,
1510 sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18, 1510 ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
1511 0, 32} 1511 0, 32}
1512 , 1512 ,
1513 {&dot11lcn_aux_gain_idx_tbl_rev0, 1513 {&dot11lcn_aux_gain_idx_tbl_rev0,
1514 sizeof(dot11lcn_aux_gain_idx_tbl_rev0) / 1514 ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
1515 sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
1516 , 1515 ,
1517 {&dot11lcn_gain_idx_tbl_rev0, 1516 {&dot11lcn_gain_idx_tbl_rev0,
1518 sizeof(dot11lcn_gain_idx_tbl_rev0) / 1517 ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
1519 sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
1520 , 1518 ,
1521}; 1519};
1522 1520
1523static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = { 1521static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
1524 {&dot11lcn_gain_tbl_rev1, 1522 {&dot11lcn_gain_tbl_rev1,
1525 sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18, 1523 ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18,
1526 0, 32} 1524 0, 32}
1527 , 1525 ,
1528 {&dot11lcn_aux_gain_idx_tbl_rev0, 1526 {&dot11lcn_aux_gain_idx_tbl_rev0,
1529 sizeof(dot11lcn_aux_gain_idx_tbl_rev0) / 1527 ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
1530 sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
1531 , 1528 ,
1532 {&dot11lcn_gain_idx_tbl_rev0, 1529 {&dot11lcn_gain_idx_tbl_rev0,
1533 sizeof(dot11lcn_gain_idx_tbl_rev0) / 1530 ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
1534 sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
1535 , 1531 ,
1536}; 1532};
1537 1533
1538const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = { 1534const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
1539 {&dot11lcn_gain_tbl_2G, 1535 {&dot11lcn_gain_tbl_2G,
1540 sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0, 1536 ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0,
1541 32} 1537 32}
1542 , 1538 ,
1543 {&dot11lcn_aux_gain_idx_tbl_2G, 1539 {&dot11lcn_aux_gain_idx_tbl_2G,
1544 sizeof(dot11lcn_aux_gain_idx_tbl_2G) / 1540 ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_2G), 14, 0, 16}
1545 sizeof(dot11lcn_aux_gain_idx_tbl_2G[0]), 14, 0, 16}
1546 , 1541 ,
1547 {&dot11lcn_gain_idx_tbl_2G, 1542 {&dot11lcn_gain_idx_tbl_2G,
1548 sizeof(dot11lcn_gain_idx_tbl_2G) / sizeof(dot11lcn_gain_idx_tbl_2G[0]), 1543 ARRAY_SIZE(dot11lcn_gain_idx_tbl_2G),
1549 13, 0, 32} 1544 13, 0, 32}
1550 , 1545 ,
1551 {&dot11lcn_gain_val_tbl_2G, 1546 {&dot11lcn_gain_val_tbl_2G,
1552 sizeof(dot11lcn_gain_val_tbl_2G) / sizeof(dot11lcn_gain_val_tbl_2G[0]), 1547 ARRAY_SIZE(dot11lcn_gain_val_tbl_2G),
1553 17, 0, 8} 1548 17, 0, 8}
1554}; 1549};
1555 1550
1556const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = { 1551const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
1557 {&dot11lcn_gain_tbl_5G, 1552 {&dot11lcn_gain_tbl_5G,
1558 sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0, 1553 ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
1559 32} 1554 32}
1560 , 1555 ,
1561 {&dot11lcn_aux_gain_idx_tbl_5G, 1556 {&dot11lcn_aux_gain_idx_tbl_5G,
1562 sizeof(dot11lcn_aux_gain_idx_tbl_5G) / 1557 ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
1563 sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
1564 , 1558 ,
1565 {&dot11lcn_gain_idx_tbl_5G, 1559 {&dot11lcn_gain_idx_tbl_5G,
1566 sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]), 1560 ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
1567 13, 0, 32} 1561 13, 0, 32}
1568 , 1562 ,
1569 {&dot11lcn_gain_val_tbl_5G, 1563 {&dot11lcn_gain_val_tbl_5G,
1570 sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]), 1564 ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
1571 17, 0, 8} 1565 17, 0, 8}
1572}; 1566};
1573 1567
1574const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = { 1568const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
1575 {&dot11lcn_gain_tbl_extlna_2G, 1569 {&dot11lcn_gain_tbl_extlna_2G,
1576 sizeof(dot11lcn_gain_tbl_extlna_2G) / 1570 ARRAY_SIZE(dot11lcn_gain_tbl_extlna_2G), 18, 0, 32}
1577 sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32}
1578 , 1571 ,
1579 {&dot11lcn_aux_gain_idx_tbl_extlna_2G, 1572 {&dot11lcn_aux_gain_idx_tbl_extlna_2G,
1580 sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G) / 1573 ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_extlna_2G), 14, 0, 16}
1581 sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G[0]), 14, 0, 16}
1582 , 1574 ,
1583 {&dot11lcn_gain_idx_tbl_extlna_2G, 1575 {&dot11lcn_gain_idx_tbl_extlna_2G,
1584 sizeof(dot11lcn_gain_idx_tbl_extlna_2G) / 1576 ARRAY_SIZE(dot11lcn_gain_idx_tbl_extlna_2G), 13, 0, 32}
1585 sizeof(dot11lcn_gain_idx_tbl_extlna_2G[0]), 13, 0, 32}
1586 , 1577 ,
1587 {&dot11lcn_gain_val_tbl_extlna_2G, 1578 {&dot11lcn_gain_val_tbl_extlna_2G,
1588 sizeof(dot11lcn_gain_val_tbl_extlna_2G) / 1579 ARRAY_SIZE(dot11lcn_gain_val_tbl_extlna_2G), 17, 0, 8}
1589 sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8}
1590}; 1580};
1591 1581
1592const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = { 1582const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
1593 {&dot11lcn_gain_tbl_5G, 1583 {&dot11lcn_gain_tbl_5G,
1594 sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0, 1584 ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
1595 32} 1585 32}
1596 , 1586 ,
1597 {&dot11lcn_aux_gain_idx_tbl_5G, 1587 {&dot11lcn_aux_gain_idx_tbl_5G,
1598 sizeof(dot11lcn_aux_gain_idx_tbl_5G) / 1588 ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
1599 sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
1600 , 1589 ,
1601 {&dot11lcn_gain_idx_tbl_5G, 1590 {&dot11lcn_gain_idx_tbl_5G,
1602 sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]), 1591 ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
1603 13, 0, 32} 1592 13, 0, 32}
1604 , 1593 ,
1605 {&dot11lcn_gain_val_tbl_5G, 1594 {&dot11lcn_gain_val_tbl_5G,
1606 sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]), 1595 ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
1607 17, 0, 8} 1596 17, 0, 8}
1608}; 1597};
1609 1598
1610const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 = 1599const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 =
1611 sizeof(dot11lcnphytbl_rx_gain_info_rev0) / 1600 ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_rev0);
1612 sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
1613 1601
1614const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz = 1602const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz =
1615 sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) / 1603 ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_2G_rev2);
1616 sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
1617 1604
1618const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz = 1605const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz =
1619 sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) / 1606 ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_5G_rev2);
1620 sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
1621 1607
1622static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = { 1608static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = {
1623 0x014d, 1609 0x014d,
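The conversions in the hunk above replace the open-coded sizeof(x)/sizeof(x[0]) idiom with the kernel's ARRAY_SIZE() macro. Beyond brevity, the macro is safer: its definition in include/linux/kernel.h includes a compile-time array check, so passing a pointer by mistake fails to build rather than silently yielding a wrong count.

        /* From include/linux/kernel.h, for reference; __must_be_array()
         * breaks the build when arr decays to a pointer.
         */
        #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))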
@@ -2058,6 +2044,73 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
2058 0x0005, 2044 0x0005,
2059}; 2045};
2060 2046
2047static const u16 dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[] = {
2048 0x0005,
2049 0x0006,
2050 0x0009,
2051 0x000a,
2052 0x0005,
2053 0x0006,
2054 0x0009,
2055 0x000a,
2056 0x0005,
2057 0x0006,
2058 0x0009,
2059 0x000a,
2060 0x0005,
2061 0x0006,
2062 0x0009,
2063 0x000a,
2064 0x0005,
2065 0x0006,
2066 0x0009,
2067 0x000a,
2068 0x0005,
2069 0x0006,
2070 0x0009,
2071 0x000a,
2072 0x0005,
2073 0x0006,
2074 0x0009,
2075 0x000a,
2076 0x0005,
2077 0x0006,
2078 0x0009,
2079 0x000a,
2080 0x0005,
2081 0x0006,
2082 0x0009,
2083 0x000a,
2084 0x0005,
2085 0x0006,
2086 0x0009,
2087 0x000a,
2088 0x0005,
2089 0x0006,
2090 0x0009,
2091 0x000a,
2092 0x0005,
2093 0x0006,
2094 0x0009,
2095 0x000a,
2096 0x0005,
2097 0x0006,
2098 0x0009,
2099 0x000a,
2100 0x0005,
2101 0x0006,
2102 0x0009,
2103 0x000a,
2104 0x0005,
2105 0x0006,
2106 0x0009,
2107 0x000a,
2108 0x0005,
2109 0x0006,
2110 0x0009,
2111 0x000a,
2112};
2113
2061static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { 2114static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
2062 0x0004, 2115 0x0004,
2063 0x0004, 2116 0x0004,
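The newly added dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[] above is the four-value cycle 0x0005, 0x0006, 0x0009, 0x000a repeated 16 times, 64 entries in all. A hypothetical debug-only self-check (not part of the patch) that would catch a typo in such a periodic table could look like:

        /* Hypothetical check: verify the table really has period 4. */
        static const u16 sw_ctrl_period[] = { 0x0005, 0x0006, 0x0009, 0x000a };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo); i++)
                WARN_ON(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[i] !=
                        sw_ctrl_period[i % 4]);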
@@ -2771,89 +2824,79 @@ static const u32 dot11lcn_papd_compdelta_tbl_rev0[] = {
2771 2824
2772const struct phytbl_info dot11lcnphytbl_info_rev0[] = { 2825const struct phytbl_info dot11lcnphytbl_info_rev0[] = {
2773 {&dot11lcn_min_sig_sq_tbl_rev0, 2826 {&dot11lcn_min_sig_sq_tbl_rev0,
2774 sizeof(dot11lcn_min_sig_sq_tbl_rev0) / 2827 ARRAY_SIZE(dot11lcn_min_sig_sq_tbl_rev0), 2, 0, 16}
2775 sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16}
2776 , 2828 ,
2777 {&dot11lcn_noise_scale_tbl_rev0, 2829 {&dot11lcn_noise_scale_tbl_rev0,
2778 sizeof(dot11lcn_noise_scale_tbl_rev0) / 2830 ARRAY_SIZE(dot11lcn_noise_scale_tbl_rev0), 1, 0, 16}
2779 sizeof(dot11lcn_noise_scale_tbl_rev0[0]), 1, 0, 16}
2780 , 2831 ,
2781 {&dot11lcn_fltr_ctrl_tbl_rev0, 2832 {&dot11lcn_fltr_ctrl_tbl_rev0,
2782 sizeof(dot11lcn_fltr_ctrl_tbl_rev0) / 2833 ARRAY_SIZE(dot11lcn_fltr_ctrl_tbl_rev0), 11, 0, 32}
2783 sizeof(dot11lcn_fltr_ctrl_tbl_rev0[0]), 11, 0, 32}
2784 , 2834 ,
2785 {&dot11lcn_ps_ctrl_tbl_rev0, 2835 {&dot11lcn_ps_ctrl_tbl_rev0,
2786 sizeof(dot11lcn_ps_ctrl_tbl_rev0) / 2836 ARRAY_SIZE(dot11lcn_ps_ctrl_tbl_rev0), 12, 0, 32}
2787 sizeof(dot11lcn_ps_ctrl_tbl_rev0[0]), 12, 0, 32}
2788 , 2837 ,
2789 {&dot11lcn_gain_idx_tbl_rev0, 2838 {&dot11lcn_gain_idx_tbl_rev0,
2790 sizeof(dot11lcn_gain_idx_tbl_rev0) / 2839 ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
2791 sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
2792 , 2840 ,
2793 {&dot11lcn_aux_gain_idx_tbl_rev0, 2841 {&dot11lcn_aux_gain_idx_tbl_rev0,
2794 sizeof(dot11lcn_aux_gain_idx_tbl_rev0) / 2842 ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
2795 sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
2796 , 2843 ,
2797 {&dot11lcn_sw_ctrl_tbl_rev0, 2844 {&dot11lcn_sw_ctrl_tbl_rev0,
2798 sizeof(dot11lcn_sw_ctrl_tbl_rev0) / 2845 ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_rev0), 15, 0, 16}
2799 sizeof(dot11lcn_sw_ctrl_tbl_rev0[0]), 15, 0, 16}
2800 , 2846 ,
2801 {&dot11lcn_nf_table_rev0, 2847 {&dot11lcn_nf_table_rev0,
2802 sizeof(dot11lcn_nf_table_rev0) / sizeof(dot11lcn_nf_table_rev0[0]), 16, 2848 ARRAY_SIZE(dot11lcn_nf_table_rev0), 16,
2803 0, 8} 2849 0, 8}
2804 , 2850 ,
2805 {&dot11lcn_gain_val_tbl_rev0, 2851 {&dot11lcn_gain_val_tbl_rev0,
2806 sizeof(dot11lcn_gain_val_tbl_rev0) / 2852 ARRAY_SIZE(dot11lcn_gain_val_tbl_rev0), 17, 0, 8}
2807 sizeof(dot11lcn_gain_val_tbl_rev0[0]), 17, 0, 8}
2808 , 2853 ,
2809 {&dot11lcn_gain_tbl_rev0, 2854 {&dot11lcn_gain_tbl_rev0,
2810 sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18, 2855 ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
2811 0, 32} 2856 0, 32}
2812 , 2857 ,
2813 {&dot11lcn_spur_tbl_rev0, 2858 {&dot11lcn_spur_tbl_rev0,
2814 sizeof(dot11lcn_spur_tbl_rev0) / sizeof(dot11lcn_spur_tbl_rev0[0]), 20, 2859 ARRAY_SIZE(dot11lcn_spur_tbl_rev0), 20,
2815 0, 8} 2860 0, 8}
2816 , 2861 ,
2817 {&dot11lcn_unsup_mcs_tbl_rev0, 2862 {&dot11lcn_unsup_mcs_tbl_rev0,
2818 sizeof(dot11lcn_unsup_mcs_tbl_rev0) / 2863 ARRAY_SIZE(dot11lcn_unsup_mcs_tbl_rev0), 23, 0, 16}
2819 sizeof(dot11lcn_unsup_mcs_tbl_rev0[0]), 23, 0, 16}
2820 , 2864 ,
2821 {&dot11lcn_iq_local_tbl_rev0, 2865 {&dot11lcn_iq_local_tbl_rev0,
2822 sizeof(dot11lcn_iq_local_tbl_rev0) / 2866 ARRAY_SIZE(dot11lcn_iq_local_tbl_rev0), 0, 0, 16}
2823 sizeof(dot11lcn_iq_local_tbl_rev0[0]), 0, 0, 16}
2824 , 2867 ,
2825 {&dot11lcn_papd_compdelta_tbl_rev0, 2868 {&dot11lcn_papd_compdelta_tbl_rev0,
2826 sizeof(dot11lcn_papd_compdelta_tbl_rev0) / 2869 ARRAY_SIZE(dot11lcn_papd_compdelta_tbl_rev0), 24, 0, 32}
2827 sizeof(dot11lcn_papd_compdelta_tbl_rev0[0]), 24, 0, 32}
2828 , 2870 ,
2829}; 2871};
2830 2872
2831const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = { 2873const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = {
2832 &dot11lcn_sw_ctrl_tbl_4313_rev0, 2874 &dot11lcn_sw_ctrl_tbl_4313_rev0,
2833 sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) / 2875 ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_rev0), 15, 0, 16
2834 sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16 2876};
2877
2878const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa = {
2879 &dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo,
2880 ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo), 15, 0, 16
2835}; 2881};
2836 2882
2837const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = { 2883const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = {
2838 &dot11lcn_sw_ctrl_tbl_4313_epa_rev0, 2884 &dot11lcn_sw_ctrl_tbl_4313_epa_rev0,
2839 sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) / 2885 ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0), 15, 0, 16
2840 sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16
2841}; 2886};
2842 2887
2843const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = { 2888const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
2844 &dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo, 2889 &dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo,
2845 sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) / 2890 ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo), 15, 0, 16
2846 sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16
2847}; 2891};
2848 2892
2849const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = { 2893const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
2850 &dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0, 2894 &dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0,
2851 sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) / 2895 ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0), 15, 0, 16
2852 sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16
2853}; 2896};
2854 2897
2855const u32 dot11lcnphytbl_info_sz_rev0 = 2898const u32 dot11lcnphytbl_info_sz_rev0 =
2856 sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]); 2899 ARRAY_SIZE(dot11lcnphytbl_info_rev0);
2857 2900
2858const struct lcnphy_tx_gain_tbl_entry 2901const struct lcnphy_tx_gain_tbl_entry
2859dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = { 2902dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
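For orientation, the phytbl_info initializers above follow the field order implied by the tab.* assignments in phy_lcn.c (tbl_ptr, tbl_len, tbl_id, tbl_offset, tbl_width). A sketch of the assumed layout; the real definition lives elsewhere in brcmsmac and may differ in detail:

        struct phytbl_info {
                const void *tbl_ptr;    /* table contents */
                u32 tbl_len;            /* number of entries */
                u32 tbl_id;             /* hardware table id, e.g. 15 for sw_ctrl */
                u32 tbl_offset;         /* first entry to write */
                u32 tbl_width;          /* entry width in bits: 8, 16 or 32 */
        };

Under that reading, an entry such as {&dot11lcn_sw_ctrl_tbl_4313_rev0, ARRAY_SIZE(...), 15, 0, 16} writes the whole 16-bit-wide switch-control table into hardware table 15 starting at offset 0.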
@@ -2988,134 +3031,134 @@ dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
2988}; 3031};
2989 3032
2990const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = { 3033const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
2991 {7, 0, 31, 0, 72}, 3034 {15, 0, 31, 0, 72},
2992 {7, 0, 31, 0, 70}, 3035 {15, 0, 31, 0, 70},
2993 {7, 0, 31, 0, 68}, 3036 {15, 0, 31, 0, 68},
2994 {7, 0, 30, 0, 67}, 3037 {15, 0, 30, 0, 68},
2995 {7, 0, 29, 0, 68}, 3038 {15, 0, 29, 0, 69},
2996 {7, 0, 28, 0, 68}, 3039 {15, 0, 28, 0, 69},
2997 {7, 0, 27, 0, 69}, 3040 {15, 0, 27, 0, 70},
2998 {7, 0, 26, 0, 70}, 3041 {15, 0, 26, 0, 70},
2999 {7, 0, 25, 0, 70}, 3042 {15, 0, 25, 0, 71},
3000 {7, 0, 24, 0, 71}, 3043 {15, 0, 24, 0, 72},
3001 {7, 0, 23, 0, 72}, 3044 {15, 0, 23, 0, 73},
3002 {7, 0, 23, 0, 70}, 3045 {15, 0, 23, 0, 71},
3003 {7, 0, 22, 0, 71}, 3046 {15, 0, 22, 0, 72},
3004 {7, 0, 21, 0, 72}, 3047 {15, 0, 21, 0, 73},
3005 {7, 0, 21, 0, 70}, 3048 {15, 0, 21, 0, 71},
3006 {7, 0, 21, 0, 68}, 3049 {15, 0, 21, 0, 69},
3007 {7, 0, 21, 0, 66}, 3050 {15, 0, 21, 0, 67},
3008 {7, 0, 21, 0, 64}, 3051 {15, 0, 21, 0, 65},
3009 {7, 0, 21, 0, 63}, 3052 {15, 0, 21, 0, 63},
3010 {7, 0, 20, 0, 64}, 3053 {15, 0, 20, 0, 65},
3011 {7, 0, 19, 0, 65}, 3054 {15, 0, 19, 0, 66},
3012 {7, 0, 19, 0, 64}, 3055 {15, 0, 19, 0, 64},
3013 {7, 0, 18, 0, 65}, 3056 {15, 0, 18, 0, 66},
3014 {7, 0, 18, 0, 64}, 3057 {15, 0, 18, 0, 64},
3015 {7, 0, 17, 0, 65}, 3058 {15, 0, 17, 0, 66},
3016 {7, 0, 17, 0, 64}, 3059 {15, 0, 17, 0, 64},
3017 {7, 0, 16, 0, 65}, 3060 {15, 0, 16, 0, 66},
3018 {7, 0, 16, 0, 64}, 3061 {15, 0, 16, 0, 64},
3019 {7, 0, 16, 0, 62}, 3062 {15, 0, 16, 0, 62},
3020 {7, 0, 16, 0, 60}, 3063 {15, 0, 16, 0, 61},
3021 {7, 0, 16, 0, 58}, 3064 {15, 0, 16, 0, 59},
3022 {7, 0, 15, 0, 61}, 3065 {15, 0, 15, 0, 61},
3023 {7, 0, 15, 0, 59}, 3066 {15, 0, 15, 0, 59},
3024 {7, 0, 14, 0, 61}, 3067 {15, 0, 14, 0, 62},
3025 {7, 0, 14, 0, 60}, 3068 {15, 0, 14, 0, 60},
3026 {7, 0, 14, 0, 58}, 3069 {15, 0, 14, 0, 58},
3027 {7, 0, 13, 0, 60}, 3070 {15, 0, 13, 0, 61},
3028 {7, 0, 13, 0, 59}, 3071 {15, 0, 13, 0, 59},
3029 {7, 0, 12, 0, 62}, 3072 {15, 0, 12, 0, 62},
3030 {7, 0, 12, 0, 60}, 3073 {15, 0, 12, 0, 61},
3031 {7, 0, 12, 0, 58}, 3074 {15, 0, 12, 0, 59},
3032 {7, 0, 11, 0, 62}, 3075 {15, 0, 11, 0, 62},
3033 {7, 0, 11, 0, 60}, 3076 {15, 0, 11, 0, 61},
3034 {7, 0, 11, 0, 59}, 3077 {15, 0, 11, 0, 59},
3035 {7, 0, 11, 0, 57}, 3078 {15, 0, 11, 0, 57},
3036 {7, 0, 10, 0, 61}, 3079 {15, 0, 10, 0, 61},
3037 {7, 0, 10, 0, 59}, 3080 {15, 0, 10, 0, 59},
3038 {7, 0, 10, 0, 57}, 3081 {15, 0, 10, 0, 58},
3039 {7, 0, 9, 0, 62}, 3082 {15, 0, 9, 0, 62},
3040 {7, 0, 9, 0, 60}, 3083 {15, 0, 9, 0, 61},
3041 {7, 0, 9, 0, 58}, 3084 {15, 0, 9, 0, 59},
3042 {7, 0, 9, 0, 57}, 3085 {15, 0, 9, 0, 57},
3043 {7, 0, 8, 0, 62}, 3086 {15, 0, 8, 0, 62},
3044 {7, 0, 8, 0, 60}, 3087 {15, 0, 8, 0, 61},
3045 {7, 0, 8, 0, 58}, 3088 {15, 0, 8, 0, 59},
3046 {7, 0, 8, 0, 57}, 3089 {15, 0, 8, 0, 57},
3047 {7, 0, 8, 0, 55}, 3090 {15, 0, 8, 0, 56},
3048 {7, 0, 7, 0, 61}, 3091 {15, 0, 8, 0, 54},
3092 {15, 0, 8, 0, 53},
3093 {15, 0, 8, 0, 51},
3094 {15, 0, 8, 0, 50},
3095 {7, 0, 7, 0, 69},
3096 {7, 0, 7, 0, 67},
3097 {7, 0, 7, 0, 65},
3098 {7, 0, 7, 0, 64},
3099 {7, 0, 7, 0, 62},
3049 {7, 0, 7, 0, 60}, 3100 {7, 0, 7, 0, 60},
3050 {7, 0, 7, 0, 58}, 3101 {7, 0, 7, 0, 58},
3051 {7, 0, 7, 0, 56}, 3102 {7, 0, 7, 0, 57},
3052 {7, 0, 7, 0, 55}, 3103 {7, 0, 7, 0, 55},
3053 {7, 0, 6, 0, 62}, 3104 {7, 0, 6, 0, 62},
3054 {7, 0, 6, 0, 60}, 3105 {7, 0, 6, 0, 61},
3055 {7, 0, 6, 0, 58}, 3106 {7, 0, 6, 0, 59},
3056 {7, 0, 6, 0, 57}, 3107 {7, 0, 6, 0, 57},
3057 {7, 0, 6, 0, 55}, 3108 {7, 0, 6, 0, 56},
3058 {7, 0, 6, 0, 54}, 3109 {7, 0, 6, 0, 54},
3059 {7, 0, 6, 0, 52}, 3110 {7, 0, 6, 0, 53},
3060 {7, 0, 5, 0, 61}, 3111 {7, 0, 5, 0, 61},
3061 {7, 0, 5, 0, 59}, 3112 {7, 0, 5, 0, 60},
3062 {7, 0, 5, 0, 57}, 3113 {7, 0, 5, 0, 58},
3063 {7, 0, 5, 0, 56}, 3114 {7, 0, 5, 0, 56},
3064 {7, 0, 5, 0, 54}, 3115 {7, 0, 5, 0, 55},
3065 {7, 0, 5, 0, 53}, 3116 {7, 0, 5, 0, 53},
3066 {7, 0, 5, 0, 51}, 3117 {7, 0, 5, 0, 52},
3067 {7, 0, 4, 0, 62}, 3118 {7, 0, 5, 0, 50},
3068 {7, 0, 4, 0, 60}, 3119 {7, 0, 5, 0, 49},
3069 {7, 0, 4, 0, 58}, 3120 {7, 0, 5, 0, 47},
3070 {7, 0, 4, 0, 57}, 3121 {7, 0, 4, 0, 57},
3071 {7, 0, 4, 0, 55}, 3122 {7, 0, 4, 0, 56},
3072 {7, 0, 4, 0, 54}, 3123 {7, 0, 4, 0, 54},
3073 {7, 0, 4, 0, 52}, 3124 {7, 0, 4, 0, 53},
3074 {7, 0, 4, 0, 51}, 3125 {7, 0, 4, 0, 51},
3075 {7, 0, 4, 0, 49}, 3126 {7, 0, 4, 0, 50},
3076 {7, 0, 4, 0, 48}, 3127 {7, 0, 4, 0, 48},
3128 {7, 0, 4, 0, 47},
3077 {7, 0, 4, 0, 46}, 3129 {7, 0, 4, 0, 46},
3078 {7, 0, 3, 0, 60}, 3130 {7, 0, 4, 0, 44},
3079 {7, 0, 3, 0, 58}, 3131 {7, 0, 4, 0, 43},
3080 {7, 0, 3, 0, 57}, 3132 {7, 0, 4, 0, 42},
3081 {7, 0, 3, 0, 55}, 3133 {7, 0, 4, 0, 41},
3082 {7, 0, 3, 0, 54}, 3134 {7, 0, 4, 0, 40},
3083 {7, 0, 3, 0, 52},
3084 {7, 0, 3, 0, 51}, 3135 {7, 0, 3, 0, 51},
3085 {7, 0, 3, 0, 49}, 3136 {7, 0, 3, 0, 50},
3086 {7, 0, 3, 0, 48}, 3137 {7, 0, 3, 0, 48},
3138 {7, 0, 3, 0, 47},
3087 {7, 0, 3, 0, 46}, 3139 {7, 0, 3, 0, 46},
3088 {7, 0, 3, 0, 45},
3089 {7, 0, 3, 0, 44}, 3140 {7, 0, 3, 0, 44},
3090 {7, 0, 3, 0, 43}, 3141 {7, 0, 3, 0, 43},
3142 {7, 0, 3, 0, 42},
3091 {7, 0, 3, 0, 41}, 3143 {7, 0, 3, 0, 41},
3092 {7, 0, 2, 0, 61}, 3144 {3, 0, 3, 0, 56},
3093 {7, 0, 2, 0, 59}, 3145 {3, 0, 3, 0, 54},
3094 {7, 0, 2, 0, 57}, 3146 {3, 0, 3, 0, 53},
3095 {7, 0, 2, 0, 56}, 3147 {3, 0, 3, 0, 51},
3096 {7, 0, 2, 0, 54}, 3148 {3, 0, 3, 0, 50},
3097 {7, 0, 2, 0, 53}, 3149 {3, 0, 3, 0, 48},
3098 {7, 0, 2, 0, 51}, 3150 {3, 0, 3, 0, 47},
3099 {7, 0, 2, 0, 50}, 3151 {3, 0, 3, 0, 46},
3100 {7, 0, 2, 0, 48}, 3152 {3, 0, 3, 0, 44},
3101 {7, 0, 2, 0, 47}, 3153 {3, 0, 3, 0, 43},
3102 {7, 0, 2, 0, 46}, 3154 {3, 0, 3, 0, 42},
3103 {7, 0, 2, 0, 44}, 3155 {3, 0, 3, 0, 41},
3104 {7, 0, 2, 0, 43}, 3156 {3, 0, 3, 0, 39},
3105 {7, 0, 2, 0, 42}, 3157 {3, 0, 3, 0, 38},
3106 {7, 0, 2, 0, 41}, 3158 {3, 0, 3, 0, 37},
3107 {7, 0, 2, 0, 39}, 3159 {3, 0, 3, 0, 36},
3108 {7, 0, 2, 0, 38}, 3160 {3, 0, 3, 0, 35},
3109 {7, 0, 2, 0, 37}, 3161 {3, 0, 3, 0, 34},
3110 {7, 0, 2, 0, 36},
3111 {7, 0, 2, 0, 35},
3112 {7, 0, 2, 0, 34},
3113 {7, 0, 2, 0, 33},
3114 {7, 0, 2, 0, 32},
3115 {7, 0, 1, 0, 63},
3116 {7, 0, 1, 0, 61},
3117 {7, 0, 1, 0, 59},
3118 {7, 0, 1, 0, 57},
3119}; 3162};
3120 3163
3121const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = { 3164const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {
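The 2 GHz internal-PA gain table above is retuned wholesale. Assuming the usual lcnphy_tx_gain_tbl_entry layout from phytbl_lcn.h (an assumption, shown below), each row is one transmit power index and the retune raises the first column from 7 to 15 across the upper part of the table, dropping to 3 near the bottom, with the last two columns re-spread to match:

        /* Assumed row layout; field names are not confirmed by this diff. */
        struct lcnphy_tx_gain_tbl_entry {
                unsigned char gm_gain;
                unsigned char pga_gain;
                unsigned char pad_gain;
                unsigned char dac_gain;
                unsigned char bb_mult;
        };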
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
index 5f75e16bf5a7..489422a36085 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
@@ -20,6 +20,7 @@
20extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[]; 20extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[];
21extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev0; 21extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev0;
22extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313; 22extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313;
23extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
23extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa; 24extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa;
24extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa_combo; 25extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa_combo;
25extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa; 26extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c
index c1ec2a4dd8c0..92d299aa257c 100644
--- a/drivers/net/wireless/cw1200/bh.c
+++ b/drivers/net/wireless/cw1200/bh.c
@@ -465,8 +465,8 @@ static int cw1200_bh(void *arg)
465 (rx || tx || term || suspend || priv->bh_error); 465 (rx || tx || term || suspend || priv->bh_error);
466 }), status); 466 }), status);
467 467
468 pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n", 468 pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
469 rx, tx, term, suspend, status); 469 rx, tx, term, suspend, priv->bh_error, status);
470 470
471 /* Did an error occur? */ 471 /* Did an error occur? */
472 if ((status < 0 && status != -ERESTARTSYS) || 472 if ((status < 0 && status != -ERESTARTSYS) ||
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3724e739cbf4..090f01577dd2 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -507,7 +507,7 @@ u32 cw1200_dpll_from_clk(u16 clk_khz)
507 case 0xCB20: /* 52000 KHz */ 507 case 0xCB20: /* 52000 KHz */
508 return 0x07627091; 508 return 0x07627091;
509 default: 509 default:
510 pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n", 510 pr_err("Unknown Refclk freq (0x%04x), using 26000KHz\n",
511 clk_khz); 511 clk_khz);
512 return 0x0EC4F121; 512 return 0x0EC4F121;
513 } 513 }
diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h
index 7afc613c3706..48086e849515 100644
--- a/drivers/net/wireless/cw1200/wsm.h
+++ b/drivers/net/wireless/cw1200/wsm.h
@@ -832,7 +832,7 @@ struct wsm_tx {
832 /* the MSDU shall be terminated. Overrides the global */ 832 /* the MSDU shall be terminated. Overrides the global */
833 /* dot11MaxTransmitMsduLifeTime setting [optional] */ 833 /* dot11MaxTransmitMsduLifeTime setting [optional] */
834 /* Device will set the default value if this is 0. */ 834 /* Device will set the default value if this is 0. */
835 u32 expire_time; 835 __le32 expire_time;
836 836
837 /* WSM_HT_TX_... */ 837 /* WSM_HT_TX_... */
838 __le32 ht_tx_parameters; 838 __le32 ht_tx_parameters;
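The wsm.h change above marks expire_time as little-endian on the wire, matching the neighboring ht_tx_parameters field. With the __le32 annotation, sparse (run with -D__CHECK_ENDIAN__) flags any access that skips the byte-order helpers; a minimal sketch, with timeout_ms as a hypothetical local:

        struct wsm_tx tx_req;
        u32 timeout_ms = 5000;                          /* hypothetical value */

        tx_req.expire_time = cpu_to_le32(timeout_ms);   /* ok */
        /* tx_req.expire_time = timeout_ms; */          /* sparse: incorrect type */

On the read side the mirror helper le32_to_cpu() applies; on little-endian hosts both compile to nothing.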
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 6307a4e36c85..c275dc1623fe 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1425,7 +1425,7 @@ static int prism2_hw_init2(struct net_device *dev, int initial)
1425 } 1425 }
1426 list_for_each(ptr, &local->hostap_interfaces) { 1426 list_for_each(ptr, &local->hostap_interfaces) {
1427 iface = list_entry(ptr, struct hostap_interface, list); 1427 iface = list_entry(ptr, struct hostap_interface, list);
1428 memcpy(iface->dev->dev_addr, dev->dev_addr, ETH_ALEN); 1428 eth_hw_addr_inherit(iface->dev, dev);
1429 } 1429 }
1430 } else if (local->fw_ap) 1430 } else if (local->fw_ap)
1431 prism2_check_sta_fw_version(local); 1431 prism2_check_sta_fw_version(local);
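This hostap hunk, like the hostap_main.c one below, swaps an open-coded memcpy() of dev_addr for the eth_hw_addr_inherit() helper. The gain is that the address's provenance travels with it: roughly (per include/linux/etherdevice.h, shown here as a sketch), the helper is

        static inline void eth_hw_addr_inherit(struct net_device *dst,
                                               struct net_device *src)
        {
                dst->addr_assign_type = src->addr_assign_type;
                memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN);
        }

so a virtual interface cloning its parent's MAC also reports the correct addr_assign_type to userspace, which the bare memcpy() never updated.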
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 15f0fad39add..a1257c92afc4 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -66,7 +66,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
66 list_add(&iface->list, &local->hostap_interfaces); 66 list_add(&iface->list, &local->hostap_interfaces);
67 67
68 mdev = local->dev; 68 mdev = local->dev;
69 memcpy(dev->dev_addr, mdev->dev_addr, ETH_ALEN); 69 eth_hw_addr_inherit(dev, mdev);
70 dev->base_addr = mdev->base_addr; 70 dev->base_addr = mdev->base_addr;
71 dev->irq = mdev->irq; 71 dev->irq = mdev->irq;
72 dev->mem_start = mdev->mem_start; 72 dev->mem_start = mdev->mem_start;
@@ -667,7 +667,7 @@ static int prism2_open(struct net_device *dev)
667 if (local->no_pri) { 667 if (local->no_pri) {
668 printk(KERN_DEBUG "%s: could not set interface UP - no PRI " 668 printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
669 "f/w\n", dev->name); 669 "f/w\n", dev->name);
670 return 1; 670 return -ENODEV;
671 } 671 }
672 672
673 if ((local->func->card_present && !local->func->card_present(local)) || 673 if ((local->func->card_present && !local->func->card_present(local)) ||
@@ -682,7 +682,7 @@ static int prism2_open(struct net_device *dev)
682 printk(KERN_WARNING "%s: could not enable MAC port\n", 682 printk(KERN_WARNING "%s: could not enable MAC port\n",
683 dev->name); 683 dev->name);
684 prism2_close(dev); 684 prism2_close(dev);
685 return 1; 685 return -ENODEV;
686 } 686 }
687 if (!local->dev_enabled) 687 if (!local->dev_enabled)
688 prism2_callback(local, PRISM2_CALLBACK_ENABLE); 688 prism2_callback(local, PRISM2_CALLBACK_ENABLE);
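The two return-value changes above follow the ndo_open convention: dev_open() propagates the handler's return code to userspace, so failures must be negative errno values; the old bare "return 1" surfaced as a meaningless positive code. A minimal sketch of the convention, with hw_ready() as a hypothetical readiness check:

        static int example_open(struct net_device *dev)
        {
                if (!hw_ready(dev))             /* hypothetical check */
                        return -ENODEV;         /* negative errno on failure */
                netif_start_queue(dev);
                return 0;                       /* 0 on success */
        }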
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index fe31590a51b2..aea667b430c3 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -887,6 +887,7 @@ il3945_remove_debugfs(void *il, void *il_sta)
887 */ 887 */
888static void 888static void
889il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, 889il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
890 struct cfg80211_chan_def *chandef,
890 struct ieee80211_sta *sta, void *il_sta) 891 struct ieee80211_sta *sta, void *il_sta)
891{ 892{
892} 893}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index c092033945cc..f09e257759d5 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -475,6 +475,8 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
475 } 475 }
476} 476}
477 477
478#define SMALL_PACKET_SIZE 256
479
478static void 480static void
479il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, 481il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
480 struct ieee80211_rx_status *stats) 482 struct ieee80211_rx_status *stats)
@@ -483,14 +485,13 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
483 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt); 485 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
484 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); 486 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
485 struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); 487 struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
486 u16 len = le16_to_cpu(rx_hdr->len); 488 u32 len = le16_to_cpu(rx_hdr->len);
487 struct sk_buff *skb; 489 struct sk_buff *skb;
488 __le16 fc = hdr->frame_control; 490 __le16 fc = hdr->frame_control;
491 u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;
489 492
490 /* We received data from the HW, so stop the watchdog */ 493 /* We received data from the HW, so stop the watchdog */
491 if (unlikely 494 if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
492 (len + IL39_RX_FRAME_SIZE >
493 PAGE_SIZE << il->hw_params.rx_page_order)) {
494 D_DROP("Corruption detected!\n"); 495 D_DROP("Corruption detected!\n");
495 return; 496 return;
496 } 497 }
@@ -506,26 +507,32 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
506 D_INFO("Woke queues - frame received on passive channel\n"); 507 D_INFO("Woke queues - frame received on passive channel\n");
507 } 508 }
508 509
509 skb = dev_alloc_skb(128); 510 skb = dev_alloc_skb(SMALL_PACKET_SIZE);
510 if (!skb) { 511 if (!skb) {
511 IL_ERR("dev_alloc_skb failed\n"); 512 IL_ERR("dev_alloc_skb failed\n");
512 return; 513 return;
513 } 514 }
514 515
515 if (!il3945_mod_params.sw_crypto) 516 if (!il3945_mod_params.sw_crypto)
516 il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb), 517 il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
517 le32_to_cpu(rx_end->status), stats); 518 le32_to_cpu(rx_end->status), stats);
518 519
519 skb_add_rx_frag(skb, 0, rxb->page, 520 /* If frame is small enough to fit into skb->head, copy it
520 (void *)rx_hdr->payload - (void *)pkt, len, 521 * and do not consume a full page
521 len); 522 */
522 523 if (len <= SMALL_PACKET_SIZE) {
524 memcpy(skb_put(skb, len), rx_hdr->payload, len);
525 } else {
526 skb_add_rx_frag(skb, 0, rxb->page,
527 (void *)rx_hdr->payload - (void *)pkt, len,
528 fraglen);
529 il->alloc_rxb_page--;
530 rxb->page = NULL;
531 }
523 il_update_stats(il, false, fc, len); 532 il_update_stats(il, false, fc, len);
524 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 533 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
525 534
526 ieee80211_rx(il->hw, skb); 535 ieee80211_rx(il->hw, skb);
527 il->alloc_rxb_page--;
528 rxb->page = NULL;
529} 536}
530 537
531#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 538#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
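The 3945 RX hunk above (and the matching 4965 hunk below) stops pinning a full receive page for every frame: frames up to SMALL_PACKET_SIZE (256 bytes) are copied into the skb head allocated by dev_alloc_skb(), and only larger frames attach the page as a fragment and transfer the page reference. Condensed, with payload/offset as stand-ins for the per-driver pointer math:

        skb = dev_alloc_skb(SMALL_PACKET_SIZE);
        if (!skb)
                return;

        if (len <= SMALL_PACKET_SIZE) {
                memcpy(skb_put(skb, len), payload, len);  /* page stays pooled */
        } else {
                skb_add_rx_frag(skb, 0, rxb->page, offset, len, fraglen);
                il->alloc_rxb_page--;   /* page ownership moves to the skb */
                rxb->page = NULL;
        }

Small management frames (beacons, ACK-sized data) dominate many RX streams, so this keeps the skb truesize honest instead of charging a whole page per tiny frame. Note the frag path now passes fraglen (the true fragment capacity) rather than len as the last skb_add_rx_frag() argument.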
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7acf5ee23582..5ab50a5b48b1 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -574,9 +574,11 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
574 return decrypt_out; 574 return decrypt_out;
575} 575}
576 576
577#define SMALL_PACKET_SIZE 256
578
577static void 579static void
578il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr, 580il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
579 u16 len, u32 ampdu_status, struct il_rx_buf *rxb, 581 u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
580 struct ieee80211_rx_status *stats) 582 struct ieee80211_rx_status *stats)
581{ 583{
582 struct sk_buff *skb; 584 struct sk_buff *skb;
@@ -598,21 +600,25 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
598 il_set_decrypted_flag(il, hdr, ampdu_status, stats)) 600 il_set_decrypted_flag(il, hdr, ampdu_status, stats))
599 return; 601 return;
600 602
601 skb = dev_alloc_skb(128); 603 skb = dev_alloc_skb(SMALL_PACKET_SIZE);
602 if (!skb) { 604 if (!skb) {
603 IL_ERR("dev_alloc_skb failed\n"); 605 IL_ERR("dev_alloc_skb failed\n");
604 return; 606 return;
605 } 607 }
606 608
607 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len, 609 if (len <= SMALL_PACKET_SIZE) {
608 len); 610 memcpy(skb_put(skb, len), hdr, len);
611 } else {
612 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
613 len, PAGE_SIZE << il->hw_params.rx_page_order);
614 il->alloc_rxb_page--;
615 rxb->page = NULL;
616 }
609 617
610 il_update_stats(il, false, fc, len); 618 il_update_stats(il, false, fc, len);
611 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 619 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
612 620
613 ieee80211_rx(il->hw, skb); 621 ieee80211_rx(il->hw, skb);
614 il->alloc_rxb_page--;
615 rxb->page = NULL;
616} 622}
617 623
618/* Called for N_RX (legacy ABG frames), or 624/* Called for N_RX (legacy ABG frames), or
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index ed3c42a63a43..3ccbaf791b48 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2803,6 +2803,7 @@ il4965_rs_remove_debugfs(void *il, void *il_sta)
2803 */ 2803 */
2804static void 2804static void
2805il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, 2805il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
2806 struct cfg80211_chan_def *chandef,
2806 struct ieee80211_sta *sta, void *il_sta) 2807 struct ieee80211_sta *sta, void *il_sta)
2807{ 2808{
2808} 2809}
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index cbaa5c2c410f..3eb2102ce236 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -22,6 +22,8 @@ config IWLWIFI
22 Intel Wireless WiFi Link 6150BGN 2 Adapter 22 Intel Wireless WiFi Link 6150BGN 2 Adapter
23 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) 23 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
24 Intel 2000 Series Wi-Fi Adapters 24 Intel 2000 Series Wi-Fi Adapters
25 Intel 7260 Wi-Fi Adapter
26 Intel 3160 Wi-Fi Adapter
25 27
26 28
27 This driver uses the kernel's mac80211 subsystem. 29 This driver uses the kernel's mac80211 subsystem.
@@ -46,17 +48,16 @@ config IWLDVM
46 depends on IWLWIFI 48 depends on IWLWIFI
47 default IWLWIFI 49 default IWLWIFI
48 help 50 help
49 This is the driver supporting the DVM firmware which is 51 This is the driver that supports the DVM firmware which is
50 currently the only firmware available for existing devices. 52 used by most existing devices (with the exception of 7260
53 and 3160).
51 54
52config IWLMVM 55config IWLMVM
53 tristate "Intel Wireless WiFi MVM Firmware support" 56 tristate "Intel Wireless WiFi MVM Firmware support"
54 depends on IWLWIFI 57 depends on IWLWIFI
55 help 58 help
56 This is the driver supporting the MVM firmware which is 59 This is the driver that supports the MVM firmware which is
57 currently only available for 7000 series devices. 60 currently only available for 7260 and 3160 devices.
58
59 Say yes if you have such a device.
60 61
61# don't call it _MODULE -- will confuse Kconfig/fixdep/... 62# don't call it _MODULE -- will confuse Kconfig/fixdep/...
62config IWLWIFI_OPMODE_MODULAR 63config IWLWIFI_OPMODE_MODULAR
@@ -127,20 +128,3 @@ config IWLWIFI_DEVICE_TRACING
127 If unsure, say Y so we can help you better when problems 128 If unsure, say Y so we can help you better when problems
128 occur. 129 occur.
129endmenu 130endmenu
130
131config IWLWIFI_P2P
132 def_bool y
133 bool "iwlwifi experimental P2P support"
134 depends on IWLWIFI
135 help
136 This option enables experimental P2P support for some devices
137 based on microcode support. Since P2P support is still under
138 development, this option may even enable it for some devices
139 now that turn out to not support it in the future due to
140 microcode restrictions.
141
142 To determine if your microcode supports the experimental P2P
143 offered by this option, check if the driver advertises AP
144 support when it is loaded.
145
146 Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 18355110deff..f2a86ffc3b4c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -106,7 +106,6 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
106#define STATUS_CHANNEL_SWITCH_PENDING 11 106#define STATUS_CHANNEL_SWITCH_PENDING 11
107#define STATUS_SCAN_COMPLETE 12 107#define STATUS_SCAN_COMPLETE 12
108#define STATUS_POWER_PMI 13 108#define STATUS_POWER_PMI 13
109#define STATUS_SCAN_ROC_EXPIRED 14
110 109
111struct iwl_ucode_capabilities; 110struct iwl_ucode_capabilities;
112 111
@@ -250,7 +249,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
250 249
251/* scan */ 250/* scan */
252void iwlagn_post_scan(struct iwl_priv *priv); 251void iwlagn_post_scan(struct iwl_priv *priv);
253void iwlagn_disable_roc(struct iwl_priv *priv);
254int iwl_force_rf_reset(struct iwl_priv *priv, bool external); 252int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
255void iwl_init_scan_params(struct iwl_priv *priv); 253void iwl_init_scan_params(struct iwl_priv *priv);
256int iwl_scan_cancel(struct iwl_priv *priv); 254int iwl_scan_cancel(struct iwl_priv *priv);
@@ -265,10 +263,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
265 enum iwl_scan_type scan_type, 263 enum iwl_scan_type scan_type,
266 enum ieee80211_band band); 264 enum ieee80211_band band);
267 265
268void iwl_scan_roc_expired(struct iwl_priv *priv);
269void iwl_scan_offchannel_skb(struct iwl_priv *priv);
270void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
271
272/* For faster active scanning, scan will move to the next channel if fewer than 266/* For faster active scanning, scan will move to the next channel if fewer than
273 * PLCP_QUIET_THRESH packets are heard on this channel within 267 * PLCP_QUIET_THRESH packets are heard on this channel within
274 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell 268 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d5329489245a..d94f8ab15004 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -69,19 +69,7 @@
69} while (0) 69} while (0)
70 70
71/* file operation */ 71/* file operation */
72#define DEBUGFS_READ_FUNC(name) \
73static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
74 char __user *user_buf, \
75 size_t count, loff_t *ppos);
76
77#define DEBUGFS_WRITE_FUNC(name) \
78static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
79 const char __user *user_buf, \
80 size_t count, loff_t *ppos);
81
82
83#define DEBUGFS_READ_FILE_OPS(name) \ 72#define DEBUGFS_READ_FILE_OPS(name) \
84 DEBUGFS_READ_FUNC(name); \
85static const struct file_operations iwl_dbgfs_##name##_ops = { \ 73static const struct file_operations iwl_dbgfs_##name##_ops = { \
86 .read = iwl_dbgfs_##name##_read, \ 74 .read = iwl_dbgfs_##name##_read, \
87 .open = simple_open, \ 75 .open = simple_open, \
@@ -89,7 +77,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
89}; 77};
90 78
91#define DEBUGFS_WRITE_FILE_OPS(name) \ 79#define DEBUGFS_WRITE_FILE_OPS(name) \
92 DEBUGFS_WRITE_FUNC(name); \
93static const struct file_operations iwl_dbgfs_##name##_ops = { \ 80static const struct file_operations iwl_dbgfs_##name##_ops = { \
94 .write = iwl_dbgfs_##name##_write, \ 81 .write = iwl_dbgfs_##name##_write, \
95 .open = simple_open, \ 82 .open = simple_open, \
@@ -98,8 +85,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
98 85
99 86
100#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 87#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
101 DEBUGFS_READ_FUNC(name); \
102 DEBUGFS_WRITE_FUNC(name); \
103static const struct file_operations iwl_dbgfs_##name##_ops = { \ 88static const struct file_operations iwl_dbgfs_##name##_ops = { \
104 .write = iwl_dbgfs_##name##_write, \ 89 .write = iwl_dbgfs_##name##_write, \
105 .read = iwl_dbgfs_##name##_read, \ 90 .read = iwl_dbgfs_##name##_read, \
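The debugfs.c hunk above drops the DEBUGFS_READ_FUNC/DEBUGFS_WRITE_FUNC forward-declaration macros: every iwl_dbgfs_*_read/_write handler is already defined before the corresponding *_FILE_OPS macro is instantiated, so the prototypes were dead weight. The read-only variant ends up shaped like the sketch below (the .llseek line is assumed from the usual iwlwifi boilerplate, not visible in this hunk):

        #define DEBUGFS_READ_FILE_OPS(name)                                \
        static const struct file_operations iwl_dbgfs_##name##_ops = {    \
                .read = iwl_dbgfs_##name##_read,                           \
                .open = simple_open,                                       \
                .llseek = generic_file_llseek,                             \
        };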
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 60a4e0d15715..a79fdd137f95 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -540,7 +540,6 @@ struct iwl_rxon_context {
540enum iwl_scan_type { 540enum iwl_scan_type {
541 IWL_SCAN_NORMAL, 541 IWL_SCAN_NORMAL,
542 IWL_SCAN_RADIO_RESET, 542 IWL_SCAN_RADIO_RESET,
543 IWL_SCAN_ROC,
544}; 543};
545 544
546/** 545/**
@@ -825,12 +824,6 @@ struct iwl_priv {
825 struct reply_tx_error_statistics reply_tx_stats; 824 struct reply_tx_error_statistics reply_tx_stats;
826 struct reply_agg_tx_error_statistics reply_agg_tx_stats; 825 struct reply_agg_tx_error_statistics reply_agg_tx_stats;
827 826
828 /* remain-on-channel offload support */
829 struct ieee80211_channel *hw_roc_channel;
830 struct delayed_work hw_roc_disable_work;
831 int hw_roc_duration;
832 bool hw_roc_setup, hw_roc_start_notified;
833
834 /* bt coex */ 827 /* bt coex */
835 u8 bt_enable_flag; 828 u8 bt_enable_flag;
836 u8 bt_status; 829 u8 bt_status;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 319387263e12..cae4d3182e33 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -76,29 +76,6 @@ static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
76 }, 76 },
77}; 77};
78 78
79static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
80 {
81 .max = 1,
82 .types = BIT(NL80211_IFTYPE_STATION),
83 },
84 {
85 .max = 1,
86 .types = BIT(NL80211_IFTYPE_P2P_GO) |
87 BIT(NL80211_IFTYPE_AP),
88 },
89};
90
91static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
92 {
93 .max = 2,
94 .types = BIT(NL80211_IFTYPE_STATION),
95 },
96 {
97 .max = 1,
98 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
99 },
100};
101
102static const struct ieee80211_iface_combination 79static const struct ieee80211_iface_combination
103iwlagn_iface_combinations_dualmode[] = { 80iwlagn_iface_combinations_dualmode[] = {
104 { .num_different_channels = 1, 81 { .num_different_channels = 1,
@@ -114,21 +91,6 @@ iwlagn_iface_combinations_dualmode[] = {
114 }, 91 },
115}; 92};
116 93
117static const struct ieee80211_iface_combination
118iwlagn_iface_combinations_p2p[] = {
119 { .num_different_channels = 1,
120 .max_interfaces = 2,
121 .beacon_int_infra_match = true,
122 .limits = iwlagn_p2p_sta_go_limits,
123 .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
124 },
125 { .num_different_channels = 1,
126 .max_interfaces = 2,
127 .limits = iwlagn_p2p_2sta_limits,
128 .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
129 },
130};
131
132/* 94/*
133 * Not a mac80211 entry point function, but it fits in with all the 95 * Not a mac80211 entry point function, but it fits in with all the
134 * other mac80211 functions grouped here. 96 * other mac80211 functions grouped here.
@@ -186,19 +148,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
186 148
187 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 149 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
188 150
189 if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { 151 if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
190 hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
191 hw->wiphy->n_iface_combinations =
192 ARRAY_SIZE(iwlagn_iface_combinations_p2p);
193 } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
194 hw->wiphy->iface_combinations = 152 hw->wiphy->iface_combinations =
195 iwlagn_iface_combinations_dualmode; 153 iwlagn_iface_combinations_dualmode;
196 hw->wiphy->n_iface_combinations = 154 hw->wiphy->n_iface_combinations =
197 ARRAY_SIZE(iwlagn_iface_combinations_dualmode); 155 ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
198 } 156 }
199 157
200 hw->wiphy->max_remain_on_channel_duration = 500;
201
202 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 158 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
203 WIPHY_FLAG_DISABLE_BEACON_HINTS | 159 WIPHY_FLAG_DISABLE_BEACON_HINTS |
204 WIPHY_FLAG_IBSS_RSN; 160 WIPHY_FLAG_IBSS_RSN;
@@ -1159,126 +1115,6 @@ done:
1159 IWL_DEBUG_MAC80211(priv, "leave\n"); 1115 IWL_DEBUG_MAC80211(priv, "leave\n");
1160} 1116}
1161 1117
1162static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1163 struct ieee80211_vif *vif,
1164 struct ieee80211_channel *channel,
1165 int duration,
1166 enum ieee80211_roc_type type)
1167{
1168 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1169 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
1170 int err = 0;
1171
1172 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
1173 return -EOPNOTSUPP;
1174
1175 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
1176 return -EOPNOTSUPP;
1177
1178 IWL_DEBUG_MAC80211(priv, "enter\n");
1179 mutex_lock(&priv->mutex);
1180
1181 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1182 /* mac80211 should not scan while ROC or ROC while scanning */
1183 if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
1184 err = -EBUSY;
1185 goto out;
1186 }
1187
1188 iwl_scan_cancel_timeout(priv, 100);
1189
1190 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1191 err = -EBUSY;
1192 goto out;
1193 }
1194 }
1195
1196 priv->hw_roc_channel = channel;
1197 /* convert from ms to TU */
1198 priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
1199 priv->hw_roc_start_notified = false;
1200 cancel_delayed_work(&priv->hw_roc_disable_work);
1201
1202 if (!ctx->is_active) {
1203 static const struct iwl_qos_info default_qos_data = {
1204 .def_qos_parm = {
1205 .ac[0] = {
1206 .cw_min = cpu_to_le16(3),
1207 .cw_max = cpu_to_le16(7),
1208 .aifsn = 2,
1209 .edca_txop = cpu_to_le16(1504),
1210 },
1211 .ac[1] = {
1212 .cw_min = cpu_to_le16(7),
1213 .cw_max = cpu_to_le16(15),
1214 .aifsn = 2,
1215 .edca_txop = cpu_to_le16(3008),
1216 },
1217 .ac[2] = {
1218 .cw_min = cpu_to_le16(15),
1219 .cw_max = cpu_to_le16(1023),
1220 .aifsn = 3,
1221 },
1222 .ac[3] = {
1223 .cw_min = cpu_to_le16(15),
1224 .cw_max = cpu_to_le16(1023),
1225 .aifsn = 7,
1226 },
1227 },
1228 };
1229
1230 ctx->is_active = true;
1231 ctx->qos_data = default_qos_data;
1232 ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
1233 memcpy(ctx->staging.node_addr,
1234 priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
1235 ETH_ALEN);
1236 memcpy(ctx->staging.bssid_addr,
1237 priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
1238 ETH_ALEN);
1239 err = iwlagn_commit_rxon(priv, ctx);
1240 if (err)
1241 goto out;
1242 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
1243 RXON_FILTER_PROMISC_MSK |
1244 RXON_FILTER_CTL2HOST_MSK;
1245
1246 err = iwlagn_commit_rxon(priv, ctx);
1247 if (err) {
1248 iwlagn_disable_roc(priv);
1249 goto out;
1250 }
1251 priv->hw_roc_setup = true;
1252 }
1253
1254 err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
1255 if (err)
1256 iwlagn_disable_roc(priv);
1257
1258 out:
1259 mutex_unlock(&priv->mutex);
1260 IWL_DEBUG_MAC80211(priv, "leave\n");
1261
1262 return err;
1263}
1264
1265static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1266{
1267 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1268
1269 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
1270 return -EOPNOTSUPP;
1271
1272 IWL_DEBUG_MAC80211(priv, "enter\n");
1273 mutex_lock(&priv->mutex);
1274 iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
1275 iwlagn_disable_roc(priv);
1276 mutex_unlock(&priv->mutex);
1277 IWL_DEBUG_MAC80211(priv, "leave\n");
1278
1279 return 0;
1280}
1281
1282static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, 1118static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1283 struct ieee80211_vif *vif, 1119 struct ieee80211_vif *vif,
1284 enum ieee80211_rssi_event rssi_event) 1120 enum ieee80211_rssi_event rssi_event)
@@ -1434,12 +1270,8 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1434 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", 1270 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1435 viftype, vif->addr); 1271 viftype, vif->addr);
1436 1272
1437 cancel_delayed_work_sync(&priv->hw_roc_disable_work);
1438
1439 mutex_lock(&priv->mutex); 1273 mutex_lock(&priv->mutex);
1440 1274
1441 iwlagn_disable_roc(priv);
1442
1443 if (!iwl_is_ready_rf(priv)) { 1275 if (!iwl_is_ready_rf(priv)) {
1444 IWL_WARN(priv, "Try to add interface when device not ready\n"); 1276 IWL_WARN(priv, "Try to add interface when device not ready\n");
1445 err = -EINVAL; 1277 err = -EINVAL;
@@ -1766,8 +1598,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
1766 .channel_switch = iwlagn_mac_channel_switch, 1598 .channel_switch = iwlagn_mac_channel_switch,
1767 .flush = iwlagn_mac_flush, 1599 .flush = iwlagn_mac_flush,
1768 .tx_last_beacon = iwlagn_mac_tx_last_beacon, 1600 .tx_last_beacon = iwlagn_mac_tx_last_beacon,
1769 .remain_on_channel = iwlagn_mac_remain_on_channel,
1770 .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
1771 .rssi_callback = iwlagn_mac_rssi_callback, 1601 .rssi_callback = iwlagn_mac_rssi_callback,
1772 .set_tim = iwlagn_mac_set_tim, 1602 .set_tim = iwlagn_mac_set_tim,
1773}; 1603};
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 1531a4fc0960..7aad766865cf 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -587,11 +587,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
587 priv->contexts[IWL_RXON_CTX_PAN].interface_modes = 587 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
588 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); 588 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
589 589
590 if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
591 priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
592 BIT(NL80211_IFTYPE_P2P_CLIENT) |
593 BIT(NL80211_IFTYPE_P2P_GO);
594
595 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; 590 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
596 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; 591 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
597 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; 592 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -854,14 +849,6 @@ void iwl_down(struct iwl_priv *priv)
854 849
855 iwl_scan_cancel_timeout(priv, 200); 850 iwl_scan_cancel_timeout(priv, 200);
856 851
857 /*
858 * If active, scanning won't cancel it, so say it expired.
859 * No race since we hold the mutex here and a new one
860 * can't come in at this time.
861 */
862 if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
863 ieee80211_remain_on_channel_expired(priv->hw);
864
865 exit_pending = 852 exit_pending =
866 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 853 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
867 854
@@ -1002,41 +989,6 @@ static void iwl_bg_restart(struct work_struct *data)
1002 } 989 }
1003} 990}
1004 991
1005
1006
1007
1008void iwlagn_disable_roc(struct iwl_priv *priv)
1009{
1010 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
1011
1012 lockdep_assert_held(&priv->mutex);
1013
1014 if (!priv->hw_roc_setup)
1015 return;
1016
1017 ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
1018 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1019
1020 priv->hw_roc_channel = NULL;
1021
1022 memset(ctx->staging.node_addr, 0, ETH_ALEN);
1023
1024 iwlagn_commit_rxon(priv, ctx);
1025
1026 ctx->is_active = false;
1027 priv->hw_roc_setup = false;
1028}
1029
1030static void iwlagn_disable_roc_work(struct work_struct *work)
1031{
1032 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1033 hw_roc_disable_work.work);
1034
1035 mutex_lock(&priv->mutex);
1036 iwlagn_disable_roc(priv);
1037 mutex_unlock(&priv->mutex);
1038}
1039
1040/***************************************************************************** 992/*****************************************************************************
1041 * 993 *
1042 * driver setup and teardown 994 * driver setup and teardown
@@ -1053,8 +1005,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
1053 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); 1005 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
1054 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); 1006 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
1055 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); 1007 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
1056 INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
1057 iwlagn_disable_roc_work);
1058 1008
1059 iwl_setup_scan_deferred_work(priv); 1009 iwl_setup_scan_deferred_work(priv);
1060 1010
@@ -1082,7 +1032,6 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
1082 1032
1083 cancel_work_sync(&priv->bt_full_concurrency); 1033 cancel_work_sync(&priv->bt_full_concurrency);
1084 cancel_work_sync(&priv->bt_runtime_config); 1034 cancel_work_sync(&priv->bt_runtime_config);
1085 cancel_delayed_work_sync(&priv->hw_roc_disable_work);
1086 1035
1087 del_timer_sync(&priv->statistics_periodic); 1036 del_timer_sync(&priv->statistics_periodic);
1088 del_timer_sync(&priv->ucode_trace); 1037 del_timer_sync(&priv->ucode_trace);
@@ -1169,12 +1118,6 @@ static void iwl_option_config(struct iwl_priv *priv)
1169#else 1118#else
1170 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n"); 1119 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
1171#endif 1120#endif
1172
1173#ifdef CONFIG_IWLWIFI_P2P
1174 IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
1175#else
1176 IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
1177#endif
1178} 1121}
1179 1122
1180static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) 1123static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
@@ -1315,10 +1258,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1315 1258
1316 ucode_flags = fw->ucode_capa.flags; 1259 ucode_flags = fw->ucode_capa.flags;
1317 1260
1318#ifndef CONFIG_IWLWIFI_P2P
1319 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1320#endif
1321
1322 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { 1261 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
1323 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; 1262 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1324 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; 1263 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
@@ -1413,7 +1352,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1413 * if not PAN, then don't support P2P -- might be a uCode 1352 * if not PAN, then don't support P2P -- might be a uCode
1414 * packaging bug or due to the eeprom check above 1353 * packaging bug or due to the eeprom check above
1415 */ 1354 */
1416 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1417 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1355 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1418 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1356 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1419 1357
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 1b693944123b..b647e506564c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2826,9 +2826,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2826 2826
2827 lq_sta->flush_timer = 0; 2827 lq_sta->flush_timer = 0;
2828 lq_sta->supp_rates = sta->supp_rates[sband->band]; 2828 lq_sta->supp_rates = sta->supp_rates[sband->band];
2829 for (j = 0; j < LQ_SIZE; j++)
2830 for (i = 0; i < IWL_RATE_COUNT; i++)
2831 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2832 2829
2833 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n", 2830 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
2834 sta_id); 2831 sta_id);
@@ -3319,7 +3316,8 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
3319 * station is added we ignore it. 3316 * station is added we ignore it.
3320 */ 3317 */
3321static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, 3318static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
3322 struct ieee80211_sta *sta, void *priv_sta) 3319 struct cfg80211_chan_def *chandef,
3320 struct ieee80211_sta *sta, void *priv_sta)
3323{ 3321{
3324} 3322}
3325static struct rate_control_ops rs_ops = { 3323static struct rate_control_ops rs_ops = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index cd1ad0019185..d7ce2f12a907 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -564,11 +564,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
564 cmd.slots[0].type = 0; /* BSS */ 564 cmd.slots[0].type = 0; /* BSS */
565 cmd.slots[1].type = 1; /* PAN */ 565 cmd.slots[1].type = 1; /* PAN */
566 566
567 if (priv->hw_roc_setup) { 567 if (ctx_bss->vif && ctx_pan->vif) {
568 /* both contexts must be used for this to happen */
569 slot1 = IWL_MIN_SLOT_TIME;
570 slot0 = 3000;
571 } else if (ctx_bss->vif && ctx_pan->vif) {
572 int bcnint = ctx_pan->beacon_int; 568 int bcnint = ctx_pan->beacon_int;
573 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; 569 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
574 570
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 8c686a5b90ac..35e0ee8b4e5b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -100,9 +100,6 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
100 ieee80211_scan_completed(priv->hw, aborted); 100 ieee80211_scan_completed(priv->hw, aborted);
101 } 101 }
102 102
103 if (priv->scan_type == IWL_SCAN_ROC)
104 iwl_scan_roc_expired(priv);
105
106 priv->scan_type = IWL_SCAN_NORMAL; 103 priv->scan_type = IWL_SCAN_NORMAL;
107 priv->scan_vif = NULL; 104 priv->scan_vif = NULL;
108 priv->scan_request = NULL; 105 priv->scan_request = NULL;
@@ -130,9 +127,6 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
130 goto out_settings; 127 goto out_settings;
131 } 128 }
132 129
133 if (priv->scan_type == IWL_SCAN_ROC)
134 iwl_scan_roc_expired(priv);
135
136 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { 130 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
137 int err; 131 int err;
138 132
@@ -284,12 +278,6 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
284 le32_to_cpu(notif->tsf_low), 278 le32_to_cpu(notif->tsf_low),
285 notif->status, notif->beacon_timer); 279 notif->status, notif->beacon_timer);
286 280
287 if (priv->scan_type == IWL_SCAN_ROC &&
288 !priv->hw_roc_start_notified) {
289 ieee80211_ready_on_channel(priv->hw);
290 priv->hw_roc_start_notified = true;
291 }
292
293 return 0; 281 return 0;
294} 282}
295 283
@@ -697,8 +685,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
697 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 685 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
698 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 686 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
699 687
700 if (priv->scan_type != IWL_SCAN_ROC && 688 if (iwl_is_any_associated(priv)) {
701 iwl_is_any_associated(priv)) {
702 u16 interval = 0; 689 u16 interval = 0;
703 u32 extra; 690 u32 extra;
704 u32 suspend_time = 100; 691 u32 suspend_time = 100;
@@ -706,9 +693,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
706 693
707 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 694 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
708 switch (priv->scan_type) { 695 switch (priv->scan_type) {
709 case IWL_SCAN_ROC:
710 WARN_ON(1);
711 break;
712 case IWL_SCAN_RADIO_RESET: 696 case IWL_SCAN_RADIO_RESET:
713 interval = 0; 697 interval = 0;
714 break; 698 break;
@@ -728,11 +712,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
728 scan->suspend_time = cpu_to_le32(scan_suspend_time); 712 scan->suspend_time = cpu_to_le32(scan_suspend_time);
729 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", 713 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
730 scan_suspend_time, interval); 714 scan_suspend_time, interval);
731 } else if (priv->scan_type == IWL_SCAN_ROC) {
732 scan->suspend_time = 0;
733 scan->max_out_time = 0;
734 scan->quiet_time = 0;
735 scan->quiet_plcp_th = 0;
736 } 715 }
737 716
738 switch (priv->scan_type) { 717 switch (priv->scan_type) {
@@ -774,9 +753,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
774 } else 753 } else
775 IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); 754 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
776 break; 755 break;
777 case IWL_SCAN_ROC:
778 IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
779 break;
780 } 756 }
781 757
782 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 758 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
@@ -898,7 +874,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
898 scan_cmd_size - sizeof(*scan)); 874 scan_cmd_size - sizeof(*scan));
899 break; 875 break;
900 case IWL_SCAN_RADIO_RESET: 876 case IWL_SCAN_RADIO_RESET:
901 case IWL_SCAN_ROC:
902 /* use bcast addr, will not be transmitted but must be valid */ 877 /* use bcast addr, will not be transmitted but must be valid */
903 cmd_len = iwl_fill_probe_req( 878 cmd_len = iwl_fill_probe_req(
904 (struct ieee80211_mgmt *)scan->data, 879 (struct ieee80211_mgmt *)scan->data,
@@ -926,46 +901,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
926 is_active, n_probes, 901 is_active, n_probes,
927 (void *)&scan->data[cmd_len]); 902 (void *)&scan->data[cmd_len]);
928 break; 903 break;
929 case IWL_SCAN_ROC: {
930 struct iwl_scan_channel *scan_ch;
931 int n_chan, i;
932 u16 dwell;
933
934 dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
935 n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
936
937 scan->channel_count = n_chan;
938
939 scan_ch = (void *)&scan->data[cmd_len];
940
941 for (i = 0; i < n_chan; i++) {
942 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
943 scan_ch->channel =
944 cpu_to_le16(priv->hw_roc_channel->hw_value);
945
946 if (i == n_chan - 1)
947 dwell = priv->hw_roc_duration - i * dwell;
948
949 scan_ch->active_dwell =
950 scan_ch->passive_dwell = cpu_to_le16(dwell);
951
952 /* Set txpower levels to defaults */
953 scan_ch->dsp_atten = 110;
954
955 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
956 * power level:
957 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
958 */
959 if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
960 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
961 else
962 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
963
964 scan_ch++;
965 }
966 }
967
968 break;
969 } 904 }
970 905
971 if (scan->channel_count == 0) { 906 if (scan->channel_count == 0) {
@@ -1035,7 +970,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
1035 970
1036 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", 971 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
1037 scan_type == IWL_SCAN_NORMAL ? "" : 972 scan_type == IWL_SCAN_NORMAL ? "" :
1038 scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
1039 "internal short "); 973 "internal short ");
1040 974
1041 set_bit(STATUS_SCANNING, &priv->status); 975 set_bit(STATUS_SCANNING, &priv->status);
@@ -1149,40 +1083,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
1149 mutex_unlock(&priv->mutex); 1083 mutex_unlock(&priv->mutex);
1150 } 1084 }
1151} 1085}
1152
1153void iwl_scan_roc_expired(struct iwl_priv *priv)
1154{
1155 /*
1156 * The status bit should be set here, to prevent a race
1157 * where the atomic_read returns 1, but before the execution continues
1158 * iwl_scan_offchannel_skb_status() checks if the status bit is set
1159 */
1160 set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
1161
1162 if (atomic_read(&priv->num_aux_in_flight) == 0) {
1163 ieee80211_remain_on_channel_expired(priv->hw);
1164 priv->hw_roc_channel = NULL;
1165 schedule_delayed_work(&priv->hw_roc_disable_work,
1166 10 * HZ);
1167
1168 clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
1169 } else {
1170 IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
1171 atomic_read(&priv->num_aux_in_flight));
1172 }
1173}
1174
1175void iwl_scan_offchannel_skb(struct iwl_priv *priv)
1176{
1177 WARN_ON(!priv->hw_roc_start_notified);
1178 atomic_inc(&priv->num_aux_in_flight);
1179}
1180
1181void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
1182{
1183 if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
1184 test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
1185 IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
1186 iwl_scan_roc_expired(priv);
1187 }
1188}
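
The IWL_SCAN_ROC machinery deleted above emulated remain-on-channel as a passive scan: the requested on-channel time was chopped into repeated dwells on the same channel, with the final entry absorbing the remainder. A minimal sketch of that splitting logic (kernel context assumed; min() stands in for the iwl_limit_dwell() helper that capped a single dwell):

	/* sketch of the removed ROC dwell split; 'duration' and
	 * 'max_dwell' are in TU, as in the original code */
	static int roc_fill_channels(u16 duration, u16 max_dwell)
	{
		u16 dwell = min(duration, max_dwell);
		int n_chan = DIV_ROUND_UP(duration, dwell);
		int i;

		for (i = 0; i < n_chan; i++) {
			/* the last entry gets whatever time is left over */
			if (i == n_chan - 1)
				dwell = duration - i * dwell;
			/* emit one passive scan-channel entry of 'dwell' TU */
		}
		return n_chan;
	}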
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5ee983faa679..da442b81370a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -87,7 +87,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
87 priv->lib->bt_params->advanced_bt_coexist && 87 priv->lib->bt_params->advanced_bt_coexist &&
88 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || 88 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
89 ieee80211_is_reassoc_req(fc) || 89 ieee80211_is_reassoc_req(fc) ||
90 skb->protocol == cpu_to_be16(ETH_P_PAE))) 90 info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
91 tx_flags |= TX_CMD_FLG_IGNORE_BT; 91 tx_flags |= TX_CMD_FLG_IGNORE_BT;
92 92
93 93
@@ -478,9 +478,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
478 if (sta_priv && sta_priv->client && !is_agg) 478 if (sta_priv && sta_priv->client && !is_agg)
479 atomic_inc(&sta_priv->pending_frames); 479 atomic_inc(&sta_priv->pending_frames);
480 480
481 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
482 iwl_scan_offchannel_skb(priv);
483
484 return 0; 481 return 0;
485 482
486drop_unlock_sta: 483drop_unlock_sta:
@@ -1158,7 +1155,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1158 struct sk_buff *skb; 1155 struct sk_buff *skb;
1159 struct iwl_rxon_context *ctx; 1156 struct iwl_rxon_context *ctx;
1160 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 1157 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
1161 bool is_offchannel_skb;
1162 1158
1163 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> 1159 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
1164 IWLAGN_TX_RES_TID_POS; 1160 IWLAGN_TX_RES_TID_POS;
@@ -1178,8 +1174,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1178 1174
1179 __skb_queue_head_init(&skbs); 1175 __skb_queue_head_init(&skbs);
1180 1176
1181 is_offchannel_skb = false;
1182
1183 if (tx_resp->frame_count == 1) { 1177 if (tx_resp->frame_count == 1) {
1184 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); 1178 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
1185 next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10); 1179 next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1256,8 +1250,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1256 if (!is_agg) 1250 if (!is_agg)
1257 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1251 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
1258 1252
1259 is_offchannel_skb =
1260 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
1261 freed++; 1253 freed++;
1262 } 1254 }
1263 1255
@@ -1271,14 +1263,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1271 if (!is_agg && freed != 1) 1263 if (!is_agg && freed != 1)
1272 IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed); 1264 IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
1273 1265
1274 /*
1274	 * An offchannel frame can be sent only on the AUX queue, where
1275	 * there is no aggregation (and reordering), so only a single
1276	 * skb is expected to be processed.
1278 */
1279 if (is_offchannel_skb && freed != 1)
1280 IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
1281
1282 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id, 1266 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
1283 iwl_get_tx_fail_reason(status), status); 1267 iwl_get_tx_fail_reason(status), status);
1284 1268
@@ -1298,9 +1282,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1298 ieee80211_tx_status_ni(priv->hw, skb); 1282 ieee80211_tx_status_ni(priv->hw, skb);
1299 } 1283 }
1300 1284
1301 if (is_offchannel_skb)
1302 iwl_scan_offchannel_skb_status(priv);
1303
1304 return 0; 1285 return 0;
1305} 1286}
1306 1287
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 22b7fa5b971a..76e14c046d94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -99,6 +99,7 @@ static const struct iwl_base_params iwl7000_base_params = {
99 .wd_timeout = IWL_LONG_WD_TIMEOUT, 99 .wd_timeout = IWL_LONG_WD_TIMEOUT,
100 .max_event_log_size = 512, 100 .max_event_log_size = 512,
101 .shadow_reg_enable = true, 101 .shadow_reg_enable = true,
102 .pcie_l1_allowed = true,
102}; 103};
103 104
104static const struct iwl_ht_params iwl7000_ht_params = { 105static const struct iwl_ht_params iwl7000_ht_params = {
@@ -126,6 +127,16 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
126 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 127 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
127}; 128};
128 129
130const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
131 .name = "Intel(R) Dual Band Wireless AC 7260",
132 .fw_name_pre = IWL7260_FW_PRE,
133 IWL_DEVICE_7000,
134 .ht_params = &iwl7000_ht_params,
135 .nvm_ver = IWL7260_NVM_VERSION,
136 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
137 .high_temp = true,
138};
139
129const struct iwl_cfg iwl7260_2n_cfg = { 140const struct iwl_cfg iwl7260_2n_cfg = {
130 .name = "Intel(R) Dual Band Wireless N 7260", 141 .name = "Intel(R) Dual Band Wireless N 7260",
131 .fw_name_pre = IWL7260_FW_PRE, 142 .fw_name_pre = IWL7260_FW_PRE,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 83b9ff6ff3ad..e4d370bff306 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -152,6 +152,7 @@ struct iwl_base_params {
152 unsigned int wd_timeout; 152 unsigned int wd_timeout;
153 u32 max_event_log_size; 153 u32 max_event_log_size;
154 const bool shadow_reg_enable; 154 const bool shadow_reg_enable;
155 const bool pcie_l1_allowed;
155}; 156};
156 157
157/* 158/*
@@ -205,6 +206,7 @@ struct iwl_eeprom_params {
205 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) 206 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
206 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 207 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
207 * @internal_wimax_coex: internal wifi/wimax combo device 208 * @internal_wimax_coex: internal wifi/wimax combo device
 209 * @high_temp: Is this NIC designated to operate at high temperatures.
208 * 210 *
209 * We enable the driver to be backward compatible wrt. hardware features. 211 * We enable the driver to be backward compatible wrt. hardware features.
210 * API differences in uCode shouldn't be handled here but through TLVs 212 * API differences in uCode shouldn't be handled here but through TLVs
@@ -233,6 +235,7 @@ struct iwl_cfg {
233 enum iwl_led_mode led_mode; 235 enum iwl_led_mode led_mode;
234 const bool rx_with_siso_diversity; 236 const bool rx_with_siso_diversity;
235 const bool internal_wimax_coex; 237 const bool internal_wimax_coex;
238 bool high_temp;
236}; 239};
237 240
238/* 241/*
@@ -283,6 +286,7 @@ extern const struct iwl_cfg iwl135_bgn_cfg;
283#endif /* CONFIG_IWLDVM */ 286#endif /* CONFIG_IWLDVM */
284#if IS_ENABLED(CONFIG_IWLMVM) 287#if IS_ENABLED(CONFIG_IWLMVM)
285extern const struct iwl_cfg iwl7260_2ac_cfg; 288extern const struct iwl_cfg iwl7260_2ac_cfg;
289extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
286extern const struct iwl_cfg iwl7260_2n_cfg; 290extern const struct iwl_cfg iwl7260_2n_cfg;
287extern const struct iwl_cfg iwl7260_n_cfg; 291extern const struct iwl_cfg iwl7260_n_cfg;
288extern const struct iwl_cfg iwl3160_2ac_cfg; 292extern const struct iwl_cfg iwl3160_2ac_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 7edb8519c8a4..b2bb32a781dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -145,6 +145,7 @@ do { \
145#define IWL_DL_RX 0x01000000 145#define IWL_DL_RX 0x01000000
146#define IWL_DL_ISR 0x02000000 146#define IWL_DL_ISR 0x02000000
147#define IWL_DL_HT 0x04000000 147#define IWL_DL_HT 0x04000000
148#define IWL_DL_EXTERNAL 0x08000000
148/* 0xF0000000 - 0x10000000 */ 149/* 0xF0000000 - 0x10000000 */
149#define IWL_DL_11H 0x10000000 150#define IWL_DL_11H 0x10000000
150#define IWL_DL_STATS 0x20000000 151#define IWL_DL_STATS 0x20000000
@@ -153,6 +154,7 @@ do { \
153 154
154#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) 155#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
155#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) 156#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
157#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
156#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) 158#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
157#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) 159#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
158#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) 160#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
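
IWL_DEBUG_EXTERNAL() slots into the same IWL_DEBUG() machinery as the existing macros, gated by the new IWL_DL_EXTERNAL bit in the runtime debug mask. A hypothetical call site, for illustration only:

	/* fires only when the IWL_DL_EXTERNAL bit is set in the
	 * debug mask (hypothetical message) */
	IWL_DEBUG_EXTERNAL(trans, "handing buffer to external consumer\n");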
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 4491c1c72cc7..684c416d3493 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -33,10 +33,11 @@
33static inline bool iwl_trace_data(struct sk_buff *skb) 33static inline bool iwl_trace_data(struct sk_buff *skb)
34{ 34{
35 struct ieee80211_hdr *hdr = (void *)skb->data; 35 struct ieee80211_hdr *hdr = (void *)skb->data;
36 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
36 37
37 if (ieee80211_is_data(hdr->frame_control)) 38 if (!ieee80211_is_data(hdr->frame_control))
38 return skb->protocol != cpu_to_be16(ETH_P_PAE); 39 return false;
39 return false; 40 return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
40} 41}
41 42
42static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, 43static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index d0162d426f88..99e1da3123c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -843,7 +843,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
843 int i; 843 int i;
844 bool load_module = false; 844 bool load_module = false;
845 845
846 fw->ucode_capa.max_probe_length = 200; 846 fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
847 fw->ucode_capa.standard_phy_calibration_size = 847 fw->ucode_capa.standard_phy_calibration_size =
848 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; 848 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
849 849
@@ -1032,8 +1032,10 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
1032 int ret; 1032 int ret;
1033 1033
1034 drv = kzalloc(sizeof(*drv), GFP_KERNEL); 1034 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
1035 if (!drv) 1035 if (!drv) {
1036 return NULL; 1036 ret = -ENOMEM;
1037 goto err;
1038 }
1037 1039
1038 drv->trans = trans; 1040 drv->trans = trans;
1039 drv->dev = trans->dev; 1041 drv->dev = trans->dev;
@@ -1078,7 +1080,7 @@ err_free_dbgfs:
1078err_free_drv: 1080err_free_drv:
1079#endif 1081#endif
1080 kfree(drv); 1082 kfree(drv);
1081 1083err:
1082 return ERR_PTR(ret); 1084 return ERR_PTR(ret);
1083} 1085}
1084 1086
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index f844d5c748c0..a1223680bc70 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -74,13 +74,24 @@
74 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). 74 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. 75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
76 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS 76 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
77 * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
78 * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
79 * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
80 * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
81 * (rather than two) IPv6 addresses
82 * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
77 */ 83 */
78enum iwl_ucode_tlv_flag { 84enum iwl_ucode_tlv_flag {
79 IWL_UCODE_TLV_FLAGS_PAN = BIT(0), 85 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
80 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), 86 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
81 IWL_UCODE_TLV_FLAGS_MFP = BIT(2), 87 IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
82 IWL_UCODE_TLV_FLAGS_P2P = BIT(3), 88 IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
83 IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), 89 IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
90 IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
91 IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
92 IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
93 IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
94 IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
84}; 95};
85 96
86/* The default calibrate table size if not specified by firmware file */ 97/* The default calibrate table size if not specified by firmware file */
@@ -88,6 +99,9 @@ enum iwl_ucode_tlv_flag {
88#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 99#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
89#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 100#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
90 101
102/* The default max probe length if not specified by the firmware file */
103#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
104
91/** 105/**
92 * enum iwl_ucode_type 106 * enum iwl_ucode_type
93 * 107 *
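
The TLV flags are a plain bitmask parsed from the firmware file, so each new capability is tested with a simple AND; the d3.c change further down keys on one of the new bits in exactly this way (sketch; enable_uapsd() is a hypothetical helper):

	/* gate a feature on what the loaded firmware advertises */
	if (fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
		enable_uapsd();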
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 305c81f2c2b4..dfa4d2e3aaa2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -33,6 +33,8 @@
33#include "iwl-io.h" 33#include "iwl-io.h"
34#include "iwl-csr.h" 34#include "iwl-csr.h"
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-fh.h"
37#include "iwl-csr.h"
36 38
37#define IWL_POLL_INTERVAL 10 /* microseconds */ 39#define IWL_POLL_INTERVAL 10 /* microseconds */
38 40
@@ -166,3 +168,68 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
166 } 168 }
167} 169}
168IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); 170IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
171
172static const char *get_fh_string(int cmd)
173{
174#define IWL_CMD(x) case x: return #x
175 switch (cmd) {
176 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
177 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
178 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
179 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
180 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
181 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
182 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
183 IWL_CMD(FH_TSSR_TX_STATUS_REG);
184 IWL_CMD(FH_TSSR_TX_ERROR_REG);
185 default:
186 return "UNKNOWN";
187 }
188#undef IWL_CMD
189}
190
191int iwl_dump_fh(struct iwl_trans *trans, char **buf)
192{
193 int i;
194 static const u32 fh_tbl[] = {
195 FH_RSCSR_CHNL0_STTS_WPTR_REG,
196 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
197 FH_RSCSR_CHNL0_WPTR,
198 FH_MEM_RCSR_CHNL0_CONFIG_REG,
199 FH_MEM_RSSR_SHARED_CTRL_REG,
200 FH_MEM_RSSR_RX_STATUS_REG,
201 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
202 FH_TSSR_TX_STATUS_REG,
203 FH_TSSR_TX_ERROR_REG
204 };
205
206#ifdef CONFIG_IWLWIFI_DEBUGFS
207 if (buf) {
208 int pos = 0;
209 size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
210
211 *buf = kmalloc(bufsz, GFP_KERNEL);
212 if (!*buf)
213 return -ENOMEM;
214
215 pos += scnprintf(*buf + pos, bufsz - pos,
216 "FH register values:\n");
217
218 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
219 pos += scnprintf(*buf + pos, bufsz - pos,
220 " %34s: 0X%08x\n",
221 get_fh_string(fh_tbl[i]),
222 iwl_read_direct32(trans, fh_tbl[i]));
223
224 return pos;
225 }
226#endif
227
228 IWL_ERR(trans, "FH register values:\n");
229 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
230 IWL_ERR(trans, " %34s: 0X%08x\n",
231 get_fh_string(fh_tbl[i]),
232 iwl_read_direct32(trans, fh_tbl[i]));
233
234 return 0;
235}
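
iwl_dump_fh() is dual-mode: in debugfs builds, a non-NULL buf makes it allocate and fill a text buffer and return the number of bytes written, while buf == NULL dumps the same FH registers through IWL_ERR(). A sketch of the debugfs-side caller (hypothetical):

	char *buf = NULL;
	int pos;

	pos = iwl_dump_fh(trans, &buf);	/* allocates *buf on success */
	if (pos < 0)
		return pos;		/* -ENOMEM */
	/* ... hand 'pos' bytes in 'buf' to userspace ... */
	kfree(buf);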
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index fd9f5b97fff3..63d10ec08dbc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -77,4 +77,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
77 u32 bits, u32 mask); 77 u32 bits, u32 mask);
78void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); 78void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
79 79
80/* Error handling */
81int iwl_dump_fh(struct iwl_trans *trans, char **buf);
82
80#endif 83#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index acd2665afb8c..b76a9a8fc0b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -118,6 +118,7 @@ static const u8 iwl_nvm_channels[] = {
118#define LAST_2GHZ_HT_PLUS 9 118#define LAST_2GHZ_HT_PLUS 9
119#define LAST_5GHZ_HT 161 119#define LAST_5GHZ_HT 161
120 120
121#define DEFAULT_MAX_TX_POWER 16
121 122
122/* rate data (static) */ 123/* rate data (static) */
123static struct ieee80211_rate iwl_cfg80211_rates[] = { 124static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -232,8 +233,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
232 233
233 /* Initialize regulatory-based run-time data */ 234 /* Initialize regulatory-based run-time data */
234 235
235 /* TODO: read the real value from the NVM */ 236 /*
236 channel->max_power = 0; 237 * Default value - highest tx power value. max_power
 238	 * is not used in mvm, and is only kept for backwards compatibility
239 */
240 channel->max_power = DEFAULT_MAX_TX_POWER;
237 is_5ghz = channel->band == IEEE80211_BAND_5GHZ; 241 is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
238 IWL_DEBUG_EEPROM(dev, 242 IWL_DEBUG_EEPROM(dev,
239 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", 243 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 98c7aa7346da..976448a57d02 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -93,7 +93,7 @@ struct iwl_cfg;
93 * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the 93 * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
94 * capabilities advertized by the fw file (in TLV format). 94 * capabilities advertized by the fw file (in TLV format).
95 * 2) The driver layer starts the op_mode (ops->start) 95 * 2) The driver layer starts the op_mode (ops->start)
96 * 3) The op_mode registers registers mac80211 96 * 3) The op_mode registers mac80211
97 * 4) The op_mode is governed by mac80211 97 * 4) The op_mode is governed by mac80211
98 * 5) The driver layer stops the op_mode 98 * 5) The driver layer stops the op_mode
99 */ 99 */
@@ -112,7 +112,7 @@ struct iwl_cfg;
112 * @stop: stop the op_mode. Must free all the memory allocated. 112 * @stop: stop the op_mode. Must free all the memory allocated.
113 * May sleep 113 * May sleep
114 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the 114 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
115 * HCMD the this Rx responds to. 115 * HCMD this Rx responds to.
116 * This callback may sleep, it is called from a threaded IRQ handler. 116 * This callback may sleep, it is called from a threaded IRQ handler.
117 * @queue_full: notifies that a HW queue is full. 117 * @queue_full: notifies that a HW queue is full.
118 * Must be atomic and called with BH disabled. 118 * Must be atomic and called with BH disabled.
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8d91422c5982..dd57a36ecb10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -180,7 +180,7 @@ struct iwl_rx_packet {
180 * enum CMD_MODE - how to send the host commands ? 180 * enum CMD_MODE - how to send the host commands ?
181 * 181 *
182 * @CMD_SYNC: The caller will be stalled until the fw responds to the command 182 * @CMD_SYNC: The caller will be stalled until the fw responds to the command
183 * @CMD_ASYNC: Return right away and don't want for the response 183 * @CMD_ASYNC: Return right away and don't wait for the response
184 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 184 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
185 * response. The caller needs to call iwl_free_resp when done. 185 * response. The caller needs to call iwl_free_resp when done.
186 */ 186 */
@@ -218,7 +218,7 @@ struct iwl_device_cmd {
218 * 218 *
219 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's 219 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
220 * ring. The transport layer doesn't map the command's buffer to DMA, but 220 * ring. The transport layer doesn't map the command's buffer to DMA, but
221 * rather copies it to an previously allocated DMA buffer. This flag tells 221 * rather copies it to a previously allocated DMA buffer. This flag tells
222 * the transport layer not to copy the command, but to map the existing 222 * the transport layer not to copy the command, but to map the existing
223 * buffer (that is passed in) instead. This saves the memcpy and allows 223 * buffer (that is passed in) instead. This saves the memcpy and allows
224 * commands that are bigger than the fixed buffer to be submitted. 224 * commands that are bigger than the fixed buffer to be submitted.
@@ -243,7 +243,7 @@ enum iwl_hcmd_dataflag {
243 * @handler_status: return value of the handler of the command 243 * @handler_status: return value of the handler of the command
244 * (put in setup_rx_handlers) - valid for SYNC mode only 244 * (put in setup_rx_handlers) - valid for SYNC mode only
245 * @flags: can be CMD_* 245 * @flags: can be CMD_*
246 * @len: array of the lenths of the chunks in data 246 * @len: array of the lengths of the chunks in data
247 * @dataflags: IWL_HCMD_DFL_* 247 * @dataflags: IWL_HCMD_DFL_*
248 * @id: id of the host command 248 * @id: id of the host command
249 */ 249 */
@@ -396,8 +396,6 @@ struct iwl_trans;
396 * May sleep 396 * May sleep
397 * @dbgfs_register: add the dbgfs files under this directory. Files will be 397 * @dbgfs_register: add the dbgfs files under this directory. Files will be
398 * automatically deleted. 398 * automatically deleted.
399 * @suspend: stop the device unless WoWLAN is configured
400 * @resume: resume activity of the device
401 * @write8: write a u8 to a register at offset ofs from the BAR 399 * @write8: write a u8 to a register at offset ofs from the BAR
402 * @write32: write a u32 to a register at offset ofs from the BAR 400 * @write32: write a u32 to a register at offset ofs from the BAR
403 * @read32: read a u32 register at offset ofs from the BAR 401 * @read32: read a u32 register at offset ofs from the BAR
@@ -443,10 +441,7 @@ struct iwl_trans_ops {
443 441
444 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 442 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
445 int (*wait_tx_queue_empty)(struct iwl_trans *trans); 443 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
446#ifdef CONFIG_PM_SLEEP 444
447 int (*suspend)(struct iwl_trans *trans);
448 int (*resume)(struct iwl_trans *trans);
449#endif
450 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); 445 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
451 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); 446 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
452 u32 (*read32)(struct iwl_trans *trans, u32 ofs); 447 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
@@ -700,18 +695,6 @@ static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
700 return trans->ops->dbgfs_register(trans, dir); 695 return trans->ops->dbgfs_register(trans, dir);
701} 696}
702 697
703#ifdef CONFIG_PM_SLEEP
704static inline int iwl_trans_suspend(struct iwl_trans *trans)
705{
706 return trans->ops->suspend(trans);
707}
708
709static inline int iwl_trans_resume(struct iwl_trans *trans)
710{
711 return trans->ops->resume(trans);
712}
713#endif
714
715static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) 698static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
716{ 699{
717 trans->ops->write8(trans, ofs, val); 700 trans->ops->write8(trans, ofs, val);
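
Tying the documented flags together: CMD_SYNC combined with CMD_WANT_SKB blocks until the firmware answers and hands the response buffer to the caller, who must release it with iwl_free_resp(). A sketch (the d3.c code below uses this same pattern):

	struct iwl_host_cmd cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_SYNC | CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;
	/* ... parse cmd.resp_pkt ... */
	iwl_free_resp(&cmd);	/* mandatory whenever CMD_WANT_SKB is set */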
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index ff856e543ae8..6d73817850ce 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o 2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o 3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
4iwlmvm-y += scan.o time-event.o rs.o 4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o bt-coex.o 5iwlmvm-y += power.o power_legacy.o bt-coex.o
6iwlmvm-y += led.o tt.o 6iwlmvm-y += led.o tt.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o 7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o 8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index dbd622a3929c..0fad98b85f60 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -220,66 +220,87 @@ static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
220 220
221int iwl_send_bt_init_conf(struct iwl_mvm *mvm) 221int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
222{ 222{
223 struct iwl_bt_coex_cmd cmd = { 223 struct iwl_bt_coex_cmd *bt_cmd;
224 .max_kill = 5, 224 struct iwl_host_cmd cmd = {
225 .bt3_time_t7_value = 1, 225 .id = BT_CONFIG,
226 .bt3_prio_sample_time = 2, 226 .len = { sizeof(*bt_cmd), },
227 .bt3_timer_t2_value = 0xc, 227 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
228 .flags = CMD_SYNC,
228 }; 229 };
229 int ret; 230 int ret;
230 231
231 cmd.flags = iwlwifi_mod_params.bt_coex_active ? 232 /* go to CALIB state in internal BT-Coex state machine */
233 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
234 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
235 if (ret)
236 return ret;
237
238 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
239 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
240 if (ret)
241 return ret;
242
243 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
244 if (!bt_cmd)
245 return -ENOMEM;
246 cmd.data[0] = bt_cmd;
247
248 bt_cmd->max_kill = 5;
249 bt_cmd->bt3_time_t7_value = 1;
250 bt_cmd->bt3_prio_sample_time = 2;
251 bt_cmd->bt3_timer_t2_value = 0xc;
252
253 bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
232 BT_COEX_NW : BT_COEX_DISABLE; 254 BT_COEX_NW : BT_COEX_DISABLE;
233 cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE; 255 bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
234 256
235 cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE | 257 bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
236 BT_VALID_BT_PRIO_BOOST | 258 BT_VALID_BT_PRIO_BOOST |
237 BT_VALID_MAX_KILL | 259 BT_VALID_MAX_KILL |
238 BT_VALID_3W_TMRS | 260 BT_VALID_3W_TMRS |
239 BT_VALID_KILL_ACK | 261 BT_VALID_KILL_ACK |
240 BT_VALID_KILL_CTS | 262 BT_VALID_KILL_CTS |
241 BT_VALID_REDUCED_TX_POWER | 263 BT_VALID_REDUCED_TX_POWER |
242 BT_VALID_LUT); 264 BT_VALID_LUT);
243 265
244 if (mvm->cfg->bt_shared_single_ant) 266 if (mvm->cfg->bt_shared_single_ant)
245 memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup, 267 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
246 sizeof(iwl_single_shared_ant_lookup)); 268 sizeof(iwl_single_shared_ant_lookup));
247 else if (is_loose_coex()) 269 else if (is_loose_coex())
248 memcpy(&cmd.decision_lut, iwl_loose_lookup, 270 memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
249 sizeof(iwl_tight_lookup)); 271 sizeof(iwl_tight_lookup));
250 else 272 else
251 memcpy(&cmd.decision_lut, iwl_tight_lookup, 273 memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
252 sizeof(iwl_tight_lookup)); 274 sizeof(iwl_tight_lookup));
253 275
254 cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST); 276 bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
255 cmd.kill_ack_msk = 277 bt_cmd->kill_ack_msk =
256 cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]); 278 cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
257 cmd.kill_cts_msk = 279 bt_cmd->kill_cts_msk =
258 cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]); 280 cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
259 281
260 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 282 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
261 283
262 /* go to CALIB state in internal BT-Coex state machine */ 284 ret = iwl_mvm_send_cmd(mvm, &cmd);
263 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
264 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
265 if (ret)
266 return ret;
267
268 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
269 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
270 if (ret)
271 return ret;
272 285
273 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, 286 kfree(bt_cmd);
274 sizeof(cmd), &cmd); 287 return ret;
275} 288}
276 289
277static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm, 290static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
278 bool reduced_tx_power) 291 bool reduced_tx_power)
279{ 292{
280 enum iwl_bt_kill_msk bt_kill_msk; 293 enum iwl_bt_kill_msk bt_kill_msk;
281 struct iwl_bt_coex_cmd cmd = {}; 294 struct iwl_bt_coex_cmd *bt_cmd;
282 struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; 295 struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
296 struct iwl_host_cmd cmd = {
297 .id = BT_CONFIG,
298 .data[0] = &bt_cmd,
299 .len = { sizeof(*bt_cmd), },
300 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
301 .flags = CMD_SYNC,
302 };
303 int ret = 0;
283 304
284 lockdep_assert_held(&mvm->mutex); 305 lockdep_assert_held(&mvm->mutex);
285 306
@@ -308,24 +329,40 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
308 return 0; 329 return 0;
309 330
310 mvm->bt_kill_msk = bt_kill_msk; 331 mvm->bt_kill_msk = bt_kill_msk;
311 cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]); 332
312 cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]); 333 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
313 cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS); 334 if (!bt_cmd)
335 return -ENOMEM;
336 cmd.data[0] = bt_cmd;
337
338 bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
339 bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
340 bt_cmd->valid_bit_msk =
341 cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
314 342
315 IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk); 343 IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
316 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, 344
317 sizeof(cmd), &cmd); 345 ret = iwl_mvm_send_cmd(mvm, &cmd);
346
347 kfree(bt_cmd);
348 return ret;
318} 349}
319 350
320static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, 351static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
321 bool enable) 352 bool enable)
322{ 353{
323 struct iwl_bt_coex_cmd cmd = { 354 struct iwl_bt_coex_cmd *bt_cmd;
324 .valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER), 355 /* Send ASYNC since this can be sent from an atomic context */
325 .bt_reduced_tx_power = sta_id, 356 struct iwl_host_cmd cmd = {
357 .id = BT_CONFIG,
358 .len = { sizeof(*bt_cmd), },
359 .dataflags = { IWL_HCMD_DFL_DUP, },
360 .flags = CMD_ASYNC,
326 }; 361 };
362
327 struct ieee80211_sta *sta; 363 struct ieee80211_sta *sta;
328 struct iwl_mvm_sta *mvmsta; 364 struct iwl_mvm_sta *mvmsta;
365 int ret;
329 366
330 /* This can happen if the station has been removed right now */ 367 /* This can happen if the station has been removed right now */
331 if (sta_id == IWL_MVM_STATION_COUNT) 368 if (sta_id == IWL_MVM_STATION_COUNT)
@@ -339,17 +376,26 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
339 if (mvmsta->bt_reduced_txpower == enable) 376 if (mvmsta->bt_reduced_txpower == enable)
340 return 0; 377 return 0;
341 378
379 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
380 if (!bt_cmd)
381 return -ENOMEM;
382 cmd.data[0] = bt_cmd;
383
 384 bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER);
385 bt_cmd->bt_reduced_tx_power = sta_id;
386
342 if (enable) 387 if (enable)
343 cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; 388 bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
344 389
345 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", 390 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
346 enable ? "en" : "dis", sta_id); 391 enable ? "en" : "dis", sta_id);
347 392
348 mvmsta->bt_reduced_txpower = enable; 393 mvmsta->bt_reduced_txpower = enable;
349 394
350 /* Send ASYNC since this can be sent from an atomic context */ 395 ret = iwl_mvm_send_cmd(mvm, &cmd);
351 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC, 396
352 sizeof(cmd), &cmd); 397 kfree(bt_cmd);
398 return ret;
353} 399}
354 400
355struct iwl_bt_iterator_data { 401struct iwl_bt_iterator_data {
@@ -384,6 +430,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
384 430
385 smps_mode = IEEE80211_SMPS_AUTOMATIC; 431 smps_mode = IEEE80211_SMPS_AUTOMATIC;
386 432
 433 /* non-associated BSSes aren't to be considered */
434 if (!vif->bss_conf.assoc)
435 return;
436
387 if (band != IEEE80211_BAND_2GHZ) { 437 if (band != IEEE80211_BAND_2GHZ) {
388 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, 438 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
389 smps_mode); 439 smps_mode);
@@ -523,6 +573,8 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
523 lockdep_is_held(&mvm->mutex)); 573 lockdep_is_held(&mvm->mutex));
524 mvmsta = (void *)sta->drv_priv; 574 mvmsta = (void *)sta->drv_priv;
525 575
576 data->num_bss_ifaces++;
577
526 /* 578 /*
527 * This interface doesn't support reduced Tx power (because of low 579 * This interface doesn't support reduced Tx power (because of low
528 * RSSI probably), then set bt_kill_msk to default values. 580 * RSSI probably), then set bt_kill_msk to default values.
@@ -588,23 +640,5 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
588 640
589void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 641void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
590{ 642{
591 struct ieee80211_chanctx_conf *chanctx_conf;
592 enum ieee80211_band band;
593
594 rcu_read_lock();
595 chanctx_conf = rcu_dereference(vif->chanctx_conf);
596 if (chanctx_conf && chanctx_conf->def.chan)
597 band = chanctx_conf->def.chan->band;
598 else
599 band = -1;
600 rcu_read_unlock();
601
602 /* if we are in 2GHz we will get a notification from the fw */
603 if (band == IEEE80211_BAND_2GHZ)
604 return;
605
606 /* else, we can remove all the constraints */
607 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
608
609 iwl_mvm_bt_coex_notif_handle(mvm); 643 iwl_mvm_bt_coex_notif_handle(mvm);
610} 644}
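
The rewrite above moves the (large) BT coex command off the stack; the resulting allocate/point/send/free skeleton is worth seeing in isolation (sketch, condensed from the hunks above):

	struct iwl_bt_coex_cmd *bt_cmd;
	struct iwl_host_cmd cmd = {
		.id = BT_CONFIG,
		.len = { sizeof(*bt_cmd), },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },	/* map, don't copy */
		.flags = CMD_SYNC,
	};
	int ret;

	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
	if (!bt_cmd)
		return -ENOMEM;
	cmd.data[0] = bt_cmd;
	/* ... fill *bt_cmd ... */
	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(bt_cmd);	/* CMD_SYNC: the buffer was consumed before return */
	return ret;

The atomic-context variant in iwl_mvm_bt_coex_reduced_txp() swaps in GFP_ATOMIC, CMD_ASYNC and IWL_HCMD_DFL_DUP, so the transport duplicates the buffer and the caller may free it immediately.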
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
new file mode 100644
index 000000000000..2bf29f7992ee
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -0,0 +1,80 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __MVM_CONSTANTS_H
64#define __MVM_CONSTANTS_H
65
66#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
67#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
68#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
69#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
70#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
71#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
72#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
73#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
74#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
75#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
76#define IWL_MVM_PS_SNOOZE_INTERVAL 25
77#define IWL_MVM_PS_SNOOZE_WINDOW 50
78#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
79
80#endif /* __MVM_CONSTANTS_H */
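
All the timeout constants are microsecond values (hence the USEC_PER_MSEC scaling); a hypothetical consumer in the power code would feed them to the firmware as __le32 fields, e.g.:

	/* hypothetical consumer: timeouts travel to the firmware
	 * as little-endian microsecond values */
	cmd->tx_data_timeout =
		cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
	cmd->rx_data_timeout =
		cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);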
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 83da884cf303..417639f77b01 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -105,7 +105,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
105 list_for_each_entry(ifa, &idev->addr_list, if_list) { 105 list_for_each_entry(ifa, &idev->addr_list, if_list) {
106 mvmvif->target_ipv6_addrs[idx] = ifa->addr; 106 mvmvif->target_ipv6_addrs[idx] = ifa->addr;
107 idx++; 107 idx++;
108 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS) 108 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
109 break; 109 break;
110 } 110 }
111 read_unlock_bh(&idev->lock); 111 read_unlock_bh(&idev->lock);
@@ -378,36 +378,68 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
378static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, 378static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
379 struct ieee80211_vif *vif) 379 struct ieee80211_vif *vif)
380{ 380{
381 struct iwl_proto_offload_cmd cmd = {}; 381 union {
382 struct iwl_proto_offload_cmd_v1 v1;
383 struct iwl_proto_offload_cmd_v2 v2;
384 } cmd = {};
385 struct iwl_proto_offload_cmd_common *common;
386 u32 enabled = 0, size;
382#if IS_ENABLED(CONFIG_IPV6) 387#if IS_ENABLED(CONFIG_IPV6)
383 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 388 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
384 int i; 389 int i;
385 390
386 if (mvmvif->num_target_ipv6_addrs) { 391 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
387 cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS); 392 if (mvmvif->num_target_ipv6_addrs) {
388 memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN); 393 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
389 } 394 memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
395 }
396
397 BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
398 sizeof(mvmvif->target_ipv6_addrs[0]));
390 399
391 BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) != 400 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
392 sizeof(mvmvif->target_ipv6_addrs[i])); 401 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
402 memcpy(cmd.v2.target_ipv6_addr[i],
403 &mvmvif->target_ipv6_addrs[i],
404 sizeof(cmd.v2.target_ipv6_addr[i]));
405 } else {
406 if (mvmvif->num_target_ipv6_addrs) {
407 enabled |= IWL_D3_PROTO_OFFLOAD_NS;
408 memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
409 }
393 410
394 for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++) 411 BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
395 memcpy(cmd.target_ipv6_addr[i], 412 sizeof(mvmvif->target_ipv6_addrs[0]));
396 &mvmvif->target_ipv6_addrs[i], 413
397 sizeof(cmd.target_ipv6_addr[i])); 414 for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
415 IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
416 memcpy(cmd.v1.target_ipv6_addr[i],
417 &mvmvif->target_ipv6_addrs[i],
418 sizeof(cmd.v1.target_ipv6_addr[i]));
419 }
398#endif 420#endif
399 421
422 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
423 common = &cmd.v2.common;
424 size = sizeof(cmd.v2);
425 } else {
426 common = &cmd.v1.common;
427 size = sizeof(cmd.v1);
428 }
429
400 if (vif->bss_conf.arp_addr_cnt) { 430 if (vif->bss_conf.arp_addr_cnt) {
401 cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP); 431 enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
402 cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; 432 common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
403 memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN); 433 memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
404 } 434 }
405 435
406 if (!cmd.enabled) 436 if (!enabled)
407 return 0; 437 return 0;
408 438
439 common->enabled = cpu_to_le32(enabled);
440
409 return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC, 441 return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
410 sizeof(cmd), &cmd); 442 size, &cmd);
411} 443}
412 444
413enum iwl_mvm_tcp_packet_type { 445enum iwl_mvm_tcp_packet_type {
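
The hunk above is a common firmware-API versioning pattern: a union holds both command layouts, a capability TLV bit picks the member, and only that member's size is sent. Stripped to its skeleton (kernel context assumed):

	union {
		struct iwl_proto_offload_cmd_v1 v1;
		struct iwl_proto_offload_cmd_v2 v2;
	} cmd = {};
	u32 size;

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS)
		size = sizeof(cmd.v2);	/* v2: up to six IPv6 addresses */
	else
		size = sizeof(cmd.v1);	/* v1: two IPv6 addresses */

	/* only 'size' bytes are sent, so the firmware never sees a
	 * layout it doesn't understand */
	return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
				    size, &cmd);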
@@ -1077,73 +1109,16 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1077 return __iwl_mvm_suspend(hw, wowlan, false); 1109 return __iwl_mvm_suspend(hw, wowlan, false);
1078} 1110}
1079 1111
1080static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, 1112static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1081 struct ieee80211_vif *vif) 1113 struct ieee80211_vif *vif,
1114 struct iwl_wowlan_status *status)
1082{ 1115{
1083 u32 base = mvm->error_event_table; 1116 struct sk_buff *pkt = NULL;
1084 struct error_table_start {
1085 /* cf. struct iwl_error_event_table */
1086 u32 valid;
1087 u32 error_id;
1088 } err_info;
1089 struct cfg80211_wowlan_wakeup wakeup = { 1117 struct cfg80211_wowlan_wakeup wakeup = {
1090 .pattern_idx = -1, 1118 .pattern_idx = -1,
1091 }; 1119 };
1092 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1120 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1093 struct iwl_host_cmd cmd = { 1121 u32 reasons = le32_to_cpu(status->wakeup_reasons);
1094 .id = WOWLAN_GET_STATUSES,
1095 .flags = CMD_SYNC | CMD_WANT_SKB,
1096 };
1097 struct iwl_wowlan_status *status;
1098 u32 reasons;
1099 int ret, len;
1100 struct sk_buff *pkt = NULL;
1101
1102 iwl_trans_read_mem_bytes(mvm->trans, base,
1103 &err_info, sizeof(err_info));
1104
1105 if (err_info.valid) {
1106 IWL_INFO(mvm, "error table is valid (%d)\n",
1107 err_info.valid);
1108 if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1109 wakeup.rfkill_release = true;
1110 ieee80211_report_wowlan_wakeup(vif, &wakeup,
1111 GFP_KERNEL);
1112 }
1113 return;
1114 }
1115
1116 /* only for tracing for now */
1117 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
1118 if (ret)
1119 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1120
1121 ret = iwl_mvm_send_cmd(mvm, &cmd);
1122 if (ret) {
1123 IWL_ERR(mvm, "failed to query status (%d)\n", ret);
1124 return;
1125 }
1126
1127 /* RF-kill already asserted again... */
1128 if (!cmd.resp_pkt)
1129 return;
1130
1131 len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1132 if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
1133 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1134 goto out;
1135 }
1136
1137 status = (void *)cmd.resp_pkt->data;
1138
1139 if (len - sizeof(struct iwl_cmd_header) !=
1140 sizeof(*status) +
1141 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
1142 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1143 goto out;
1144 }
1145
1146 reasons = le32_to_cpu(status->wakeup_reasons);
1147 1122
1148 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { 1123 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1149 wakeup_report = NULL; 1124 wakeup_report = NULL;
@@ -1206,6 +1181,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1206 pktsize -= hdrlen; 1181 pktsize -= hdrlen;
1207 1182
1208 if (ieee80211_has_protected(hdr->frame_control)) { 1183 if (ieee80211_has_protected(hdr->frame_control)) {
1184 /*
 1185 * This is unlocked and uses gtk_i(c)vlen,
 1186 * but since everything still runs under RTNL
 1187 * that's not really a problem - changing
1188 * it would be difficult.
1189 */
1209 if (is_multicast_ether_addr(hdr->addr1)) { 1190 if (is_multicast_ether_addr(hdr->addr1)) {
1210 ivlen = mvm->gtk_ivlen; 1191 ivlen = mvm->gtk_ivlen;
1211 icvlen += mvm->gtk_icvlen; 1192 icvlen += mvm->gtk_icvlen;
@@ -1256,9 +1237,82 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1256 report: 1237 report:
1257 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); 1238 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1258 kfree_skb(pkt); 1239 kfree_skb(pkt);
1240}
1259 1241
1260 out: 1242/* releases the MVM mutex */
1243static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1244 struct ieee80211_vif *vif)
1245{
1246 u32 base = mvm->error_event_table;
1247 struct error_table_start {
1248 /* cf. struct iwl_error_event_table */
1249 u32 valid;
1250 u32 error_id;
1251 } err_info;
1252 struct iwl_host_cmd cmd = {
1253 .id = WOWLAN_GET_STATUSES,
1254 .flags = CMD_SYNC | CMD_WANT_SKB,
1255 };
1256 struct iwl_wowlan_status *status;
1257 int ret, len;
1258
1259 iwl_trans_read_mem_bytes(mvm->trans, base,
1260 &err_info, sizeof(err_info));
1261
1262 if (err_info.valid) {
1263 IWL_INFO(mvm, "error table is valid (%d)\n",
1264 err_info.valid);
1265 if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1266 struct cfg80211_wowlan_wakeup wakeup = {
1267 .rfkill_release = true,
1268 };
1269 ieee80211_report_wowlan_wakeup(vif, &wakeup,
1270 GFP_KERNEL);
1271 }
1272 goto out_unlock;
1273 }
1274
1275 /* only for tracing for now */
1276 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
1277 if (ret)
1278 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1279
1280 ret = iwl_mvm_send_cmd(mvm, &cmd);
1281 if (ret) {
1282 IWL_ERR(mvm, "failed to query status (%d)\n", ret);
1283 goto out_unlock;
1284 }
1285
1286 /* RF-kill already asserted again... */
1287 if (!cmd.resp_pkt)
1288 goto out_unlock;
1289
1290 len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1291 if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
1292 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1293 goto out_free_resp;
1294 }
1295
1296 status = (void *)cmd.resp_pkt->data;
1297
1298 if (len - sizeof(struct iwl_cmd_header) !=
1299 sizeof(*status) +
1300 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
1301 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1302 goto out_free_resp;
1303 }
1304
1305 /* now we have all the data we need, unlock to avoid mac80211 issues */
1306 mutex_unlock(&mvm->mutex);
1307
1308 iwl_mvm_report_wakeup_reasons(mvm, vif, status);
1309 iwl_free_resp(&cmd);
1310 return;
1311
1312 out_free_resp:
1261 iwl_free_resp(&cmd); 1313 iwl_free_resp(&cmd);
1314 out_unlock:
1315 mutex_unlock(&mvm->mutex);
1262} 1316}
1263 1317
1264static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) 1318static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1315,10 +1369,13 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1315 iwl_mvm_read_d3_sram(mvm); 1369 iwl_mvm_read_d3_sram(mvm);
1316 1370
1317 iwl_mvm_query_wakeup_reasons(mvm, vif); 1371 iwl_mvm_query_wakeup_reasons(mvm, vif);
1372 /* has unlocked the mutex, so skip that */
1373 goto out;
1318 1374
1319 out_unlock: 1375 out_unlock:
1320 mutex_unlock(&mvm->mutex); 1376 mutex_unlock(&mvm->mutex);
1321 1377
1378 out:
1322 if (!test && vif) 1379 if (!test && vif)
1323 ieee80211_resume_disconnect(vif); 1380 ieee80211_resume_disconnect(vif);
1324 1381
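
The pattern here is worth calling out: iwl_mvm_query_wakeup_reasons() now returns with mvm->mutex already dropped (so mac80211 can be called back without it), and the caller jumps over its own unlock label. A minimal userspace sketch of the same hand-off, with demo_* names standing in for the driver functions:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_setup(void)
{
        return 0;                       /* nonzero would abort the resume */
}

static void demo_query(void)            /* releases demo_lock */
{
        /* gather everything that needs the lock ... */
        pthread_mutex_unlock(&demo_lock);
        /* ... then report to other subsystems lock-free */
        printf("reported wakeup reasons\n");
}

static void demo_resume(void)
{
        pthread_mutex_lock(&demo_lock);
        if (demo_setup())
                goto out_unlock;
        demo_query();                   /* has unlocked the mutex */
        goto out;
out_unlock:
        pthread_mutex_unlock(&demo_lock);
out:
        printf("resume complete\n");
}

int main(void)
{
        demo_resume();
        return 0;
}
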
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index c24a744910ac..aac81b8984b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -352,6 +352,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
352 IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val); 352 IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
353 dbgfs_pm->lprx_rssi_threshold = val; 353 dbgfs_pm->lprx_rssi_threshold = val;
354 break; 354 break;
355 case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
356 IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
357 dbgfs_pm->snooze_ena = val;
358 break;
355 } 359 }
356} 360}
357 361
@@ -405,6 +409,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
405 POWER_LPRX_RSSI_THRESHOLD_MIN) 409 POWER_LPRX_RSSI_THRESHOLD_MIN)
406 return -EINVAL; 410 return -EINVAL;
407 param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD; 411 param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
412 } else if (!strncmp("snooze_enable=", buf, 14)) {
413 if (sscanf(buf + 14, "%d", &val) != 1)
414 return -EINVAL;
415 param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
408 } else { 416 } else {
409 return -EINVAL; 417 return -EINVAL;
410 } 418 }
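
All of these debugfs write handlers share one convention: match a "key=" prefix with strncmp(), parse the value with sscanf(), and reject anything malformed with -EINVAL. A standalone sketch of that convention (the two keys below are just examples):

#include <stdio.h>
#include <string.h>

static int parse_param(const char *buf, int *val)
{
        if (!strncmp("snooze_enable=", buf, 14))
                return sscanf(buf + 14, "%d", val) == 1 ? 0 : -1;
        if (!strncmp("lprx_rssi_threshold=", buf, 20))
                return sscanf(buf + 20, "%d", val) == 1 ? 0 : -1;
        return -1;              /* unknown key: -EINVAL in the driver */
}

int main(void)
{
        int v;

        if (!parse_param("snooze_enable=1", &v))
                printf("snooze_enable -> %d\n", v);
        if (parse_param("bogus=1", &v))
                printf("bogus key rejected\n");
        return 0;
}
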
@@ -424,40 +432,11 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
424 struct ieee80211_vif *vif = file->private_data; 432 struct ieee80211_vif *vif = file->private_data;
425 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 433 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
426 struct iwl_mvm *mvm = mvmvif->dbgfs_data; 434 struct iwl_mvm *mvm = mvmvif->dbgfs_data;
427 struct iwl_powertable_cmd cmd = {}; 435 char buf[512];
428 char buf[256];
429 int bufsz = sizeof(buf); 436 int bufsz = sizeof(buf);
430 int pos = 0; 437 int pos;
431 438
432 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 439 pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
433
434 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
435 (cmd.flags &
436 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
437 0 : 1);
438 pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
439 le32_to_cpu(cmd.skip_dtim_periods));
440 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
441 iwlmvm_mod_params.power_scheme);
442 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
443 le16_to_cpu(cmd.flags));
444 pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
445 cmd.keep_alive_seconds);
446
447 if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
448 pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
449 (cmd.flags &
450 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
451 1 : 0);
452 pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
453 le32_to_cpu(cmd.rx_data_timeout));
454 pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
455 le32_to_cpu(cmd.tx_data_timeout));
456 if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
457 pos += scnprintf(buf+pos, bufsz-pos,
458 "lprx_rssi_threshold = %d\n",
459 le32_to_cpu(cmd.lprx_rssi_threshold));
460 }
461 440
462 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 441 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
463} 442}
@@ -621,25 +600,160 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
621} 600}
622#undef BT_MBOX_PRINT 601#undef BT_MBOX_PRINT
623 602
603#define PRINT_STATS_LE32(_str, _val) \
604 pos += scnprintf(buf + pos, bufsz - pos, \
605 fmt_table, _str, \
606 le32_to_cpu(_val))
607
608static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
609 char __user *user_buf, size_t count,
610 loff_t *ppos)
611{
612 struct iwl_mvm *mvm = file->private_data;
613 static const char *fmt_table = "\t%-30s %10u\n";
614 static const char *fmt_header = "%-32s\n";
615 int pos = 0;
616 char *buf;
617 int ret;
618 int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
619 sizeof(struct mvm_statistics_rx_non_phy) * 10 +
620 sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
621 struct mvm_statistics_rx_phy *ofdm;
622 struct mvm_statistics_rx_phy *cck;
623 struct mvm_statistics_rx_non_phy *general;
624 struct mvm_statistics_rx_ht_phy *ht;
625
626 buf = kzalloc(bufsz, GFP_KERNEL);
627 if (!buf)
628 return -ENOMEM;
629
630 mutex_lock(&mvm->mutex);
631
632 ofdm = &mvm->rx_stats.ofdm;
633 cck = &mvm->rx_stats.cck;
634 general = &mvm->rx_stats.general;
635 ht = &mvm->rx_stats.ofdm_ht;
636
637 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
638 "Statistics_Rx - OFDM");
639 PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
640 PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
641 PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
642 PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
643 PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
644 PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
645 PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
646 PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
647 PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
648 PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
649 PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
650 PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
651 PRINT_STATS_LE32("rxe_frame_lmt_overrun",
652 ofdm->rxe_frame_limit_overrun);
653 PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
654 PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
655 PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
656 PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
657 PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
658 PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
659 PRINT_STATS_LE32("reserved", ofdm->reserved);
660
661 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
662 "Statistics_Rx - CCK");
663 PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
664 PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
665 PRINT_STATS_LE32("plcp_err", cck->plcp_err);
666 PRINT_STATS_LE32("crc32_err", cck->crc32_err);
667 PRINT_STATS_LE32("overrun_err", cck->overrun_err);
668 PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
669 PRINT_STATS_LE32("crc32_good", cck->crc32_good);
670 PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
671 PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
672 PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
673 PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
674 PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
675 PRINT_STATS_LE32("rxe_frame_lmt_overrun",
676 cck->rxe_frame_limit_overrun);
677 PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
678 PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
679 PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
680 PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
681 PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
682 PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
683 PRINT_STATS_LE32("reserved", cck->reserved);
684
685 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
686 "Statistics_Rx - GENERAL");
687 PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
688 PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
689 PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
690 PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
691 PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
692 PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
693 PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
694 PRINT_STATS_LE32("adc_rx_saturation_time",
695 general->adc_rx_saturation_time);
696 PRINT_STATS_LE32("ina_detection_search_time",
697 general->ina_detection_search_time);
698 PRINT_STATS_LE32("beacon_silence_rssi_a",
699 general->beacon_silence_rssi_a);
700 PRINT_STATS_LE32("beacon_silence_rssi_b",
701 general->beacon_silence_rssi_b);
702 PRINT_STATS_LE32("beacon_silence_rssi_c",
703 general->beacon_silence_rssi_c);
704 PRINT_STATS_LE32("interference_data_flag",
705 general->interference_data_flag);
706 PRINT_STATS_LE32("channel_load", general->channel_load);
707 PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
708 PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
709 PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
710 PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
711 PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
712 PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
713 PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
714 PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
715 PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
716
717 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
718 "Statistics_Rx - HT");
719 PRINT_STATS_LE32("plcp_err", ht->plcp_err);
720 PRINT_STATS_LE32("overrun_err", ht->overrun_err);
721 PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
722 PRINT_STATS_LE32("crc32_good", ht->crc32_good);
723 PRINT_STATS_LE32("crc32_err", ht->crc32_err);
724 PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
725 PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
726 PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
727 PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
728 PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
729
730 mutex_unlock(&mvm->mutex);
731
732 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
733 kfree(buf);
734
735 return ret;
736}
737#undef PRINT_STATS_LE32
738
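
PRINT_STATS_LE32 just appends one right-aligned row to a bounded buffer and advances the cursor. The same accumulation pattern in userspace (snprintf instead of the kernel's scnprintf, and le32_to_cpu stubbed to the identity on the assumption of a little-endian host):

#include <stdio.h>
#include <stdint.h>

#define le32_to_cpu(x) (x)      /* assumption: little-endian host */

#define PRINT_STATS_LE32(_str, _val) \
        pos += snprintf(buf + pos, sizeof(buf) - pos, \
                        "\t%-30s %10u\n", _str, le32_to_cpu(_val))

int main(void)
{
        char buf[256];
        int pos = 0;
        uint32_t plcp_err = 3, crc32_err = 7;

        pos += snprintf(buf + pos, sizeof(buf) - pos, "%-32s\n",
                        "Statistics_Rx - OFDM");
        PRINT_STATS_LE32("plcp_err", plcp_err);
        PRINT_STATS_LE32("crc32_err", crc32_err);
        fputs(buf, stdout);
        return 0;
}
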
624static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, 739static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
625 const char __user *user_buf, 740 const char __user *user_buf,
626 size_t count, loff_t *ppos) 741 size_t count, loff_t *ppos)
627{ 742{
628 struct iwl_mvm *mvm = file->private_data; 743 struct iwl_mvm *mvm = file->private_data;
629 bool restart_fw = iwlwifi_mod_params.restart_fw;
630 int ret; 744 int ret;
631 745
632 iwlwifi_mod_params.restart_fw = true;
633
634 mutex_lock(&mvm->mutex); 746 mutex_lock(&mvm->mutex);
635 747
748 /* allow one more restart that we're provoking here */
749 if (mvm->restart_fw >= 0)
750 mvm->restart_fw++;
751
636 /* take the return value to make compiler happy - it will fail anyway */ 752 /* take the return value to make compiler happy - it will fail anyway */
637 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL); 753 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
638 754
639 mutex_unlock(&mvm->mutex); 755 mutex_unlock(&mvm->mutex);
640 756
641 iwlwifi_mod_params.restart_fw = restart_fw;
642
643 return count; 757 return count;
644} 758}
645 759
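
The write handler now spends one unit of a per-device restart budget instead of flipping the global module parameter around the provoked crash. A sketch of the budget semantics as read from this hunk (a negative count meaning "unlimited" is an assumption of the sketch, not confirmed here):

#include <stdio.h>

static int restart_fw = 1;              /* remaining allowed restarts */

static void provoke_fw_error(void)
{
        if (restart_fw >= 0)
                restart_fw++;           /* allow one more restart */
        printf("sent REPLY_ERROR, budget now %d\n", restart_fw);
}

static int may_restart(void)
{
        if (restart_fw < 0)             /* assumed: negative = unlimited */
                return 1;
        if (restart_fw == 0)
                return 0;
        restart_fw--;
        return 1;
}

int main(void)
{
        provoke_fw_error();
        printf("restart allowed: %d\n", may_restart());
        return 0;
}
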
@@ -661,8 +775,14 @@ static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
661 case MVM_DEBUGFS_BF_ROAMING_STATE: 775 case MVM_DEBUGFS_BF_ROAMING_STATE:
662 dbgfs_bf->bf_roaming_state = value; 776 dbgfs_bf->bf_roaming_state = value;
663 break; 777 break;
664 case MVM_DEBUGFS_BF_TEMPERATURE_DELTA: 778 case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
665 dbgfs_bf->bf_temperature_delta = value; 779 dbgfs_bf->bf_temp_threshold = value;
780 break;
781 case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
782 dbgfs_bf->bf_temp_fast_filter = value;
783 break;
784 case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
785 dbgfs_bf->bf_temp_slow_filter = value;
666 break; 786 break;
667 case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: 787 case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
668 dbgfs_bf->bf_enable_beacon_filter = value; 788 dbgfs_bf->bf_enable_beacon_filter = value;
@@ -721,13 +841,27 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
721 value > IWL_BF_ROAMING_STATE_MAX) 841 value > IWL_BF_ROAMING_STATE_MAX)
722 return -EINVAL; 842 return -EINVAL;
723 param = MVM_DEBUGFS_BF_ROAMING_STATE; 843 param = MVM_DEBUGFS_BF_ROAMING_STATE;
724 } else if (!strncmp("bf_temperature_delta=", buf, 21)) { 844 } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
725 if (sscanf(buf+21, "%d", &value) != 1) 845 if (sscanf(buf+18, "%d", &value) != 1)
846 return -EINVAL;
847 if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
848 value > IWL_BF_TEMP_THRESHOLD_MAX)
726 return -EINVAL; 849 return -EINVAL;
727 if (value < IWL_BF_TEMPERATURE_DELTA_MIN || 850 param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
728 value > IWL_BF_TEMPERATURE_DELTA_MAX) 851 } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
852 if (sscanf(buf+20, "%d", &value) != 1)
729 return -EINVAL; 853 return -EINVAL;
730 param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA; 854 if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
855 value > IWL_BF_TEMP_FAST_FILTER_MAX)
856 return -EINVAL;
857 param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
858 } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
859 if (sscanf(buf+20, "%d", &value) != 1)
860 return -EINVAL;
861 if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
862 value > IWL_BF_TEMP_SLOW_FILTER_MAX)
863 return -EINVAL;
864 param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
731 } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { 865 } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
732 if (sscanf(buf+24, "%d", &value) != 1) 866 if (sscanf(buf+24, "%d", &value) != 1)
733 return -EINVAL; 867 return -EINVAL;
@@ -769,10 +903,7 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
769 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) { 903 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
770 ret = iwl_mvm_disable_beacon_filter(mvm, vif); 904 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
771 } else { 905 } else {
772 if (mvmvif->bf_enabled) 906 ret = iwl_mvm_enable_beacon_filter(mvm, vif);
773 ret = iwl_mvm_enable_beacon_filter(mvm, vif);
774 else
775 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
776 } 907 }
777 mutex_unlock(&mvm->mutex); 908 mutex_unlock(&mvm->mutex);
778 909
@@ -789,41 +920,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
789 int pos = 0; 920 int pos = 0;
790 const size_t bufsz = sizeof(buf); 921 const size_t bufsz = sizeof(buf);
791 struct iwl_beacon_filter_cmd cmd = { 922 struct iwl_beacon_filter_cmd cmd = {
792 .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, 923 IWL_BF_CMD_CONFIG_DEFAULTS,
793 .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, 924 .bf_enable_beacon_filter =
794 .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, 925 cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
795 .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, 926 .ba_enable_beacon_abort =
796 .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT, 927 cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
797 .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
798 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
799 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
800 .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
801 }; 928 };
802 929
803 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); 930 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
804 if (mvmvif->bf_enabled) 931 if (mvmvif->bf_data.bf_enabled)
805 cmd.bf_enable_beacon_filter = 1; 932 cmd.bf_enable_beacon_filter = cpu_to_le32(1);
806 else 933 else
807 cmd.bf_enable_beacon_filter = 0; 934 cmd.bf_enable_beacon_filter = 0;
808 935
809 pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", 936 pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
810 cmd.bf_energy_delta); 937 le32_to_cpu(cmd.bf_energy_delta));
811 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", 938 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
812 cmd.bf_roaming_energy_delta); 939 le32_to_cpu(cmd.bf_roaming_energy_delta));
813 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", 940 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
814 cmd.bf_roaming_state); 941 le32_to_cpu(cmd.bf_roaming_state));
815 pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n", 942 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
816 cmd.bf_temperature_delta); 943 le32_to_cpu(cmd.bf_temp_threshold));
944 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
945 le32_to_cpu(cmd.bf_temp_fast_filter));
946 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
947 le32_to_cpu(cmd.bf_temp_slow_filter));
817 pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", 948 pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
818 cmd.bf_enable_beacon_filter); 949 le32_to_cpu(cmd.bf_enable_beacon_filter));
819 pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", 950 pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
820 cmd.bf_debug_flag); 951 le32_to_cpu(cmd.bf_debug_flag));
821 pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", 952 pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
822 cmd.bf_escape_timer); 953 le32_to_cpu(cmd.bf_escape_timer));
823 pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", 954 pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
824 cmd.ba_escape_timer); 955 le32_to_cpu(cmd.ba_escape_timer));
825 pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", 956 pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
826 cmd.ba_enable_beacon_abort); 957 le32_to_cpu(cmd.ba_enable_beacon_abort));
827 958
828 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 959 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
829} 960}
@@ -934,6 +1065,7 @@ MVM_DEBUGFS_READ_FILE_OPS(stations);
934MVM_DEBUGFS_READ_FILE_OPS(bt_notif); 1065MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
935MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); 1066MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
936MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); 1067MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
1068MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
937MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart); 1069MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
938#ifdef CONFIG_PM_SLEEP 1070#ifdef CONFIG_PM_SLEEP
939MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram); 1071MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
@@ -957,6 +1089,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
957 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); 1089 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
958 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); 1090 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
959 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); 1091 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
1092 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
960 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); 1093 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
961#ifdef CONFIG_PM_SLEEP 1094#ifdef CONFIG_PM_SLEEP
962 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); 1095 MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 6f8b2c16ae17..df72fcdf8170 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -98,34 +98,63 @@ enum iwl_proto_offloads {
98 IWL_D3_PROTO_OFFLOAD_NS = BIT(1), 98 IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
99}; 99};
100 100
101#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2 101#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
102#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
103#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6
102 104
103/** 105/**
104 * struct iwl_proto_offload_cmd - ARP/NS offload configuration 106 * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
105 * @enabled: enable flags 107 * @enabled: enable flags
106 * @remote_ipv4_addr: remote address to answer to (or zero if all) 108 * @remote_ipv4_addr: remote address to answer to (or zero if all)
107 * @host_ipv4_addr: our IPv4 address to respond to queries for 109 * @host_ipv4_addr: our IPv4 address to respond to queries for
108 * @arp_mac_addr: our MAC address for ARP responses 110 * @arp_mac_addr: our MAC address for ARP responses
109 * @remote_ipv6_addr: remote address to answer to (or zero if all) 111 * @reserved: unused
110 * @solicited_node_ipv6_addr: broken -- solicited node address exists
111 * for each target address
112 * @target_ipv6_addr: our target addresses
113 * @ndp_mac_addr: neighbor solicitation response MAC address
114 */ 112 */
115struct iwl_proto_offload_cmd { 113struct iwl_proto_offload_cmd_common {
116 __le32 enabled; 114 __le32 enabled;
117 __be32 remote_ipv4_addr; 115 __be32 remote_ipv4_addr;
118 __be32 host_ipv4_addr; 116 __be32 host_ipv4_addr;
119 u8 arp_mac_addr[ETH_ALEN]; 117 u8 arp_mac_addr[ETH_ALEN];
120 __le16 reserved1; 118 __le16 reserved;
119} __packed;
121 120
121/**
122 * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
123 * @common: common/IPv4 configuration
124 * @remote_ipv6_addr: remote address to answer to (or zero if all)
125 * @solicited_node_ipv6_addr: broken -- solicited node address exists
126 * for each target address
127 * @target_ipv6_addr: our target addresses
128 * @ndp_mac_addr: neighbor solicitation response MAC address
129 */
130struct iwl_proto_offload_cmd_v1 {
131 struct iwl_proto_offload_cmd_common common;
122 u8 remote_ipv6_addr[16]; 132 u8 remote_ipv6_addr[16];
123 u8 solicited_node_ipv6_addr[16]; 133 u8 solicited_node_ipv6_addr[16];
124 u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16]; 134 u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
125 u8 ndp_mac_addr[ETH_ALEN]; 135 u8 ndp_mac_addr[ETH_ALEN];
126 __le16 reserved2; 136 __le16 reserved2;
127} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ 137} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
128 138
139/**
140 * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
141 * @common: common/IPv4 configuration
142 * @remote_ipv6_addr: remote address to answer to (or zero if all)
143 * @solicited_node_ipv6_addr: broken -- solicited node address exists
144 * for each target address
145 * @target_ipv6_addr: our target addresses
146 * @ndp_mac_addr: neighbor solicitation response MAC address
147 */
148struct iwl_proto_offload_cmd_v2 {
149 struct iwl_proto_offload_cmd_common common;
150 u8 remote_ipv6_addr[16];
151 u8 solicited_node_ipv6_addr[16];
152 u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
153 u8 ndp_mac_addr[ETH_ALEN];
154 u8 numValidIPv6Addresses;
155 u8 reserved2[3];
156} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
157
129 158
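
Splitting the command into a shared iwl_proto_offload_cmd_common plus per-version tails lets one helper fill the common fields while the caller picks the struct (and hence the length) by firmware capability. A simplified sketch of that layout trick; the types and the fw_has_v2 flag are stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct offload_common { uint32_t enabled; uint8_t mac[6]; };
struct offload_v1 { struct offload_common common; uint8_t ipv6[2][16]; };
struct offload_v2 { struct offload_common common; uint8_t ipv6[6][16];
                    uint8_t num_valid; };

static void fill_common(struct offload_common *c)
{
        c->enabled = 0x3;               /* ARP | NS */
        memset(c->mac, 0xaa, sizeof(c->mac));
}

int main(void)
{
        int fw_has_v2 = 1;              /* from a ucode TLV flag in reality */
        struct offload_v1 v1 = { { 0 } };
        struct offload_v2 v2 = { { 0 } };
        void *cmd = fw_has_v2 ? (void *)&v2 : (void *)&v1;
        size_t len = fw_has_v2 ? sizeof(v2) : sizeof(v1);

        fill_common(cmd);               /* common block is the first member */
        printf("sending %zu-byte offload command\n", len);
        return 0;
}
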
130/* 159/*
131 * WOWLAN_PATTERNS 160 * WOWLAN_PATTERNS
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index a6da359a80c3..8e7ab41079ca 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -79,6 +79,10 @@
79 * '1' Driver enables PM (use rest of parameters) 79 * '1' Driver enables PM (use rest of parameters)
80 * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM, 80 * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
81 * '1' PM could sleep over DTIM until the listen interval. 81 * '1' PM could sleep over DTIM until the listen interval.
82 * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
83 * access categories are both delivery and trigger enabled.
84 * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
85 * PBW snoozing are enabled
82 * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask 86 * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
83 * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. 87 * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
84*/ 88*/
@@ -86,6 +90,8 @@ enum iwl_power_flags {
86 POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), 90 POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
87 POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), 91 POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
88 POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), 92 POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
93 POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
94 POWER_FLAGS_BT_SCO_ENA = BIT(8),
89 POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), 95 POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
90 POWER_FLAGS_LPRX_ENA_MSK = BIT(11), 96 POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
91}; 97};
@@ -93,7 +99,8 @@ enum iwl_power_flags {
93#define IWL_POWER_VEC_SIZE 5 99#define IWL_POWER_VEC_SIZE 5
94 100
95/** 101/**
96 * struct iwl_powertable_cmd - Power Table Command 102 * struct iwl_powertable_cmd - legacy power command. Beside old API support this
103 * is used also with a new power API for device wide power settings.
97 * POWER_TABLE_CMD = 0x77 (command, has simple generic response) 104 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
98 * 105 *
99 * @flags: Power table command flags from POWER_FLAGS_* 106 * @flags: Power table command flags from POWER_FLAGS_*
@@ -125,6 +132,76 @@ struct iwl_powertable_cmd {
125} __packed; 132} __packed;
126 133
127/** 134/**
135 * struct iwl_mac_power_cmd - New power command containing uAPSD support
136 * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
137 * @id_and_color: MAC context identifier
138 * @flags: Power table command flags from POWER_FLAGS_*
139 * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
140 * Minimum allowed: 3 * DTIM. Keep alive period must be
141 * set regardless of power scheme or current power state.
142 * FW uses this value also when PM is disabled.
143 * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
144 * PSM transition - legacy PM
145 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
146 * PSM transition - legacy PM
147 * @sleep_interval: not in use
148 * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
149 * is set. For example, if it is required to skip over
150 * one DTIM, this value needs to be set to 2 (DTIM periods).
151 * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
152 * PSM transition - uAPSD
153 * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
154 * PSM transition - uAPSD
155 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
156 * Default: 80dbm
157 * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
158 * @snooze_interval: Maximum time between attempts to retrieve buffered data
159 * from the AP [msec]
160 * @snooze_window: A window of time in which PBW snoozing ensures that all
161 * packets are received. It is also the minimum time from last
162 * received unicast RX packet, before client stops snoozing
163 * for data. [msec]
164 * @snooze_step: TBD
165 * @qndp_tid: TID client shall use for uAPSD QNDP triggers
166 * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
167 * each corresponding AC.
168 * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
169 * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
170 * values.
171 * @heavy_tx_thld_packets: TX threshold measured in number of packets
172 * @heavy_rx_thld_packets: RX threshold measured in number of packets
173 * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
174 * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
175 * @limited_ps_threshold:
176*/
177struct iwl_mac_power_cmd {
178 /* CONTEXT_DESC_API_T_VER_1 */
179 __le32 id_and_color;
180
181 /* CLIENT_PM_POWER_TABLE_S_VER_1 */
182 __le16 flags;
183 __le16 keep_alive_seconds;
184 __le32 rx_data_timeout;
185 __le32 tx_data_timeout;
186 __le32 rx_data_timeout_uapsd;
187 __le32 tx_data_timeout_uapsd;
188 u8 lprx_rssi_threshold;
189 u8 skip_dtim_periods;
190 __le16 snooze_interval;
191 __le16 snooze_window;
192 u8 snooze_step;
193 u8 qndp_tid;
194 u8 uapsd_ac_flags;
195 u8 uapsd_max_sp;
196 u8 heavy_tx_thld_packets;
197 u8 heavy_rx_thld_packets;
198 u8 heavy_tx_thld_percentage;
199 u8 heavy_rx_thld_percentage;
200 u8 limited_ps_threshold;
201 u8 reserved;
202} __packed;
203
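
The keep_alive_seconds rule above (never below three DTIM intervals, 25 s by default) worked through once, converting TUs to seconds; the numbers below are illustrative only:

#include <stdio.h>

int main(void)
{
        unsigned beacon_int_tu = 100;   /* 1 TU = 1024 us */
        unsigned dtim_period = 3;       /* beacons per DTIM */
        unsigned long dtim_us =
                (unsigned long)beacon_int_tu * dtim_period * 1024;
        /* three DTIM intervals, rounded up to whole seconds */
        unsigned min_keep_alive = (3 * dtim_us + 999999) / 1000000;
        unsigned keep_alive = min_keep_alive > 25 ? min_keep_alive : 25;

        printf("keep_alive_seconds = %u\n", keep_alive);
        return 0;
}
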
204/**
128 * struct iwl_beacon_filter_cmd 205 * struct iwl_beacon_filter_cmd
129 * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) 206 * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
130 * @id_and_color: MAC context identifier 207 * @id_and_color: MAC context identifier
@@ -143,11 +220,21 @@ struct iwl_powertable_cmd {
143 * calculated for current beacon is less than the threshold, use 220 * calculated for current beacon is less than the threshold, use
144 * Roaming Energy Delta Threshold, otherwise use normal Energy Delta 221 * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
145 * Threshold. Typical energy threshold is -72dBm. 222 * Threshold. Typical energy threshold is -72dBm.
146 * @bf_temperature_delta: Send Beacon to driver if delta in temperature values 223 * @bf_temp_threshold: This threshold determines the type of temperature
147 * calculated for this and the last passed beacon is greater than this 224 * filtering (Slow or Fast) that is selected (Units are in Celsius):
148 * threshold. Zero value means that the temperature change is ignored for 225 * If the current temperature is above this threshold - Fast filter
226 * will be used; if the current temperature is below this threshold -
227 * Slow filter will be used.
228 * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
229 * calculated for this and the last passed beacon is greater than this
230 * threshold. Zero value means that the temperature change is ignored for
149 * beacon filtering; beacons will not be forced to be sent to driver 231 * beacon filtering; beacons will not be forced to be sent to driver
150 * regardless of whether its temperature has been changed. 232 * regardless of whether its temperature has been changed.
233 * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
234 * calculated for this and the last passed beacon is greater than this
235 * threshold. Zero value means that the temperature change is ignored for
236 * beacon filtering; beacons will not be forced to be sent to driver
237 * regardless of whether its temperature has been changed.
151 * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. 238 * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
152 * @bf_filter_escape_timer: Send beacons to driver if no beacons were passed 239 * @bf_filter_escape_timer: Send beacons to driver if no beacons were passed
153 * for a specific period of time. Units: Beacons. 240 * for a specific period of time. Units: Beacons.
@@ -156,17 +243,17 @@ struct iwl_powertable_cmd {
156 * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. 243 * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
157 */ 244 */
158struct iwl_beacon_filter_cmd { 245struct iwl_beacon_filter_cmd {
159 u8 bf_energy_delta; 246 __le32 bf_energy_delta;
160 u8 bf_roaming_energy_delta; 247 __le32 bf_roaming_energy_delta;
161 u8 bf_roaming_state; 248 __le32 bf_roaming_state;
162 u8 bf_temperature_delta; 249 __le32 bf_temp_threshold;
163 u8 bf_enable_beacon_filter; 250 __le32 bf_temp_fast_filter;
164 u8 bf_debug_flag; 251 __le32 bf_temp_slow_filter;
165 __le16 reserved1; 252 __le32 bf_enable_beacon_filter;
253 __le32 bf_debug_flag;
166 __le32 bf_escape_timer; 254 __le32 bf_escape_timer;
167 __le32 ba_escape_timer; 255 __le32 ba_escape_timer;
168 u8 ba_enable_beacon_abort; 256 __le32 ba_enable_beacon_abort;
169 u8 reserved2[3];
170} __packed; 257} __packed;
171 258
172/* Beacon filtering and beacon abort */ 259/* Beacon filtering and beacon abort */
@@ -182,9 +269,17 @@ struct iwl_beacon_filter_cmd {
182#define IWL_BF_ROAMING_STATE_MAX 255 269#define IWL_BF_ROAMING_STATE_MAX 255
183#define IWL_BF_ROAMING_STATE_MIN 0 270#define IWL_BF_ROAMING_STATE_MIN 0
184 271
185#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5 272#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
186#define IWL_BF_TEMPERATURE_DELTA_MAX 255 273#define IWL_BF_TEMP_THRESHOLD_MAX 255
187#define IWL_BF_TEMPERATURE_DELTA_MIN 0 274#define IWL_BF_TEMP_THRESHOLD_MIN 0
275
276#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
277#define IWL_BF_TEMP_FAST_FILTER_MAX 255
278#define IWL_BF_TEMP_FAST_FILTER_MIN 0
279
280#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
281#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
282#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
188 283
189#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 284#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
190 285
@@ -194,19 +289,23 @@ struct iwl_beacon_filter_cmd {
194#define IWL_BF_ESCAPE_TIMER_MAX 1024 289#define IWL_BF_ESCAPE_TIMER_MAX 1024
195#define IWL_BF_ESCAPE_TIMER_MIN 0 290#define IWL_BF_ESCAPE_TIMER_MIN 0
196 291
197#define IWL_BA_ESCAPE_TIMER_DEFAULT 3 292#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
293#define IWL_BA_ESCAPE_TIMER_D3 6
198#define IWL_BA_ESCAPE_TIMER_MAX 1024 294#define IWL_BA_ESCAPE_TIMER_MAX 1024
199#define IWL_BA_ESCAPE_TIMER_MIN 0 295#define IWL_BA_ESCAPE_TIMER_MIN 0
200 296
201#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 297#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
202 298
203#define IWL_BF_CMD_CONFIG_DEFAULTS \ 299#define IWL_BF_CMD_CONFIG_DEFAULTS \
204 .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \ 300 .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
205 .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \ 301 .bf_roaming_energy_delta = \
206 .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \ 302 cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
207 .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \ 303 .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
208 .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \ 304 .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
209 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \ 305 .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
306 .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
307 .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
308 .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
210 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT) 309 .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
211 310
212#endif 311#endif
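
IWL_BF_CMD_CONFIG_DEFAULTS expands to a run of designated initializers, so every user starts from the same byte-swapped defaults and overrides only what it needs. A reduced sketch of the idiom (three fields instead of the full command, cpu_to_le32 stubbed for a little-endian host):

#include <stdint.h>
#include <stdio.h>

#define cpu_to_le32(x) ((uint32_t)(x))  /* little-endian host assumed */

struct bf_cmd {
        uint32_t bf_energy_delta;
        uint32_t bf_escape_timer;
        uint32_t bf_enable_beacon_filter;
};

#define BF_CMD_CONFIG_DEFAULTS \
        .bf_energy_delta = cpu_to_le32(5), \
        .bf_escape_timer = cpu_to_le32(50)

int main(void)
{
        struct bf_cmd cmd = {
                BF_CMD_CONFIG_DEFAULTS,
                .bf_enable_beacon_filter = cpu_to_le32(1),
        };

        printf("escape timer %u, filter enabled %u\n",
               (unsigned)cmd.bf_escape_timer,
               (unsigned)cmd.bf_enable_beacon_filter);
        return 0;
}
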
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 365095a0c3b3..83cb9b992ea4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -137,6 +137,8 @@ struct iwl_ssid_ie {
137 *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND: 137 *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
138 *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND: 138 *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
139 *@SCAN_FLAGS_FRAGMENTED_SCAN: 139 *@SCAN_FLAGS_FRAGMENTED_SCAN:
140 *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that were active
141 * in the past hour, even if they are marked as passive.
140 */ 142 */
141enum iwl_scan_flags { 143enum iwl_scan_flags {
142 SCAN_FLAGS_PERIODIC_SCAN = BIT(0), 144 SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
@@ -144,6 +146,7 @@ enum iwl_scan_flags {
144 SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2), 146 SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
145 SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3), 147 SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
146 SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4), 148 SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
149 SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5),
147}; 150};
148 151
149/** 152/**
@@ -178,7 +181,7 @@ enum iwl_scan_type {
178 * @quiet_time: in msecs, dwell this time for active scan on quiet channels 181 * @quiet_time: in msecs, dwell this time for active scan on quiet channels
179 * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than 182 * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
180 * this number of packets were received (typically 1) 183 * this number of packets were received (typically 1)
181 * @passive2active: is auto switching from passive to active allowed (0 or 1) 184 * @passive2active: is auto switching from passive to active during scan allowed
182 * @rxchain_sel_flags: RXON_RX_CHAIN_* 185 * @rxchain_sel_flags: RXON_RX_CHAIN_*
183 * @max_out_time: in usecs, max out of serving channel time 186 * @max_out_time: in usecs, max out of serving channel time
184 * @suspend_time: how long to pause scan when returning to service channel: 187 * @suspend_time: how long to pause scan when returning to service channel:
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 700cce731770..d606197bde8f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -91,7 +91,6 @@
91 * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW 91 * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
92 * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration 92 * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
93 * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation 93 * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
94 * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
95 * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id 94 * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
96 * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped 95 * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
97 * @TX_CMD_FLG_EXEC_PAPD: execute PAPD 96 * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
@@ -120,7 +119,6 @@ enum iwl_tx_flags {
120 TX_CMD_FLG_RESP_TO_DRV = BIT(21), 119 TX_CMD_FLG_RESP_TO_DRV = BIT(21),
121 TX_CMD_FLG_CCMP_AGG = BIT(22), 120 TX_CMD_FLG_CCMP_AGG = BIT(22),
122 TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), 121 TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
123 TX_CMD_FLG_CTS_ONLY = BIT(24),
124 TX_CMD_FLG_DUR = BIT(25), 122 TX_CMD_FLG_DUR = BIT(25),
125 TX_CMD_FLG_FW_DROP = BIT(26), 123 TX_CMD_FLG_FW_DROP = BIT(26),
126 TX_CMD_FLG_EXEC_PAPD = BIT(27), 124 TX_CMD_FLG_EXEC_PAPD = BIT(27),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index cbfb3beae783..66264cc5a016 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -136,7 +136,7 @@ enum {
136 CALIB_RES_NOTIF_PHY_DB = 0x6b, 136 CALIB_RES_NOTIF_PHY_DB = 0x6b,
137 /* PHY_DB_CMD = 0x6c, */ 137 /* PHY_DB_CMD = 0x6c, */
138 138
139 /* Power */ 139 /* Power - legacy power table command */
140 POWER_TABLE_CMD = 0x77, 140 POWER_TABLE_CMD = 0x77,
141 141
142 /* Thermal Throttling*/ 142 /* Thermal Throttling*/
@@ -159,6 +159,7 @@ enum {
159 TX_ANT_CONFIGURATION_CMD = 0x98, 159 TX_ANT_CONFIGURATION_CMD = 0x98,
160 BT_CONFIG = 0x9b, 160 BT_CONFIG = 0x9b,
161 STATISTICS_NOTIFICATION = 0x9d, 161 STATISTICS_NOTIFICATION = 0x9d,
162 REDUCE_TX_POWER_CMD = 0x9f,
162 163
163 /* RF-KILL commands and notifications */ 164 /* RF-KILL commands and notifications */
164 CARD_STATE_CMD = 0xa0, 165 CARD_STATE_CMD = 0xa0,
@@ -166,6 +167,9 @@ enum {
166 167
167 MISSED_BEACONS_NOTIFICATION = 0xa2, 168 MISSED_BEACONS_NOTIFICATION = 0xa2,
168 169
170 /* Power - new power table command */
171 MAC_PM_POWER_TABLE = 0xa9,
172
169 REPLY_RX_PHY_CMD = 0xc0, 173 REPLY_RX_PHY_CMD = 0xc0,
170 REPLY_RX_MPDU_CMD = 0xc1, 174 REPLY_RX_MPDU_CMD = 0xc1,
171 BA_NOTIF = 0xc5, 175 BA_NOTIF = 0xc5,
@@ -223,6 +227,19 @@ struct iwl_tx_ant_cfg_cmd {
223 __le32 valid; 227 __le32 valid;
224} __packed; 228} __packed;
225 229
230/**
231 * struct iwl_reduce_tx_power_cmd - TX power reduction command
232 * REDUCE_TX_POWER_CMD = 0x9f
233 * @flags: (reserved for future implementation)
234 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
235 * @pwr_restriction: TX power restriction in dBm.
236 */
237struct iwl_reduce_tx_power_cmd {
238 u8 flags;
239 u8 mac_context_id;
240 __le16 pwr_restriction;
241} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
242
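
Filling the new command is a one-liner per field; only pwr_restriction needs byte-swapping. A host-side sketch (cpu_to_le16 stubbed, the 14 dBm cap chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

#define cpu_to_le16(x) ((uint16_t)(x))  /* little-endian host assumed */

struct reduce_tx_power_cmd {
        uint8_t flags;                  /* reserved */
        uint8_t mac_context_id;
        uint16_t pwr_restriction;       /* dBm, little endian */
} __attribute__((packed));

int main(void)
{
        struct reduce_tx_power_cmd cmd = {
                .flags = 0,
                .mac_context_id = 0,
                .pwr_restriction = cpu_to_le16(14),
        };

        printf("payload is %zu bytes\n", sizeof(cmd));
        return 0;
}
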
226/* 243/*
227 * Calibration control struct. 244 * Calibration control struct.
228 * Sent as part of the phy configuration command. 245 * Sent as part of the phy configuration command.
@@ -482,71 +499,199 @@ enum iwl_time_event_type {
482 TE_MAX 499 TE_MAX
483}; /* MAC_EVENT_TYPE_API_E_VER_1 */ 500}; /* MAC_EVENT_TYPE_API_E_VER_1 */
484 501
502
503
504/* Time event - defines for command API v1 */
505
506/*
507 * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
508 * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
509 * the first fragment is scheduled.
510 * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
511 * the first 2 fragments are scheduled.
512 * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
513 * number of fragments are valid.
514 *
515 * Other than the constants defined above, specifying a fragmentation value 'x'
516 * means that the event can be fragmented but only the first 'x' will be
517 * scheduled.
518 */
519enum {
520 TE_V1_FRAG_NONE = 0,
521 TE_V1_FRAG_SINGLE = 1,
522 TE_V1_FRAG_DUAL = 2,
523 TE_V1_FRAG_ENDLESS = 0xffffffff
524};
525
526/* If a Time Event can be fragmented, this is the max number of fragments */
527#define TE_V1_FRAG_MAX_MSK 0x0fffffff
528/* Repeat the time event endlessly (until removed) */
529#define TE_V1_REPEAT_ENDLESS 0xffffffff
530/* If a Time Event has bounded repetitions, this is the maximal value */
531#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
532
485/* Time Event dependencies: none, on another TE, or in a specific time */ 533/* Time Event dependencies: none, on another TE, or in a specific time */
486enum { 534enum {
487 TE_INDEPENDENT = 0, 535 TE_V1_INDEPENDENT = 0,
488 TE_DEP_OTHER = 1, 536 TE_V1_DEP_OTHER = BIT(0),
489 TE_DEP_TSF = 2, 537 TE_V1_DEP_TSF = BIT(1),
490 TE_EVENT_SOCIOPATHIC = 4, 538 TE_V1_EVENT_SOCIOPATHIC = BIT(2),
491}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ 539}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
540
492/* 541/*
542 * @TE_V1_NOTIF_NONE: no notifications
543 * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
544 * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end
545 * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
546 * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
547 * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
548 * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end
549 * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
550 * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
551 *
493 * Supported Time event notifications configuration. 552 * Supported Time event notifications configuration.
494 * A notification (both event and fragment) includes a status indicating whether 553 * A notification (both event and fragment) includes a status indicating whether
495 * the FW was able to schedule the event or not. For fragment start/end 554 * the FW was able to schedule the event or not. For fragment start/end
496 * notification the status is always success. There is no start/end fragment 555 * notification the status is always success. There is no start/end fragment
497 * notification for monolithic events. 556 * notification for monolithic events.
498 *
499 * @TE_NOTIF_NONE: no notifications
500 * @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start
501 * @TE_NOTIF_HOST_EVENT_END:request/receive notification on event end
502 * @TE_NOTIF_INTERNAL_EVENT_START: internal FW use
503 * @TE_NOTIF_INTERNAL_EVENT_END: internal FW use.
504 * @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start
505 * @TE_NOTIF_HOST_FRAG_END:request/receive notification on frag end
506 * @TE_NOTIF_INTERNAL_FRAG_START: internal FW use.
507 * @TE_NOTIF_INTERNAL_FRAG_END: internal FW use.
508 */ 557 */
509enum { 558enum {
510 TE_NOTIF_NONE = 0, 559 TE_V1_NOTIF_NONE = 0,
511 TE_NOTIF_HOST_EVENT_START = 0x1, 560 TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
512 TE_NOTIF_HOST_EVENT_END = 0x2, 561 TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
513 TE_NOTIF_INTERNAL_EVENT_START = 0x4, 562 TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
514 TE_NOTIF_INTERNAL_EVENT_END = 0x8, 563 TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
515 TE_NOTIF_HOST_FRAG_START = 0x10, 564 TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
516 TE_NOTIF_HOST_FRAG_END = 0x20, 565 TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
517 TE_NOTIF_INTERNAL_FRAG_START = 0x40, 566 TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
518 TE_NOTIF_INTERNAL_FRAG_END = 0x80 567 TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
519}; /* MAC_EVENT_ACTION_API_E_VER_2 */ 568}; /* MAC_EVENT_ACTION_API_E_VER_2 */
520 569
570
571/**
572 * struct iwl_time_event_cmd_api_v1 - configuring Time Events
573 * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
574 * with version 2. determined by IWL_UCODE_TLV_FLAGS)
575 * ( TIME_EVENT_CMD = 0x29 )
576 * @id_and_color: ID and color of the relevant MAC
577 * @action: action to perform, one of FW_CTXT_ACTION_*
578 * @id: this field has two meanings, depending on the action:
579 * If the action is ADD, then it means the type of event to add.
580 * For all other actions it is the unique event ID assigned when the
581 * event was added by the FW.
582 * @apply_time: When to start the Time Event (in GP2)
583 * @max_delay: maximum delay to event's start (apply time), in TU
584 * @depends_on: the unique ID of the event we depend on (if any)
585 * @interval: interval between repetitions, in TU
586 * @interval_reciprocal: 2^32 / interval
587 * @duration: duration of event in TU
588 * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
589 * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
590 * and TE_V1_EVENT_SOCIOPATHIC
591 * @is_present: 0 or 1, are we present or absent during the Time Event
592 * @max_frags: maximal number of fragments the Time Event can be divided to
593 * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
594 */
595struct iwl_time_event_cmd_v1 {
596 /* COMMON_INDEX_HDR_API_S_VER_1 */
597 __le32 id_and_color;
598 __le32 action;
599 __le32 id;
600 /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
601 __le32 apply_time;
602 __le32 max_delay;
603 __le32 dep_policy;
604 __le32 depends_on;
605 __le32 is_present;
606 __le32 max_frags;
607 __le32 interval;
608 __le32 interval_reciprocal;
609 __le32 duration;
610 __le32 repeat;
611 __le32 notify;
612} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
613
614
615/* Time event - defines for command API v2 */
616
521/* 617/*
522 * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed. 618 * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
523 * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only 619 * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
524 * the first fragment is scheduled. 620 * the first fragment is scheduled.
525 * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only 621 * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
526 * the first 2 fragments are scheduled. 622 * the first 2 fragments are scheduled.
527 * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number 623 * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
528 * of fragments are valid. 624 * number of fragments are valid.
529 * 625 *
530 * Other than the constants defined above, specifying a fragmentation value 'x' 626 * Other than the constants defined above, specifying a fragmentation value 'x'
531 * means that the event can be fragmented but only the first 'x' will be 627 * means that the event can be fragmented but only the first 'x' will be
532 * scheduled. 628 * scheduled.
533 */ 629 */
534enum { 630enum {
535 TE_FRAG_NONE = 0, 631 TE_V2_FRAG_NONE = 0,
536 TE_FRAG_SINGLE = 1, 632 TE_V2_FRAG_SINGLE = 1,
537 TE_FRAG_DUAL = 2, 633 TE_V2_FRAG_DUAL = 2,
538 TE_FRAG_ENDLESS = 0xffffffff 634 TE_V2_FRAG_MAX = 0xfe,
635 TE_V2_FRAG_ENDLESS = 0xff
539}; 636};
540 637
541/* Repeat the time event endlessly (until removed) */ 638/* Repeat the time event endlessly (until removed) */
542#define TE_REPEAT_ENDLESS (0xffffffff) 639#define TE_V2_REPEAT_ENDLESS 0xff
543/* If a Time Event has bounded repetitions, this is the maximal value */ 640/* If a Time Event has bounded repetitions, this is the maximal value */
544#define TE_REPEAT_MAX_MSK (0x0fffffff) 641#define TE_V2_REPEAT_MAX 0xfe
545/* If a Time Event can be fragmented, this is the max number of fragments */ 642
546#define TE_FRAG_MAX_MSK (0x0fffffff) 643#define TE_V2_PLACEMENT_POS 12
644#define TE_V2_ABSENCE_POS 15
645
646/* Time event policy values (for time event cmd api v2)
647 * A notification (both event and fragment) includes a status indicating weather
648 * the FW was able to schedule the event or not. For fragment start/end
649 * notification the status is always success. There is no start/end fragment
650 * notification for monolithic events.
651 *
652 * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
653 * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
654 * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end
655 * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
656 * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
657 * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
658 * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
659 * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
660 * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
661 * @TE_V2_DEP_OTHER: depends on another time event
662 * @TE_V2_DEP_TSF: depends on a specific time
663 * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
664 * @TE_V2_ABSENCE: are we present or absent during the Time Event.
665 */
666enum {
667 TE_V2_DEFAULT_POLICY = 0x0,
668
669 /* notifications (event start/stop, fragment start/stop) */
670 TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
671 TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
672 TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
673 TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
674
675 TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
676 TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
677 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
678 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
679
680 TE_V2_NOTIF_MSK = 0xff,
681
682 /* placement characteristics */
683 TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
684 TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
685 TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
686
687 /* are we present or absent during the Time Event. */
688 TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
689};
547 690
548/** 691/**
549 * struct iwl_time_event_cmd - configuring Time Events 692 * struct iwl_time_event_cmd_api_v2 - configuring Time Events
693 * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
694 * with version 1. determined by IWL_UCODE_TLV_FLAGS)
550 * ( TIME_EVENT_CMD = 0x29 ) 695 * ( TIME_EVENT_CMD = 0x29 )
551 * @id_and_color: ID and color of the relevant MAC 696 * @id_and_color: ID and color of the relevant MAC
552 * @action: action to perform, one of FW_CTXT_ACTION_* 697 * @action: action to perform, one of FW_CTXT_ACTION_*
@@ -558,32 +703,30 @@ enum {
558 * @max_delay: maximum delay to event's start (apply time), in TU 703 * @max_delay: maximum delay to event's start (apply time), in TU
559 * @depends_on: the unique ID of the event we depend on (if any) 704 * @depends_on: the unique ID of the event we depend on (if any)
560 * @interval: interval between repetitions, in TU 705 * @interval: interval between repetitions, in TU
561 * @interval_reciprocal: 2^32 / interval
562 * @duration: duration of event in TU 706 * @duration: duration of event in TU
563 * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS 707 * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
564 * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
565 * @is_present: 0 or 1, are we present or absent during the Time Event
566 * @max_frags: maximal number of fragments the Time Event can be divided to 708 * @max_frags: maximal number of fragments the Time Event can be divided to
567 * @notify: notifications using TE_NOTIF_* (whom to notify when) 709 * @policy: defines whether uCode shall notify the host or other uCode modules
710 * on event and/or fragment start and/or end,
711 * using one of TE_V2_DEP_OTHER, TE_V2_DEP_TSF and
712 * TE_V2_EVENT_SOCIOPATHIC for placement,
713 * using TE_V2_ABSENCE and using the TE_V2_NOTIF_* values
568 */ 714 */
569struct iwl_time_event_cmd { 715struct iwl_time_event_cmd_v2 {
570 /* COMMON_INDEX_HDR_API_S_VER_1 */ 716 /* COMMON_INDEX_HDR_API_S_VER_1 */
571 __le32 id_and_color; 717 __le32 id_and_color;
572 __le32 action; 718 __le32 action;
573 __le32 id; 719 __le32 id;
574 /* MAC_TIME_EVENT_DATA_API_S_VER_1 */ 720 /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
575 __le32 apply_time; 721 __le32 apply_time;
576 __le32 max_delay; 722 __le32 max_delay;
577 __le32 dep_policy;
578 __le32 depends_on; 723 __le32 depends_on;
579 __le32 is_present;
580 __le32 max_frags;
581 __le32 interval; 724 __le32 interval;
582 __le32 interval_reciprocal;
583 __le32 duration; 725 __le32 duration;
584 __le32 repeat; 726 u8 repeat;
585 __le32 notify; 727 u8 max_frags;
586} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */ 728 __le16 policy;
729} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
587 730
588/** 731/**
589 * struct iwl_time_event_resp - response structure to iwl_time_event_cmd 732 * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
@@ -765,6 +908,14 @@ struct iwl_phy_context_cmd {
765} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ 908} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
766 909
767#define IWL_RX_INFO_PHY_CNT 8 910#define IWL_RX_INFO_PHY_CNT 8
911#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
912#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
913#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
914#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
915#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
916#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
917#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
918
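
The three masks/positions above carve one 32-bit word of the RX info array into per-antenna energy bytes; unpacking it is a shift-and-mask exercise (the word value below is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t energy = 0x00251c33;   /* word at ENERGY_ANT_ABC_IDX */
        unsigned a = (energy & 0x000000ffu) >> 0;
        unsigned b = (energy & 0x0000ff00u) >> 8;
        unsigned c = (energy & 0x00ff0000u) >> 16;

        printf("energy A/B/C = %u/%u/%u\n", a, b, c);
        return 0;
}
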
768#define IWL_RX_INFO_AGC_IDX 1 919#define IWL_RX_INFO_AGC_IDX 1
769#define IWL_RX_INFO_RSSI_AB_IDX 2 920#define IWL_RX_INFO_RSSI_AB_IDX 2
770#define IWL_OFDM_AGC_A_MSK 0x0000007f 921#define IWL_OFDM_AGC_A_MSK 0x0000007f
@@ -1170,7 +1321,7 @@ struct mvm_statistics_general {
1170 struct mvm_statistics_general_common common; 1321 struct mvm_statistics_general_common common;
1171 __le32 beacon_filtered; 1322 __le32 beacon_filtered;
1172 __le32 missed_beacons; 1323 __le32 missed_beacons;
1173 __s8 beacon_filter_everage_energy; 1324 __s8 beacon_filter_average_energy;
1174 __s8 beacon_filter_reason; 1325 __s8 beacon_filter_reason;
1175 __s8 beacon_filter_current_energy; 1326 __s8 beacon_filter_current_energy;
1176 __s8 beacon_filter_reserved; 1327 __s8 beacon_filter_reserved;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index cd7c0032cc58..c76299a3a1e0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -78,22 +78,6 @@
78 78
79#define UCODE_VALID_OK cpu_to_le32(0x1) 79#define UCODE_VALID_OK cpu_to_le32(0x1)
80 80
81/* Default calibration values for WkP - set to INIT image w/o running */
82static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
83static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
84
85struct iwl_calib_default_data {
86 u16 size;
87 void *data;
88};
89
90#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
91
92static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
93 [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
94 [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
95};
96
97struct iwl_mvm_alive_data { 81struct iwl_mvm_alive_data {
98 bool valid; 82 bool valid;
99 u32 scd_base_addr; 83 u32 scd_base_addr;
@@ -248,40 +232,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
248 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 232 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
249} 233}
250 234
251static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
252{
253 u8 cmd_raw[16]; /* holds the variable size commands */
254 struct iwl_set_calib_default_cmd *cmd =
255 (struct iwl_set_calib_default_cmd *)cmd_raw;
256 int ret, i;
257
258 /* Setting default values for calibrations we don't run */
259 for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
260 u16 cmd_len;
261
262 if (wkp_calib_default_data[i].size == 0)
263 continue;
264
265 memset(cmd_raw, 0, sizeof(cmd_raw));
266 cmd_len = wkp_calib_default_data[i].size + sizeof(cmd);
267 cmd->calib_index = cpu_to_le16(i);
268 cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
269 if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
270 "Need to enlarge cmd_raw to %d\n", cmd_len))
271 break;
272 memcpy(cmd->data, wkp_calib_default_data[i].data,
273 wkp_calib_default_data[i].size);
274 ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
275 sizeof(*cmd) +
276 wkp_calib_default_data[i].size,
277 cmd);
278 if (ret)
279 return ret;
280 }
281
282 return 0;
283}
284
285int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) 235int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
286{ 236{
287 struct iwl_notification_wait calib_wait; 237 struct iwl_notification_wait calib_wait;
@@ -342,11 +292,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
342 if (ret) 292 if (ret)
343 goto error; 293 goto error;
344 294
345 /* need to set default values */
346 ret = iwl_set_default_calibrations(mvm);
347 if (ret)
348 goto error;
349
350 /* 295 /*
351 * Send phy configurations command to init uCode 296 * Send phy configurations command to init uCode
352 * to start the 16.0 uCode init image internal calibrations. 297 * to start the 16.0 uCode init image internal calibrations.
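
A side note on the deleted helper: it computed cmd_len as the payload size plus sizeof(cmd), where cmd is a pointer, so the WARN_ONCE guard appears to have compared against pointer size where sizeof(*cmd) was presumably intended; the removal moots that. The underlying pattern, a variable-length command built in a fixed stack buffer with an overflow guard, stays common; a generic sketch with illustrative names:

	u8 raw[16];
	struct var_cmd *cmd = (struct var_cmd *)raw;  /* var_cmd: illustrative */
	u16 cmd_len = sizeof(*cmd) + payload_len;     /* sizeof(*cmd), not sizeof(cmd) */

	if (WARN_ONCE(cmd_len > sizeof(raw), "Need to enlarge raw to %d\n", cmd_len))
		return -ENOBUFS;
	memset(raw, 0, sizeof(raw));
	memcpy(cmd->data, payload, payload_len);
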
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 94aae9c8562c..5fe23a5ea9b6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -264,7 +264,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
264 return 0; 264 return 0;
265 265
266 /* Therefore, in recovery, we can't get here */ 266 /* Therefore, in recovery, we can't get here */
267 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); 267 if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
268 return -EBUSY;
268 269
269 mvmvif->id = find_first_bit(data.available_mac_ids, 270 mvmvif->id = find_first_bit(data.available_mac_ids,
270 NUM_MAC_INDEX_DRIVER); 271 NUM_MAC_INDEX_DRIVER);
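
The switch from a bare WARN_ON_ONCE() to one that gates a return relies on the macro evaluating to its condition, so a single statement both logs a one-shot warning and refuses to continue with the inconsistent state. The idiom in isolation, with an assumed local flag:

	if (WARN_ON_ONCE(in_hw_restart))  /* in_hw_restart: assumed */
		return -EBUSY;
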
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f19baf0dea6b..9833cdf6177c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -153,7 +153,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
153 IEEE80211_HW_SUPPORTS_DYNAMIC_PS | 153 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
154 IEEE80211_HW_AMPDU_AGGREGATION | 154 IEEE80211_HW_AMPDU_AGGREGATION |
155 IEEE80211_HW_TIMING_BEACON_ONLY | 155 IEEE80211_HW_TIMING_BEACON_ONLY |
156 IEEE80211_HW_CONNECTION_MONITOR; 156 IEEE80211_HW_CONNECTION_MONITOR |
157 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
158 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
159 IEEE80211_HW_SUPPORTS_UAPSD;
157 160
158 hw->queues = IWL_MVM_FIRST_AGG_QUEUE; 161 hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
159 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 162 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -188,6 +191,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
188 191
189 hw->wiphy->max_remain_on_channel_duration = 10000; 192 hw->wiphy->max_remain_on_channel_duration = 10000;
190 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 193 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
194 hw->uapsd_queues = IWL_UAPSD_AC_INFO;
195 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
191 196
192 /* Extract MAC address */ 197 /* Extract MAC address */
193 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 198 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -506,7 +511,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
506 511
507 mutex_lock(&mvm->mutex); 512 mutex_lock(&mvm->mutex);
508 513
509 /* Allocate resources for the MAC context, and add it the the fw */ 514 /* Allocate resources for the MAC context, and add it to the fw */
510 ret = iwl_mvm_mac_ctxt_init(mvm, vif); 515 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
511 if (ret) 516 if (ret)
512 goto out_unlock; 517 goto out_unlock;
@@ -552,6 +557,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
552 goto out_release; 557 goto out_release;
553 } 558 }
554 559
560 iwl_mvm_vif_dbgfs_register(mvm, vif);
555 goto out_unlock; 561 goto out_unlock;
556 } 562 }
557 563
@@ -566,16 +572,18 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
566 iwl_mvm_power_update_mode(mvm, vif); 572 iwl_mvm_power_update_mode(mvm, vif);
567 573
568 /* beacon filtering */ 574 /* beacon filtering */
575 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
576 if (ret)
577 goto out_remove_mac;
578
569 if (!mvm->bf_allowed_vif && 579 if (!mvm->bf_allowed_vif &&
570 vif->type == NL80211_IFTYPE_STATION && !vif->p2p){ 580 vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
581 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
571 mvm->bf_allowed_vif = mvmvif; 582 mvm->bf_allowed_vif = mvmvif;
572 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; 583 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
584 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
573 } 585 }
574 586
575 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
576 if (ret)
577 goto out_release;
578
579 /* 587 /*
580 * P2P_DEVICE interface does not have a channel context assigned to it, 588 * P2P_DEVICE interface does not have a channel context assigned to it,
581 * so a dedicated PHY context is allocated to it and the corresponding 589 * so a dedicated PHY context is allocated to it and the corresponding
@@ -586,7 +594,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
586 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 594 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
587 if (!mvmvif->phy_ctxt) { 595 if (!mvmvif->phy_ctxt) {
588 ret = -ENOSPC; 596 ret = -ENOSPC;
589 goto out_remove_mac; 597 goto out_free_bf;
590 } 598 }
591 599
592 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 600 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
@@ -610,6 +618,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
610 iwl_mvm_binding_remove_vif(mvm, vif); 618 iwl_mvm_binding_remove_vif(mvm, vif);
611 out_unref_phy: 619 out_unref_phy:
612 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 620 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
621 out_free_bf:
622 if (mvm->bf_allowed_vif == mvmvif) {
623 mvm->bf_allowed_vif = NULL;
624 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
625 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
626 }
613 out_remove_mac: 627 out_remove_mac:
614 mvmvif->phy_ctxt = NULL; 628 mvmvif->phy_ctxt = NULL;
615 iwl_mvm_mac_ctxt_remove(mvm, vif); 629 iwl_mvm_mac_ctxt_remove(mvm, vif);
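
The new out_free_bf label slots into the usual unwind ladder: labels run in reverse order of acquisition, and each failure site jumps to the first label that releases everything taken so far. A compressed, non-buildable sketch of the resulting shape, using the labels from the hunk:

	if (!mvmvif->phy_ctxt) {
		ret = -ENOSPC;
		goto out_free_bf;           /* bf state was claimed just before */
	}
	...
 out_unref_phy:
	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
 out_free_bf:                               /* undo the bf_allowed_vif claim */
	if (mvm->bf_allowed_vif == mvmvif)
		mvm->bf_allowed_vif = NULL;
 out_remove_mac:
	iwl_mvm_mac_ctxt_remove(mvm, vif);
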
@@ -674,7 +688,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
674 688
675 if (mvm->bf_allowed_vif == mvmvif) { 689 if (mvm->bf_allowed_vif == mvmvif) {
676 mvm->bf_allowed_vif = NULL; 690 mvm->bf_allowed_vif = NULL;
677 vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; 691 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
692 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
678 } 693 }
679 694
680 iwl_mvm_vif_dbgfs_clean(mvm, vif); 695 iwl_mvm_vif_dbgfs_clean(mvm, vif);
@@ -719,6 +734,20 @@ out_release:
719 mutex_unlock(&mvm->mutex); 734 mutex_unlock(&mvm->mutex);
720} 735}
721 736
737static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
738 s8 tx_power)
739{
740 /* FW is in charge of regulatory enforcement */
741 struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
742 .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
743 .pwr_restriction = cpu_to_le16(tx_power),
744 };
745
746 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
747 sizeof(reduce_txpwr_cmd),
748 &reduce_txpwr_cmd);
749}
750
722static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) 751static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
723{ 752{
724 return 0; 753 return 0;
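
One subtlety in iwl_mvm_set_tx_power(): the s8 tx_power argument is widened into the little-endian 16-bit pwr_restriction field, so negative dBm values sign-extend before the byte swap. A two-line worked example with an assumed value:

	s8 txpower = -4;                     /* assumed example, in dBm */
	__le16 wire = cpu_to_le16(txpower);  /* sign-extends to 0xfffc, then LE */
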
@@ -766,7 +795,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
766 IWL_ERR(mvm, "failed to update quotas\n"); 795 IWL_ERR(mvm, "failed to update quotas\n");
767 return; 796 return;
768 } 797 }
769 iwl_mvm_bt_coex_vif_assoc(mvm, vif);
770 iwl_mvm_configure_mcast_filter(mvm, vif); 798 iwl_mvm_configure_mcast_filter(mvm, vif);
771 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { 799 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
772 /* remove AP station now that the MAC is unassoc */ 800 /* remove AP station now that the MAC is unassoc */
@@ -779,9 +807,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
779 if (ret) 807 if (ret)
780 IWL_ERR(mvm, "failed to update quotas\n"); 808 IWL_ERR(mvm, "failed to update quotas\n");
781 } 809 }
782 ret = iwl_mvm_power_update_mode(mvm, vif); 810
783 if (ret) 811 /* reset rssi values */
784 IWL_ERR(mvm, "failed to update power mode\n"); 812 mvmvif->bf_data.ave_beacon_signal = 0;
813
814 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
815 /* Workaround for FW bug, otherwise FW disables device
816 * power save upon disassociation
817 */
818 ret = iwl_mvm_power_update_mode(mvm, vif);
819 if (ret)
820 IWL_ERR(mvm, "failed to update power mode\n");
821 }
822 iwl_mvm_bt_coex_vif_assoc(mvm, vif);
785 } else if (changes & BSS_CHANGED_BEACON_INFO) { 823 } else if (changes & BSS_CHANGED_BEACON_INFO) {
786 /* 824 /*
787 * We received a beacon _after_ association so 825 * We received a beacon _after_ association so
@@ -789,11 +827,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
789 */ 827 */
790 iwl_mvm_remove_time_event(mvm, mvmvif, 828 iwl_mvm_remove_time_event(mvm, mvmvif,
791 &mvmvif->time_event_data); 829 &mvmvif->time_event_data);
792 } else if (changes & BSS_CHANGED_PS) { 830 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
793 ret = iwl_mvm_power_update_mode(mvm, vif); 831 ret = iwl_mvm_power_update_mode(mvm, vif);
794 if (ret) 832 if (ret)
795 IWL_ERR(mvm, "failed to update power mode\n"); 833 IWL_ERR(mvm, "failed to update power mode\n");
796 } 834 }
835 if (changes & BSS_CHANGED_TXPOWER) {
836 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
837 bss_conf->txpower);
838 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
839 }
840
841 if (changes & BSS_CHANGED_CQM) {
842 IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
843 /* reset cqm events tracking */
844 mvmvif->bf_data.last_cqm_event = 0;
845 ret = iwl_mvm_update_beacon_filter(mvm, vif);
846 if (ret)
847 IWL_ERR(mvm, "failed to update CQM thresholds\n");
848 }
797} 849}
798 850
799static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 851static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
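
Several hunks in this file key behaviour off firmware capability bits: the disassociation power-save workaround checks IWL_UCODE_TLV_FLAGS_UAPSD, and beacon filtering in add_interface now also requires IWL_UCODE_TLV_FLAGS_BF_UPDATED. The test is a plain bitmask probe on the parsed ucode TLV data:

	bool has_uapsd = mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD;
	bool has_bf    = mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED;
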
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 420e82d379d9..b0389279cc1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -76,6 +76,7 @@
76#include "iwl-trans.h" 76#include "iwl-trans.h"
77#include "sta.h" 77#include "sta.h"
78#include "fw-api.h" 78#include "fw-api.h"
79#include "constants.h"
79 80
80#define IWL_INVALID_MAC80211_QUEUE 0xff 81#define IWL_INVALID_MAC80211_QUEUE 0xff
81#define IWL_MVM_MAX_ADDRESSES 5 82#define IWL_MVM_MAX_ADDRESSES 5
@@ -91,6 +92,9 @@ enum iwl_mvm_tx_fifo {
91}; 92};
92 93
93extern struct ieee80211_ops iwl_mvm_hw_ops; 94extern struct ieee80211_ops iwl_mvm_hw_ops;
95extern const struct iwl_mvm_power_ops pm_legacy_ops;
96extern const struct iwl_mvm_power_ops pm_mac_ops;
97
94/** 98/**
95 * struct iwl_mvm_mod_params - module parameters for iwlmvm 99 * struct iwl_mvm_mod_params - module parameters for iwlmvm
96 * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. 100 * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
@@ -149,6 +153,22 @@ enum iwl_power_scheme {
149}; 153};
150 154
151#define IWL_CONN_MAX_LISTEN_INTERVAL 70 155#define IWL_CONN_MAX_LISTEN_INTERVAL 70
156#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
157 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
158 IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
159 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
160#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
161
162struct iwl_mvm_power_ops {
163 int (*power_update_mode)(struct iwl_mvm *mvm,
164 struct ieee80211_vif *vif);
165 int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
166#ifdef CONFIG_IWLWIFI_DEBUGFS
167 int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
168 char *buf, int bufsz);
169#endif
170};
171
152 172
153#ifdef CONFIG_IWLWIFI_DEBUGFS 173#ifdef CONFIG_IWLWIFI_DEBUGFS
154enum iwl_dbgfs_pm_mask { 174enum iwl_dbgfs_pm_mask {
@@ -160,10 +180,11 @@ enum iwl_dbgfs_pm_mask {
160 MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5), 180 MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
161 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), 181 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
162 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), 182 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
183 MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
163}; 184};
164 185
165struct iwl_dbgfs_pm { 186struct iwl_dbgfs_pm {
166 u8 keep_alive_seconds; 187 u16 keep_alive_seconds;
167 u32 rx_data_timeout; 188 u32 rx_data_timeout;
168 u32 tx_data_timeout; 189 u32 tx_data_timeout;
169 bool skip_over_dtim; 190 bool skip_over_dtim;
@@ -171,6 +192,7 @@ struct iwl_dbgfs_pm {
171 bool disable_power_off; 192 bool disable_power_off;
172 bool lprx_ena; 193 bool lprx_ena;
173 u32 lprx_rssi_threshold; 194 u32 lprx_rssi_threshold;
195 bool snooze_ena;
174 int mask; 196 int mask;
175}; 197};
176 198
@@ -180,24 +202,28 @@ enum iwl_dbgfs_bf_mask {
180 MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), 202 MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
181 MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), 203 MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
182 MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), 204 MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
183 MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3), 205 MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
184 MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4), 206 MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
185 MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5), 207 MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
186 MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6), 208 MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
187 MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7), 209 MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
188 MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8), 210 MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
211 MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
212 MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
189}; 213};
190 214
191struct iwl_dbgfs_bf { 215struct iwl_dbgfs_bf {
192 u8 bf_energy_delta; 216 u32 bf_energy_delta;
193 u8 bf_roaming_energy_delta; 217 u32 bf_roaming_energy_delta;
194 u8 bf_roaming_state; 218 u32 bf_roaming_state;
195 u8 bf_temperature_delta; 219 u32 bf_temp_threshold;
196 u8 bf_enable_beacon_filter; 220 u32 bf_temp_fast_filter;
197 u8 bf_debug_flag; 221 u32 bf_temp_slow_filter;
222 u32 bf_enable_beacon_filter;
223 u32 bf_debug_flag;
198 u32 bf_escape_timer; 224 u32 bf_escape_timer;
199 u32 ba_escape_timer; 225 u32 ba_escape_timer;
200 u8 ba_enable_beacon_abort; 226 u32 ba_enable_beacon_abort;
201 int mask; 227 int mask;
202}; 228};
203#endif 229#endif
@@ -209,6 +235,21 @@ enum iwl_mvm_smps_type_request {
209}; 235};
210 236
211/** 237/**
238* struct iwl_mvm_vif_bf_data - beacon filtering related data
239* @bf_enabled: indicates if beacon filtering is enabled
240* @ba_enabled: indicates if beacon abort is enabled
242* @ave_beacon_signal: average beacon signal
243* @last_cqm_event: rssi of the last cqm event
244*/
245struct iwl_mvm_vif_bf_data {
246 bool bf_enabled;
247 bool ba_enabled;
248 s8 ave_beacon_signal;
249 s8 last_cqm_event;
250};
251
252/**
212 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context 253 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
213 * @id: between 0 and 3 254 * @id: between 0 and 3
214 * @color: to solve races upon MAC addition and removal 255 * @color: to solve races upon MAC addition and removal
@@ -233,8 +274,7 @@ struct iwl_mvm_vif {
233 bool uploaded; 274 bool uploaded;
234 bool ap_active; 275 bool ap_active;
235 bool monitor_active; 276 bool monitor_active;
236 /* indicate whether beacon filtering is enabled */ 277 struct iwl_mvm_vif_bf_data bf_data;
237 bool bf_enabled;
238 278
239 u32 ap_beacon_time; 279 u32 ap_beacon_time;
240 280
@@ -268,7 +308,7 @@ struct iwl_mvm_vif {
268 308
269#if IS_ENABLED(CONFIG_IPV6) 309#if IS_ENABLED(CONFIG_IPV6)
270 /* IPv6 addresses for WoWLAN */ 310 /* IPv6 addresses for WoWLAN */
271 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS]; 311 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
272 int num_target_ipv6_addrs; 312 int num_target_ipv6_addrs;
273#endif 313#endif
274#endif 314#endif
@@ -402,6 +442,8 @@ struct iwl_mvm {
402 442
403 struct iwl_notif_wait_data notif_wait; 443 struct iwl_notif_wait_data notif_wait;
404 444
445 struct mvm_statistics_rx rx_stats;
446
405 unsigned long transport_queue_stop; 447 unsigned long transport_queue_stop;
406 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; 448 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
407 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; 449 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
@@ -459,6 +501,9 @@ struct iwl_mvm {
459 */ 501 */
460 u8 vif_count; 502 u8 vif_count;
461 503
504 /* -1 for always, 0 for never, >0 for that many times */
505 s8 restart_fw;
506
462 struct led_classdev led; 507 struct led_classdev led;
463 508
464 struct ieee80211_vif *p2p_device_vif; 509 struct ieee80211_vif *p2p_device_vif;
@@ -482,6 +527,8 @@ struct iwl_mvm {
482 /* Thermal Throttling and CTkill */ 527 /* Thermal Throttling and CTkill */
483 struct iwl_mvm_tt_mgmt thermal_throttle; 528 struct iwl_mvm_tt_mgmt thermal_throttle;
484 s32 temperature; /* Celsius */ 529 s32 temperature; /* Celsius */
530
531 const struct iwl_mvm_power_ops *pm_ops;
485}; 532};
486 533
487/* Extract MVM priv from op_mode and _hw */ 534/* Extract MVM priv from op_mode and _hw */
@@ -525,6 +572,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
525 enum ieee80211_band band); 572 enum ieee80211_band band);
526u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); 573u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
527void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); 574void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
575void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
528u8 first_antenna(u8 mask); 576u8 first_antenna(u8 mask);
529u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); 577u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
530 578
@@ -660,10 +708,26 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
660 u8 flags, bool init); 708 u8 flags, bool init);
661 709
662/* power management */ 710/* power management */
663int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 711static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
664int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 712 struct ieee80211_vif *vif)
665void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 713{
666 struct iwl_powertable_cmd *cmd); 714 return mvm->pm_ops->power_update_mode(mvm, vif);
715}
716
717static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
718 struct ieee80211_vif *vif)
719{
720 return mvm->pm_ops->power_disable(mvm, vif);
721}
722
723#ifdef CONFIG_IWLWIFI_DEBUGFS
724static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
725 struct ieee80211_vif *vif,
726 char *buf, int bufsz)
727{
728 return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
729}
730#endif
667 731
668int iwl_mvm_leds_init(struct iwl_mvm *mvm); 732int iwl_mvm_leds_init(struct iwl_mvm *mvm);
669void iwl_mvm_leds_exit(struct iwl_mvm *mvm); 733void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -707,6 +771,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
707 struct ieee80211_vif *vif); 771 struct ieee80211_vif *vif);
708int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 772int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
709 struct ieee80211_vif *vif); 773 struct ieee80211_vif *vif);
774int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
775 struct iwl_beacon_filter_cmd *cmd);
776int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
777 struct ieee80211_vif *vif, bool enable);
778int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
779 struct ieee80211_vif *vif);
710 780
711/* SMPS */ 781/* SMPS */
712void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 782void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
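
The header now routes every power call through a const ops table, so the legacy and per-MAC power firmware paths coexist behind one call site. A minimal sketch of the dispatch, using only names from the hunks above (the has_uapsd flag is assumed; the real selection happens in ops.c below):

	mvm->pm_ops = has_uapsd ? &pm_mac_ops : &pm_legacy_ops;

	/* later, through the inline wrapper */
	ret = iwl_mvm_power_update_mode(mvm, vif);  /* -> pm_ops->power_update_mode() */
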
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index af79a14063a9..2fcc8ef88a68 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -275,6 +275,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
275 CMD(BEACON_NOTIFICATION), 275 CMD(BEACON_NOTIFICATION),
276 CMD(BEACON_TEMPLATE_CMD), 276 CMD(BEACON_TEMPLATE_CMD),
277 CMD(STATISTICS_NOTIFICATION), 277 CMD(STATISTICS_NOTIFICATION),
278 CMD(REDUCE_TX_POWER_CMD),
278 CMD(TX_ANT_CONFIGURATION_CMD), 279 CMD(TX_ANT_CONFIGURATION_CMD),
279 CMD(D3_CONFIG_CMD), 280 CMD(D3_CONFIG_CMD),
280 CMD(PROT_OFFLOAD_CONFIG_CMD), 281 CMD(PROT_OFFLOAD_CONFIG_CMD),
@@ -301,6 +302,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
301 CMD(MCAST_FILTER_CMD), 302 CMD(MCAST_FILTER_CMD),
302 CMD(REPLY_BEACON_FILTERING_CMD), 303 CMD(REPLY_BEACON_FILTERING_CMD),
303 CMD(REPLY_THERMAL_MNG_BACKOFF), 304 CMD(REPLY_THERMAL_MNG_BACKOFF),
305 CMD(MAC_PM_POWER_TABLE),
304}; 306};
305#undef CMD 307#undef CMD
306 308
@@ -340,6 +342,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
340 mvm->fw = fw; 342 mvm->fw = fw;
341 mvm->hw = hw; 343 mvm->hw = hw;
342 344
345 mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
346
343 mutex_init(&mvm->mutex); 347 mutex_init(&mvm->mutex);
344 spin_lock_init(&mvm->async_handlers_lock); 348 spin_lock_init(&mvm->async_handlers_lock);
345 INIT_LIST_HEAD(&mvm->time_event_list); 349 INIT_LIST_HEAD(&mvm->time_event_list);
@@ -431,6 +435,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
431 if (err) 435 if (err)
432 goto out_unregister; 436 goto out_unregister;
433 437
438 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
439 mvm->pm_ops = &pm_mac_ops;
440 else
441 mvm->pm_ops = &pm_legacy_ops;
442
443 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
444
434 return op_mode; 445 return op_mode;
435 446
436 out_unregister: 447 out_unregister:
@@ -638,6 +649,22 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
638 ieee80211_free_txskb(mvm->hw, skb); 649 ieee80211_free_txskb(mvm->hw, skb);
639} 650}
640 651
652struct iwl_mvm_reprobe {
653 struct device *dev;
654 struct work_struct work;
655};
656
657static void iwl_mvm_reprobe_wk(struct work_struct *wk)
658{
659 struct iwl_mvm_reprobe *reprobe;
660
661 reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
662 if (device_reprobe(reprobe->dev))
663 dev_err(reprobe->dev, "reprobe failed!\n");
664 kfree(reprobe);
665 module_put(THIS_MODULE);
666}
667
641static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) 668static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
642{ 669{
643 iwl_abort_notification_waits(&mvm->notif_wait); 670 iwl_abort_notification_waits(&mvm->notif_wait);
@@ -649,9 +676,30 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
649 * can't recover this since we're already half suspended. 676 * can't recover this since we're already half suspended.
650 */ 677 */
651 if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 678 if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
652 IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n"); 679 struct iwl_mvm_reprobe *reprobe;
653 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && 680
654 iwlwifi_mod_params.restart_fw) { 681 IWL_ERR(mvm,
682 "Firmware error during reconfiguration - reprobe!\n");
683
684 /*
685 * get a module reference to avoid doing this while unloading
686 * anyway and to avoid scheduling a work with code that's
687 * being removed.
688 */
689 if (!try_module_get(THIS_MODULE)) {
690 IWL_ERR(mvm, "Module is being unloaded - abort\n");
691 return;
692 }
693
694 reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
695 if (!reprobe) {
696 module_put(THIS_MODULE);
697 return;
698 }
699 reprobe->dev = mvm->trans->dev;
700 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
701 schedule_work(&reprobe->work);
702 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
655 /* 703 /*
656 * This is a bit racy, but worst case we tell mac80211 about 704 * This is a bit racy, but worst case we tell mac80211 about
657 * a stopped/aborted (sched) scan when that was already done 705 * a stopped/aborted (sched) scan when that was already done
@@ -669,6 +717,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
669 break; 717 break;
670 } 718 }
671 719
720 if (mvm->restart_fw > 0)
721 mvm->restart_fw--;
672 ieee80211_restart_hw(mvm->hw); 722 ieee80211_restart_hw(mvm->hw);
673 } 723 }
674} 724}
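
restart_fw is a small tri-state counter; working the decrement above through its documented values:

	/* -1 : (restart_fw > 0) is false, never decremented -> restart forever
	 *  0 : the restart branch is never entered          -> never restart
	 *  2 : decremented per restart, 2 -> 1 -> 0          -> two restarts, then stop
	 */
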
@@ -678,6 +728,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
678 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 728 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
679 729
680 iwl_mvm_dump_nic_error_log(mvm); 730 iwl_mvm_dump_nic_error_log(mvm);
731 if (!mvm->restart_fw)
732 iwl_mvm_dump_sram(mvm);
681 733
682 iwl_mvm_nic_restart(mvm); 734 iwl_mvm_nic_restart(mvm);
683} 735}
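
The reprobe path pins the module before queuing work so the work function's code cannot be unloaded while the work is pending; the reference is dropped as the work's final action. The shape of the pattern, condensed from the hunk above, with the failure path undoing the pin:

	if (!try_module_get(THIS_MODULE))
		return;                           /* already unloading, give up */

	reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
	if (!reprobe) {
		module_put(THIS_MODULE);          /* undo the pin on error */
		return;
	}
	INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);  /* wk ends with module_put() */
	schedule_work(&reprobe->work);
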
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index e7ca965a89b8..21407a353a3b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -75,8 +75,8 @@
75 75
76#define POWER_KEEP_ALIVE_PERIOD_SEC 25 76#define POWER_KEEP_ALIVE_PERIOD_SEC 25
77 77
78static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, 78int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
79 struct iwl_beacon_filter_cmd *cmd) 79 struct iwl_beacon_filter_cmd *cmd)
80{ 80{
81 int ret; 81 int ret;
82 82
@@ -85,69 +85,110 @@ static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
85 85
86 if (!ret) { 86 if (!ret) {
87 IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", 87 IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
88 cmd->ba_enable_beacon_abort); 88 le32_to_cpu(cmd->ba_enable_beacon_abort));
89 IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", 89 IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
90 cmd->ba_escape_timer); 90 le32_to_cpu(cmd->ba_escape_timer));
91 IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", 91 IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
92 cmd->bf_debug_flag); 92 le32_to_cpu(cmd->bf_debug_flag));
93 IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", 93 IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
94 cmd->bf_enable_beacon_filter); 94 le32_to_cpu(cmd->bf_enable_beacon_filter));
95 IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", 95 IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
96 cmd->bf_energy_delta); 96 le32_to_cpu(cmd->bf_energy_delta));
97 IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", 97 IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
98 cmd->bf_escape_timer); 98 le32_to_cpu(cmd->bf_escape_timer));
99 IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", 99 IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
100 cmd->bf_roaming_energy_delta); 100 le32_to_cpu(cmd->bf_roaming_energy_delta));
101 IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", 101 IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
102 cmd->bf_roaming_state); 102 le32_to_cpu(cmd->bf_roaming_state));
103 IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n", 103 IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
104 cmd->bf_temperature_delta); 104 le32_to_cpu(cmd->bf_temp_threshold));
105 IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
106 le32_to_cpu(cmd->bf_temp_fast_filter));
107 IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
108 le32_to_cpu(cmd->bf_temp_slow_filter));
105 } 109 }
106 return ret; 110 return ret;
107} 111}
108 112
109static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, 113static
110 struct ieee80211_vif *vif, bool enable) 114void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
115 struct ieee80211_vif *vif,
116 struct iwl_beacon_filter_cmd *cmd)
117{
118 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
119
120 if (vif->bss_conf.cqm_rssi_thold) {
121 cmd->bf_energy_delta =
122 cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
123 /* fw uses an absolute value for this */
124 cmd->bf_roaming_state =
125 cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
126 }
127 cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
128}
129
130int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
131 struct ieee80211_vif *vif, bool enable)
111{ 132{
112 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 133 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
113 struct iwl_beacon_filter_cmd cmd = { 134 struct iwl_beacon_filter_cmd cmd = {
114 IWL_BF_CMD_CONFIG_DEFAULTS, 135 IWL_BF_CMD_CONFIG_DEFAULTS,
115 .bf_enable_beacon_filter = 1, 136 .bf_enable_beacon_filter = cpu_to_le32(1),
116 .ba_enable_beacon_abort = enable, 137 .ba_enable_beacon_abort = cpu_to_le32(enable),
117 }; 138 };
118 139
119 if (!mvmvif->bf_enabled) 140 if (!mvmvif->bf_data.bf_enabled)
120 return 0; 141 return 0;
121 142
143 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
144 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
145
146 mvmvif->bf_data.ba_enabled = enable;
147 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
122 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); 148 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
123 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); 149 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
124} 150}
125 151
126static void iwl_mvm_power_log(struct iwl_mvm *mvm, 152static void iwl_mvm_power_log(struct iwl_mvm *mvm,
127 struct iwl_powertable_cmd *cmd) 153 struct iwl_mac_power_cmd *cmd)
128{ 154{
129 IWL_DEBUG_POWER(mvm, 155 IWL_DEBUG_POWER(mvm,
130 "Sending power table command for power level %d, flags = 0x%X\n", 156 "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
131 iwlmvm_mod_params.power_scheme, 157 cmd->id_and_color, iwlmvm_mod_params.power_scheme,
132 le16_to_cpu(cmd->flags)); 158 le16_to_cpu(cmd->flags));
133 IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds); 159 IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
134 160 le16_to_cpu(cmd->keep_alive_seconds));
135 if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { 161
136 IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", 162 if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
137 le32_to_cpu(cmd->rx_data_timeout)); 163 IWL_DEBUG_POWER(mvm, "Disable power management\n");
138 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", 164 return;
139 le32_to_cpu(cmd->tx_data_timeout)); 165 }
140 if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) 166
141 IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", 167 IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
142 le32_to_cpu(cmd->skip_dtim_periods)); 168 le32_to_cpu(cmd->rx_data_timeout));
143 if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) 169 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
144 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", 170 le32_to_cpu(cmd->tx_data_timeout));
145 le32_to_cpu(cmd->lprx_rssi_threshold)); 171 if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
172 IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
173 cmd->skip_dtim_periods);
174 if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
175 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
176 cmd->lprx_rssi_threshold);
177 if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
178 IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
179 IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
180 le32_to_cpu(cmd->rx_data_timeout_uapsd));
181 IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
182 le32_to_cpu(cmd->tx_data_timeout_uapsd));
183 IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
184 IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
185 IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
146 } 186 }
147} 187}
148 188
149void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 189static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
150 struct iwl_powertable_cmd *cmd) 190 struct ieee80211_vif *vif,
191 struct iwl_mac_power_cmd *cmd)
151{ 192{
152 struct ieee80211_hw *hw = mvm->hw; 193 struct ieee80211_hw *hw = mvm->hw;
153 struct ieee80211_chanctx_conf *chanctx_conf; 194 struct ieee80211_chanctx_conf *chanctx_conf;
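
iwl_mvm_beacon_filter_set_cqm_params() maps mac80211's CQM knobs onto the filter command: the hysteresis becomes bf_energy_delta, and the threshold (a negative dBm value) is negated because the firmware expects a magnitude. Worked through with assumed numbers, cqm_rssi_thold = -75 and cqm_rssi_hyst = 3:

	cmd->bf_energy_delta  = cpu_to_le32(3);   /* = cqm_rssi_hyst */
	cmd->bf_roaming_state = cpu_to_le32(75);  /* = -(-75), fw takes the magnitude */
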
@@ -157,20 +198,29 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
157 bool radar_detect = false; 198 bool radar_detect = false;
158 struct iwl_mvm_vif *mvmvif __maybe_unused = 199 struct iwl_mvm_vif *mvmvif __maybe_unused =
159 iwl_mvm_vif_from_mac80211(vif); 200 iwl_mvm_vif_from_mac80211(vif);
201 enum ieee80211_ac_numbers ac;
202 bool tid_found = false;
203
204 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
205 mvmvif->color));
206 dtimper = hw->conf.ps_dtim_period ?: 1;
160 207
161 /* 208 /*
162 * Regardless of power management state the driver must set 209 * Regardless of power management state the driver must set
163 * keep alive period. FW will use it for sending keep alive NDPs 210 * keep alive period. FW will use it for sending keep alive NDPs
164 * immediately after association. 211 * immediately after association. Check that keep alive period
212 * is at least 3 * DTIM
165 */ 213 */
166 cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC; 214 dtimper_msec = dtimper * vif->bss_conf.beacon_int;
215 keep_alive = max_t(int, 3 * dtimper_msec,
216 MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
217 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
218 cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
167 219
168 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) 220 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
169 return; 221 return;
170 222
171 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 223 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
172 if (!vif->bss_conf.assoc)
173 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
174 224
175#ifdef CONFIG_IWLWIFI_DEBUGFS 225#ifdef CONFIG_IWLWIFI_DEBUGFS
176 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && 226 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
@@ -186,12 +236,9 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
186 (vif->bss_conf.beacon_rate->bitrate == 10 || 236 (vif->bss_conf.beacon_rate->bitrate == 10 ||
187 vif->bss_conf.beacon_rate->bitrate == 60)) { 237 vif->bss_conf.beacon_rate->bitrate == 60)) {
188 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); 238 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
189 cmd->lprx_rssi_threshold = 239 cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
190 cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
191 } 240 }
192 241
193 dtimper = hw->conf.ps_dtim_period ?: 1;
194
195 /* Check if radar detection is required on current channel */ 242 /* Check if radar detection is required on current channel */
196 rcu_read_lock(); 243 rcu_read_lock();
197 chanctx_conf = rcu_dereference(vif->chanctx_conf); 244 chanctx_conf = rcu_dereference(vif->chanctx_conf);
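
The relocated keep-alive computation floors the value at both three DTIM intervals and the 25 s POWER_KEEP_ALIVE_PERIOD_SEC constant, then rounds up to whole seconds. Worked through with assumed numbers (dtim_period = 10, beacon_int = 1000 TU, treated as roughly ms here):

	/* dtimper_msec = 10 * 1000                         = 10000
	 * keep_alive   = max(3 * 10000, 25 * MSEC_PER_SEC) = 30000 ms
	 * keep_alive   = DIV_ROUND_UP(30000, MSEC_PER_SEC) = 30 s
	 * with a short DTIM the 25 s floor wins instead */
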
@@ -207,27 +254,82 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
207 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || 254 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
208 mvm->cur_ucode == IWL_UCODE_WOWLAN)) { 255 mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
209 cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); 256 cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
210 cmd->skip_dtim_periods = cpu_to_le32(3); 257 cmd->skip_dtim_periods = 3;
211 } 258 }
212 259
213 /* Check that keep alive period is at least 3 * DTIM */
214 dtimper_msec = dtimper * vif->bss_conf.beacon_int;
215 keep_alive = max_t(int, 3 * dtimper_msec,
216 MSEC_PER_SEC * cmd->keep_alive_seconds);
217 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
218 cmd->keep_alive_seconds = keep_alive;
219
220 if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { 260 if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
221 cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); 261 cmd->rx_data_timeout =
222 cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); 262 cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
263 cmd->tx_data_timeout =
264 cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
223 } else { 265 } else {
224 cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); 266 cmd->rx_data_timeout =
225 cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); 267 cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
268 cmd->tx_data_timeout =
269 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
270 }
271
272 for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
273 if (!mvmvif->queue_params[ac].uapsd)
274 continue;
275
276 cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
277 cmd->uapsd_ac_flags |= BIT(ac);
278
279 /* QNDP TID - the highest TID with no admission control */
280 if (!tid_found && !mvmvif->queue_params[ac].acm) {
281 tid_found = true;
282 switch (ac) {
283 case IEEE80211_AC_VO:
284 cmd->qndp_tid = 6;
285 break;
286 case IEEE80211_AC_VI:
287 cmd->qndp_tid = 5;
288 break;
289 case IEEE80211_AC_BE:
290 cmd->qndp_tid = 0;
291 break;
292 case IEEE80211_AC_BK:
293 cmd->qndp_tid = 1;
294 break;
295 }
296 }
297 }
298
299 if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
300 cmd->rx_data_timeout_uapsd =
301 cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
302 cmd->tx_data_timeout_uapsd =
303 cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
304
305 if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
306 BIT(IEEE80211_AC_VI) |
307 BIT(IEEE80211_AC_BE) |
308 BIT(IEEE80211_AC_BK))) {
309 cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
310 cmd->snooze_interval =
311 cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
312 cmd->snooze_window =
313 (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
314 cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
315 cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
316 }
317
318 cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
319 cmd->heavy_tx_thld_packets =
320 IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
321 cmd->heavy_rx_thld_packets =
322 IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
323 cmd->heavy_tx_thld_percentage =
324 IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
325 cmd->heavy_rx_thld_percentage =
326 IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
226 } 327 }
227 328
228#ifdef CONFIG_IWLWIFI_DEBUGFS 329#ifdef CONFIG_IWLWIFI_DEBUGFS
229 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) 330 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
230 cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds; 331 cmd->keep_alive_seconds =
332 cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
231 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { 333 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
232 if (mvmvif->dbgfs_pm.skip_over_dtim) 334 if (mvmvif->dbgfs_pm.skip_over_dtim)
233 cmd->flags |= 335 cmd->flags |=
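
The QNDP TID switch assigns one user priority per AC, consistent with the 802.11 UP-to-AC mapping (VO:{6,7}, VI:{4,5}, BE:{0,3}, BK:{1,2}); the first AC iterated without admission control wins. An equivalent lookup-table form, purely illustrative:

	static const u8 qndp_tid_for_ac[] = {
		[IEEE80211_AC_VO] = 6,
		[IEEE80211_AC_VI] = 5,
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
	};
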
@@ -243,8 +345,7 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
243 cmd->tx_data_timeout = 345 cmd->tx_data_timeout =
244 cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); 346 cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
245 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) 347 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
246 cmd->skip_dtim_periods = 348 cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
247 cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
248 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { 349 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
249 if (mvmvif->dbgfs_pm.lprx_ena) 350 if (mvmvif->dbgfs_pm.lprx_ena)
250 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); 351 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
@@ -252,16 +353,24 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
252 cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); 353 cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
253 } 354 }
254 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) 355 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
255 cmd->lprx_rssi_threshold = 356 cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
256 cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold); 357 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
358 if (mvmvif->dbgfs_pm.snooze_ena)
359 cmd->flags |=
360 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
361 else
362 cmd->flags &=
363 cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
364 }
257#endif /* CONFIG_IWLWIFI_DEBUGFS */ 365#endif /* CONFIG_IWLWIFI_DEBUGFS */
258} 366}
259 367
260int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 368static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
369 struct ieee80211_vif *vif)
261{ 370{
262 int ret; 371 int ret;
263 bool ba_enable; 372 bool ba_enable;
264 struct iwl_powertable_cmd cmd = {}; 373 struct iwl_mac_power_cmd cmd = {};
265 374
266 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 375 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
267 return 0; 376 return 0;
@@ -280,7 +389,7 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
280 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 389 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
281 iwl_mvm_power_log(mvm, &cmd); 390 iwl_mvm_power_log(mvm, &cmd);
282 391
283 ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, 392 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
284 sizeof(cmd), &cmd); 393 sizeof(cmd), &cmd);
285 if (ret) 394 if (ret)
286 return ret; 395 return ret;
@@ -291,15 +400,19 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
291 return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable); 400 return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
292} 401}
293 402
294int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 403static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
404 struct ieee80211_vif *vif)
295{ 405{
296 struct iwl_powertable_cmd cmd = {}; 406 struct iwl_mac_power_cmd cmd = {};
297 struct iwl_mvm_vif *mvmvif __maybe_unused = 407 struct iwl_mvm_vif *mvmvif __maybe_unused =
298 iwl_mvm_vif_from_mac80211(vif); 408 iwl_mvm_vif_from_mac80211(vif);
299 409
300 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 410 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
301 return 0; 411 return 0;
302 412
413 cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
414 mvmvif->color));
415
303 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 416 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
304 cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 417 cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
305 418
@@ -310,11 +423,98 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
310#endif 423#endif
311 iwl_mvm_power_log(mvm, &cmd); 424 iwl_mvm_power_log(mvm, &cmd);
312 425
313 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, 426 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
314 sizeof(cmd), &cmd); 427 sizeof(cmd), &cmd);
315} 428}
316 429
317#ifdef CONFIG_IWLWIFI_DEBUGFS 430#ifdef CONFIG_IWLWIFI_DEBUGFS
431static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
432 struct ieee80211_vif *vif, char *buf,
433 int bufsz)
434{
435 struct iwl_mac_power_cmd cmd = {};
436 int pos = 0;
437
438 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
439
440 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
441 (cmd.flags &
442 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
443 0 : 1);
444 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
445 iwlmvm_mod_params.power_scheme);
446 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
447 le16_to_cpu(cmd.flags));
448 pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
449 le16_to_cpu(cmd.keep_alive_seconds));
450
451 if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
452 pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
453 (cmd.flags &
454 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
455 1 : 0);
456 pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
457 cmd.skip_dtim_periods);
458 if (!(cmd.flags &
459 cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
460 pos += scnprintf(buf+pos, bufsz-pos,
461 "rx_data_timeout = %d\n",
462 le32_to_cpu(cmd.rx_data_timeout));
463 pos += scnprintf(buf+pos, bufsz-pos,
464 "tx_data_timeout = %d\n",
465 le32_to_cpu(cmd.tx_data_timeout));
466 }
467 if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
468 pos += scnprintf(buf+pos, bufsz-pos,
469 "lprx_rssi_threshold = %d\n",
470 cmd.lprx_rssi_threshold);
471 if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
472 pos +=
473 scnprintf(buf+pos, bufsz-pos,
474 "rx_data_timeout_uapsd = %d\n",
475 le32_to_cpu(cmd.rx_data_timeout_uapsd));
476 pos +=
477 scnprintf(buf+pos, bufsz-pos,
478 "tx_data_timeout_uapsd = %d\n",
479 le32_to_cpu(cmd.tx_data_timeout_uapsd));
480 pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
481 cmd.qndp_tid);
482 pos += scnprintf(buf+pos, bufsz-pos,
483 "uapsd_ac_flags = 0x%x\n",
484 cmd.uapsd_ac_flags);
485 pos += scnprintf(buf+pos, bufsz-pos,
486 "uapsd_max_sp = %d\n",
487 cmd.uapsd_max_sp);
488 pos += scnprintf(buf+pos, bufsz-pos,
489 "heavy_tx_thld_packets = %d\n",
490 cmd.heavy_tx_thld_packets);
491 pos += scnprintf(buf+pos, bufsz-pos,
492 "heavy_rx_thld_packets = %d\n",
493 cmd.heavy_rx_thld_packets);
494 pos += scnprintf(buf+pos, bufsz-pos,
495 "heavy_tx_thld_percentage = %d\n",
496 cmd.heavy_tx_thld_percentage);
497 pos += scnprintf(buf+pos, bufsz-pos,
498 "heavy_rx_thld_percentage = %d\n",
499 cmd.heavy_rx_thld_percentage);
500 pos +=
501 scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
502 (cmd.flags &
503 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
504 1 : 0);
505 }
506 if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
507 pos += scnprintf(buf+pos, bufsz-pos,
508 "snooze_interval = %d\n",
509 cmd.snooze_interval);
510 pos += scnprintf(buf+pos, bufsz-pos,
511 "snooze_window = %d\n",
512 cmd.snooze_window);
513 }
514 }
515 return pos;
516}
517
318void 518void
319iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, 519iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
320 struct iwl_beacon_filter_cmd *cmd) 520 struct iwl_beacon_filter_cmd *cmd)
@@ -323,22 +523,30 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
323 struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; 523 struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
324 524
325 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) 525 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
326 cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta; 526 cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
327 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) 527 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
328 cmd->bf_roaming_energy_delta = 528 cmd->bf_roaming_energy_delta =
329 dbgfs_bf->bf_roaming_energy_delta; 529 cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
330 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) 530 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
331 cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state; 531 cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
332 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA) 532 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
333 cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta; 533 cmd->bf_temp_threshold =
534 cpu_to_le32(dbgfs_bf->bf_temp_threshold);
535 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
536 cmd->bf_temp_fast_filter =
537 cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
538 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
539 cmd->bf_temp_slow_filter =
540 cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
334 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) 541 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
335 cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag; 542 cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
336 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) 543 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
337 cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); 544 cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
338 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) 545 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
339 cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); 546 cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
340 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) 547 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
341 cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort; 548 cmd->ba_enable_beacon_abort =
549 cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
342} 550}
343#endif 551#endif
344 552
@@ -348,7 +556,7 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
348 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 556 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
349 struct iwl_beacon_filter_cmd cmd = { 557 struct iwl_beacon_filter_cmd cmd = {
350 IWL_BF_CMD_CONFIG_DEFAULTS, 558 IWL_BF_CMD_CONFIG_DEFAULTS,
351 .bf_enable_beacon_filter = 1, 559 .bf_enable_beacon_filter = cpu_to_le32(1),
352 }; 560 };
353 int ret; 561 int ret;
354 562
@@ -356,11 +564,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
356 vif->type != NL80211_IFTYPE_STATION || vif->p2p) 564 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
357 return 0; 565 return 0;
358 566
567 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
359 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); 568 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
360 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); 569 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
361 570
362 if (!ret) 571 if (!ret)
363 mvmvif->bf_enabled = true; 572 mvmvif->bf_data.bf_enabled = true;
364 573
365 return ret; 574 return ret;
366} 575}
@@ -372,13 +581,33 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
372 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 581 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
373 int ret; 582 int ret;
374 583
375 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 584 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
585 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
376 return 0; 586 return 0;
377 587
378 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); 588 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
379 589
380 if (!ret) 590 if (!ret)
381 mvmvif->bf_enabled = false; 591 mvmvif->bf_data.bf_enabled = false;
382 592
383 return ret; 593 return ret;
384} 594}
595
596int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
597 struct ieee80211_vif *vif)
598{
599 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
600
601 if (!mvmvif->bf_data.bf_enabled)
602 return 0;
603
604 return iwl_mvm_enable_beacon_filter(mvm, vif);
605}
606
607const struct iwl_mvm_power_ops pm_mac_ops = {
608 .power_update_mode = iwl_mvm_power_mac_update_mode,
609 .power_disable = iwl_mvm_power_mac_disable,
610#ifdef CONFIG_IWLWIFI_DEBUGFS
611 .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
612#endif
613};
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
new file mode 100644
index 000000000000..2ce79bad5845
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
@@ -0,0 +1,319 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw-api-power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC    25
+
+static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+			      struct iwl_powertable_cmd *cmd)
+{
+	IWL_DEBUG_POWER(mvm,
+			"Sending power table command for power level %d, flags = 0x%X\n",
+			iwlmvm_mod_params.power_scheme,
+			le16_to_cpu(cmd->flags));
+	IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
+
+	if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+		IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+				le32_to_cpu(cmd->rx_data_timeout));
+		IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+				le32_to_cpu(cmd->tx_data_timeout));
+		if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+			IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+					le32_to_cpu(cmd->skip_dtim_periods));
+		if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+			IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+					le32_to_cpu(cmd->lprx_rssi_threshold));
+	}
+}
+
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif,
+				    struct iwl_powertable_cmd *cmd)
+{
+	struct ieee80211_hw *hw = mvm->hw;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *chan;
+	int dtimper, dtimper_msec;
+	int keep_alive;
+	bool radar_detect = false;
+	struct iwl_mvm_vif *mvmvif __maybe_unused =
+		iwl_mvm_vif_from_mac80211(vif);
+
+	/*
+	 * Regardless of power management state the driver must set
+	 * keep alive period. FW will use it for sending keep alive NDPs
+	 * immediately after association.
+	 */
+	cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
+
+	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+		return;
+
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+	if (!vif->bss_conf.assoc)
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+	    mvmvif->dbgfs_pm.disable_power_off)
+		cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+	if (!vif->bss_conf.ps)
+		return;
+
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+	if (vif->bss_conf.beacon_rate &&
+	    (vif->bss_conf.beacon_rate->bitrate == 10 ||
+	     vif->bss_conf.beacon_rate->bitrate == 60)) {
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+		cmd->lprx_rssi_threshold =
+			cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+	}
+
+	dtimper = hw->conf.ps_dtim_period ?: 1;
+
+	/* Check if radar detection is required on current channel */
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+	WARN_ON(!chanctx_conf);
+	if (chanctx_conf) {
+		chan = chanctx_conf->def.chan;
+		radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+	}
+	rcu_read_unlock();
+
+	/* Check skip over DTIM conditions */
+	if (!radar_detect && (dtimper <= 10) &&
+	    (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
+	     mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		cmd->skip_dtim_periods = cpu_to_le32(3);
+	}
+
+	/* Check that keep alive period is at least 3 * DTIM */
+	dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+	keep_alive = max_t(int, 3 * dtimper_msec,
+			   MSEC_PER_SEC * cmd->keep_alive_seconds);
+	keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+	cmd->keep_alive_seconds = keep_alive;
+
+	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
+		cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+		cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+	} else {
+		cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+		cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+	}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+		cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+		if (mvmvif->dbgfs_pm.skip_over_dtim)
+			cmd->flags |=
+				cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		else
+			cmd->flags &=
+				cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+	}
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+		cmd->rx_data_timeout =
+			cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+		cmd->tx_data_timeout =
+			cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+		cmd->skip_dtim_periods =
+			cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+		if (mvmvif->dbgfs_pm.lprx_ena)
+			cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+		else
+			cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+	}
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+		cmd->lprx_rssi_threshold =
+			cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+}
+
+static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
+					    struct ieee80211_vif *vif)
+{
+	int ret;
+	bool ba_enable;
+	struct iwl_powertable_cmd cmd = {};
+
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return 0;
+
+	/*
+	 * TODO: the following vif_count check is a temporary condition:
+	 * avoid the power mode update if more than one interface is
+	 * currently active. Remove it once the FW supports power
+	 * management on multiple MACs.
+	 */
+	IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
+			mvm->vif_count);
+	if (mvm->vif_count > 1)
+		return 0;
+
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+	iwl_mvm_power_log(mvm, &cmd);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+				   sizeof(cmd), &cmd);
+	if (ret)
+		return ret;
+
+	ba_enable = !!(cmd.flags &
+		       cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+
+	return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
+}
+
+static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif)
+{
+	struct iwl_powertable_cmd cmd = {};
+	struct iwl_mvm_vif *mvmvif __maybe_unused =
+		iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return 0;
+
+	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+		cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+	    mvmvif->dbgfs_pm.disable_power_off)
+		cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+	iwl_mvm_power_log(mvm, &cmd);
+
+	return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+				    sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif, char *buf,
+					   int bufsz)
+{
+	struct iwl_powertable_cmd cmd = {};
+	int pos = 0;
+
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+	pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+			 (cmd.flags &
+			  cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+			 0 : 1);
+	pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+			 le32_to_cpu(cmd.skip_dtim_periods));
+	pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+			 iwlmvm_mod_params.power_scheme);
+	pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+			 le16_to_cpu(cmd.flags));
+	pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+			 cmd.keep_alive_seconds);
+
+	if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+		pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+				 (cmd.flags &
+				  cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+				 1 : 0);
+		pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+				 le32_to_cpu(cmd.rx_data_timeout));
+		pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+				 le32_to_cpu(cmd.tx_data_timeout));
+		if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+			pos += scnprintf(buf+pos, bufsz-pos,
+					 "lprx_rssi_threshold = %d\n",
+					 le32_to_cpu(cmd.lprx_rssi_threshold));
+	}
+	return pos;
+}
+#endif
+
+const struct iwl_mvm_power_ops pm_legacy_ops = {
+	.power_update_mode = iwl_mvm_power_legacy_update_mode,
+	.power_disable = iwl_mvm_power_legacy_disable,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	.power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
+#endif
+};
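The keep-alive clamp in iwl_mvm_power_build_cmd() above is easy to check by hand: the period starts at POWER_KEEP_ALIVE_PERIOD_SEC (25 s) and is raised to at least three DTIM intervals, rounded up to whole seconds. A minimal standalone sketch of that arithmetic (plain userspace C; DIV_ROUND_UP and the max helper are reimplemented here, and the beacon interval is a hypothetical example value, not taken from the patch):

#include <stdio.h>

#define MSEC_PER_SEC 1000
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define max_int(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int keep_alive_seconds = 25;	/* POWER_KEEP_ALIVE_PERIOD_SEC */
	int dtimper = 3;		/* example DTIM period, in beacons */
	int beacon_int = 100;		/* example beacon interval, ~msec */

	/* keep alive must be at least 3 * DTIM, in whole seconds */
	int dtimper_msec = dtimper * beacon_int;
	int keep_alive = max_int(3 * dtimper_msec,
				 MSEC_PER_SEC * keep_alive_seconds);
	keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);

	/* 3 * 300 ms is well under 25 s, so the default wins here */
	printf("keep_alive = %d sec\n", keep_alive);
	return 0;
}

Only with very long DTIM intervals (hundreds of beacons) would the 3-DTIM term dominate the default.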
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 29d49cf0fdb2..5c6ae16ec52b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -131,23 +131,22 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
 
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 {
-	struct iwl_time_quota_cmd cmd;
-	int i, idx, ret, num_active_bindings, quota, quota_rem;
+	struct iwl_time_quota_cmd cmd = {};
+	int i, idx, ret, num_active_macs, quota, quota_rem;
 	struct iwl_mvm_quota_iterator_data data = {
 		.n_interfaces = {},
 		.colors = { -1, -1, -1, -1 },
 		.new_vif = newvif,
 	};
 
+	lockdep_assert_held(&mvm->mutex);
+
 	/* update all upon completion */
 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
 		return 0;
 
-	BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
-
-	lockdep_assert_held(&mvm->mutex);
-
-	memset(&cmd, 0, sizeof(cmd));
+	/* iterator data above must match */
+	BUILD_BUG_ON(MAX_BINDINGS != 4);
 
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -162,18 +161,17 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 	 * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
 	 * equally between all the bindings that require quota
 	 */
-	num_active_bindings = 0;
+	num_active_macs = 0;
 	for (i = 0; i < MAX_BINDINGS; i++) {
 		cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
-		if (data.n_interfaces[i] > 0)
-			num_active_bindings++;
+		num_active_macs += data.n_interfaces[i];
 	}
 
 	quota = 0;
 	quota_rem = 0;
-	if (num_active_bindings) {
-		quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
-		quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+	if (num_active_macs) {
+		quota = IWL_MVM_MAX_QUOTA / num_active_macs;
+		quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
 	}
 
 	for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -187,7 +185,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 			cmd.quotas[idx].quota = cpu_to_le32(0);
 			cmd.quotas[idx].max_duration = cpu_to_le32(0);
 		} else {
-			cmd.quotas[idx].quota = cpu_to_le32(quota);
+			cmd.quotas[idx].quota =
+				cpu_to_le32(quota * data.n_interfaces[i]);
 			cmd.quotas[idx].max_duration =
 				cpu_to_le32(IWL_MVM_MAX_QUOTA);
 		}
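The functional change in this quota.c diff is the allocation formula: quota used to be split evenly per active binding, and is now divided per MAC and weighted by how many MACs sit on each binding. A rough standalone illustration of the new arithmetic (plain C; IWL_MVM_MAX_QUOTA's real value lives in the driver headers, the 128 below is only a stand-in, and the interface layout is an invented example):

#include <stdio.h>

#define MAX_BINDINGS 4
#define IWL_MVM_MAX_QUOTA 128	/* stand-in value for illustration */

int main(void)
{
	int n_interfaces[MAX_BINDINGS] = { 2, 1, 0, 0 }; /* example layout */
	int i, num_active_macs = 0;

	for (i = 0; i < MAX_BINDINGS; i++)
		num_active_macs += n_interfaces[i];

	int quota = IWL_MVM_MAX_QUOTA / num_active_macs;	/* per MAC */
	int quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;

	/* each binding now receives quota * (MACs on that binding) */
	for (i = 0; i < MAX_BINDINGS; i++)
		if (n_interfaces[i])
			printf("binding %d: %d fragments\n",
			       i, quota * n_interfaces[i]);
	printf("remainder: %d fragments\n", quota_rem);
	return 0;
}

With two MACs on one binding and one on another, the first binding gets twice the air time instead of an even half/half split.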
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index b328a988c130..4ffaa3fa153f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -56,61 +56,61 @@
 #define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
 
 static u8 rs_ht_to_legacy[] = {
-	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-	IWL_RATE_6M_INDEX,
-	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
-	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
-	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
-	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+	[IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
+	[IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
+	[IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
+	[IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
+	[IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
+	[IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
+	[IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
 };
 
 static const u8 ant_toggle_lookup[] = {
-	/*ANT_NONE -> */ ANT_NONE,
-	/*ANT_A    -> */ ANT_B,
-	/*ANT_B    -> */ ANT_C,
-	/*ANT_AB   -> */ ANT_BC,
-	/*ANT_C    -> */ ANT_A,
-	/*ANT_AC   -> */ ANT_AB,
-	/*ANT_BC   -> */ ANT_AC,
-	/*ANT_ABC  -> */ ANT_ABC,
+	[ANT_NONE] = ANT_NONE,
+	[ANT_A] = ANT_B,
+	[ANT_B] = ANT_C,
+	[ANT_AB] = ANT_BC,
+	[ANT_C] = ANT_A,
+	[ANT_AC] = ANT_AB,
+	[ANT_BC] = ANT_AC,
+	[ANT_ABC] = ANT_ABC,
 };
 
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)		       \
 	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
 				    IWL_RATE_SISO_##s##M_PLCP, \
 				    IWL_RATE_MIMO2_##s##M_PLCP,\
-				    IWL_RATE_MIMO3_##s##M_PLCP,\
-				    IWL_RATE_##r##M_IEEE,      \
-				    IWL_RATE_##ip##M_INDEX,    \
-				    IWL_RATE_##in##M_INDEX,    \
 				    IWL_RATE_##rp##M_INDEX,    \
-				    IWL_RATE_##rn##M_INDEX,    \
-				    IWL_RATE_##pp##M_INDEX,    \
-				    IWL_RATE_##np##M_INDEX }
+				    IWL_RATE_##rn##M_INDEX }
 
 /*
  * Parameter order:
- *  rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *  rate, ht rate, prev rate, next rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
 static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
-	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),   /*  1mbps */
-	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),         /*  2mbps */
-	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),       /*5.5mbps */
-	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),     /* 11mbps */
-	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),         /*  6mbps */
-	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),        /*  9mbps */
-	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
-	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
-	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
-	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
-	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
-	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+	IWL_DECLARE_RATE_INFO(1, INV, INV, 2),   /*  1mbps */
+	IWL_DECLARE_RATE_INFO(2, INV, 1, 5),     /*  2mbps */
+	IWL_DECLARE_RATE_INFO(5, INV, 2, 11),    /*5.5mbps */
+	IWL_DECLARE_RATE_INFO(11, INV, 9, 12),   /* 11mbps */
+	IWL_DECLARE_RATE_INFO(6, 6, 5, 11),      /*  6mbps */
+	IWL_DECLARE_RATE_INFO(9, 6, 6, 11),      /*  9mbps */
+	IWL_DECLARE_RATE_INFO(12, 12, 11, 18),   /* 12mbps */
+	IWL_DECLARE_RATE_INFO(18, 18, 12, 24),   /* 18mbps */
+	IWL_DECLARE_RATE_INFO(24, 24, 18, 36),   /* 24mbps */
+	IWL_DECLARE_RATE_INFO(36, 36, 24, 48),   /* 36mbps */
+	IWL_DECLARE_RATE_INFO(48, 48, 36, 54),   /* 48mbps */
+	IWL_DECLARE_RATE_INFO(54, 54, 48, INV),  /* 54mbps */
+	IWL_DECLARE_RATE_INFO(60, 60, 48, INV),  /* 60mbps */
 	/* FIXME:RS:          ^^    should be INV (legacy) */
 };
 
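Worth noting in the hunk above: rs_ht_to_legacy[] and ant_toggle_lookup[] move from positional to designated initializers, so each slot is tied to its index constant instead of depending on the enum's declaration order. A toy illustration of why that is safer (hypothetical enum, not from the driver):

#include <stdio.h>

enum rate { RATE_1M, RATE_2M, RATE_6M, RATE_COUNT };

/* If RATE_2M and RATE_6M were ever reordered, this positional table
 * would silently map the wrong entries... */
static const int fallback_positional[RATE_COUNT] = {
	RATE_1M, RATE_1M, RATE_2M
};

/* ...while the designated form keeps each mapping attached to its key. */
static const int fallback_designated[RATE_COUNT] = {
	[RATE_1M] = RATE_1M,
	[RATE_2M] = RATE_1M,
	[RATE_6M] = RATE_2M,
};

int main(void)
{
	printf("6M falls back to %d (positional) vs %d (designated)\n",
	       fallback_positional[RATE_6M], fallback_designated[RATE_6M]);
	return 0;
}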
@@ -128,9 +128,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
 	if (rate_n_flags & RATE_MCS_HT_MSK) {
 		idx = rs_extract_rate(rate_n_flags);
 
-		if (idx >= IWL_RATE_MIMO3_6M_PLCP)
-			idx = idx - IWL_RATE_MIMO3_6M_PLCP;
-		else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+		WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
+		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
 			idx = idx - IWL_RATE_MIMO2_6M_PLCP;
 
 		idx += IWL_FIRST_OFDM_RATE;
@@ -162,10 +161,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags, int index);
+			     u32 *rate_n_flags);
 #else
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags, int index)
+			     u32 *rate_n_flags)
 {}
 #endif
 
@@ -212,20 +211,6 @@ static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
 	{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0,  99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
-	{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
-	{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
-	{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
-	{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
-	{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
-	{0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
-};
-
 /* mbps, mcs */
 static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
 	{  "1", "BPSK DSSS"},
@@ -260,82 +245,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
 	return (ant_type & valid_antenna) == ant_type;
 }
 
-/*
- *	removes the old data from the statistics. All data that is older than
- *	TID_MAX_TIME_DIFF, will be deleted.
- */
-static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
-	/* The oldest age we want to keep */
-	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
-	while (tl->queue_count &&
-	       (tl->time_stamp < oldest_time)) {
-		tl->total -= tl->packet_count[tl->head];
-		tl->packet_count[tl->head] = 0;
-		tl->time_stamp += TID_QUEUE_CELL_SPACING;
-		tl->queue_count--;
-		tl->head++;
-		if (tl->head >= TID_QUEUE_MAX_SIZE)
-			tl->head = 0;
-	}
-}
-
-/*
- * increment traffic load value for tid and also remove
- * any old values if passed the certain time period
- */
-static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
-			   struct ieee80211_hdr *hdr)
-{
-	u32 curr_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	struct iwl_traffic_load *tl = NULL;
-	u8 tid;
-
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
-		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & 0xf;
-	} else {
-		return IWL_MAX_TID_COUNT;
-	}
-
-	if (unlikely(tid >= IWL_MAX_TID_COUNT))
-		return IWL_MAX_TID_COUNT;
-
-	tl = &lq_data->load[tid];
-
-	curr_time -= curr_time % TID_ROUND_VALUE;
-
-	/* Happens only for the first packet. Initialize the data */
-	if (!(tl->queue_count)) {
-		tl->total = 1;
-		tl->time_stamp = curr_time;
-		tl->queue_count = 1;
-		tl->head = 0;
-		tl->packet_count[0] = 1;
-		return IWL_MAX_TID_COUNT;
-	}
-
-	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	/* The history is too long: remove data that is older than */
-	/* TID_MAX_TIME_DIFF */
-	if (index >= TID_QUEUE_MAX_SIZE)
-		rs_tl_rm_old_stats(tl, curr_time);
-
-	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
-	tl->packet_count[index] = tl->packet_count[index] + 1;
-	tl->total = tl->total + 1;
-
-	if ((index + 1) > tl->queue_count)
-		tl->queue_count = index + 1;
-
-	return tid;
-}
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 /**
  * Program the device to use fixed rate for frame transmit
@@ -349,7 +258,6 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
 	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
 	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
 	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
-	lq_sta->active_mimo3_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
 
 	IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
 		       lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
@@ -361,45 +269,11 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
 }
 #endif
 
-/*
-	get the traffic load value for tid
-*/
-static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
-	u32 curr_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	struct iwl_traffic_load *tl = NULL;
-
-	if (tid >= IWL_MAX_TID_COUNT)
-		return 0;
-
-	tl = &(lq_data->load[tid]);
-
-	curr_time -= curr_time % TID_ROUND_VALUE;
-
-	if (!(tl->queue_count))
-		return 0;
-
-	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	/* The history is too long: remove data that is older than */
-	/* TID_MAX_TIME_DIFF */
-	if (index >= TID_QUEUE_MAX_SIZE)
-		rs_tl_rm_old_stats(tl, curr_time);
-
-	return tl->total;
-}
-
 static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
 				     struct iwl_lq_sta *lq_data, u8 tid,
 				     struct ieee80211_sta *sta)
 {
 	int ret = -EAGAIN;
-	u32 load;
-
-	load = rs_tl_get_load(lq_data, tid);
 
 	/*
 	 * Don't create TX aggregation sessions when in high
@@ -563,7 +437,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
 		else if (is_mimo2(tbl->lq_type))
 			rate_n_flags |= iwl_rates[index].plcp_mimo2;
 		else
-			rate_n_flags |= iwl_rates[index].plcp_mimo3;
+			WARN_ON_ONCE(1);
 	} else {
 		IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
 	}
@@ -601,7 +475,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
 	u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
 	u8 mcs;
 
-	memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+	memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
 	*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
 
 	if (*rate_idx == IWL_RATE_INVALID) {
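The memset change above is a small but useful trick: clearing only up to offsetof(struct iwl_scale_tbl_info, win) wipes the table metadata while preserving the rate-history windows that follow it in the struct. A minimal demonstration of the pattern (standalone C with a stand-in struct; the real field layout is in rs.h):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct tbl {
	int lq_type;
	int ant_type;
	int win[4];	/* history that must survive the reset */
};

int main(void)
{
	struct tbl t = { .lq_type = 7, .ant_type = 3, .win = { 1, 2, 3, 4 } };

	/* zero everything *before* win, leave win untouched */
	memset(&t, 0, offsetof(struct tbl, win));

	printf("lq_type=%d win[2]=%d\n", t.lq_type, t.win[2]); /* 0 and 3 */
	return 0;
}

This only works when the fields to preserve sit at the end of the struct, which is presumably why win is placed last.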
@@ -640,12 +514,8 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
 		} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
 			if (num_of_ant == 2)
 				tbl->lq_type = LQ_MIMO2;
-		/* MIMO3 */
 		} else {
-			if (num_of_ant == 3) {
-				tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
-				tbl->lq_type = LQ_MIMO3;
-			}
+			WARN_ON_ONCE(num_of_ant == 3);
 		}
 	}
 	return 0;
@@ -711,10 +581,10 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
 	} else {
 		if (is_siso(rate_type))
 			return lq_sta->active_siso_rate;
-		else if (is_mimo2(rate_type))
+		else {
+			WARN_ON_ONCE(!is_mimo2(rate_type));
 			return lq_sta->active_mimo2_rate;
-		else
-			return lq_sta->active_mimo3_rate;
+		}
 	}
 }
 
@@ -1089,7 +959,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 	}
 
 	/* Choose among many HT tables depending on number of streams
-	 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+	 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
 	 * status */
 	if (is_siso(tbl->lq_type) && !tbl->is_ht40)
 		ht_tbl_pointer = expected_tpt_siso20MHz;
@@ -1097,12 +967,10 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 		ht_tbl_pointer = expected_tpt_siso40MHz;
 	else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
 		ht_tbl_pointer = expected_tpt_mimo2_20MHz;
-	else if (is_mimo2(tbl->lq_type))
+	else {
+		WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
 		ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-	else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
-		ht_tbl_pointer = expected_tpt_mimo3_20MHz;
-	else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
-		ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+	}
 
 	if (!tbl->is_SGI && !lq_sta->is_agg)	/* Normal */
 		tbl->expected_tpt = ht_tbl_pointer[0];
@@ -1274,58 +1142,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
 }
 
-/*
- * Set up search table for MIMO3
- */
-static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
-			     struct iwl_lq_sta *lq_sta,
-			     struct ieee80211_sta *sta,
-			     struct iwl_scale_tbl_info *tbl, int index)
-{
-	u16 rate_mask;
-	s32 rate;
-	s8 is_green = lq_sta->is_green;
-
-	if (!sta->ht_cap.ht_supported)
-		return -1;
-
-	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
-		return -1;
-
-	/* Need both Tx chains/antennas to support MIMO */
-	if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3)
-		return -1;
-
-	IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
-
-	tbl->lq_type = LQ_MIMO3;
-	tbl->action = 0;
-	tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
-	rate_mask = lq_sta->active_mimo3_rate;
-
-	if (iwl_is_ht40_tx_allowed(sta))
-		tbl->is_ht40 = 1;
-	else
-		tbl->is_ht40 = 0;
-
-	rs_set_expected_tpt_table(lq_sta, tbl);
-
-	rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
-	IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
-		       rate, rate_mask);
-	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-		IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
-			       rate, rate_mask);
-		return -1;
-	}
-	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
-
-	IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
-		       tbl->current_rate, is_green);
-	return 0;
-}
-
 /*
  * Set up search table for SISO
  */
 static int rs_switch_to_siso(struct iwl_mvm *mvm,
@@ -1434,21 +1250,14 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 		}
 
 		break;
-	case IWL_LEGACY_SWITCH_MIMO2_AB:
-	case IWL_LEGACY_SWITCH_MIMO2_AC:
-	case IWL_LEGACY_SWITCH_MIMO2_BC:
+	case IWL_LEGACY_SWITCH_MIMO2:
 		IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
 
 		/* Set up search table to try MIMO */
 		memcpy(search_tbl, tbl, sz);
 		search_tbl->is_SGI = 0;
 
-		if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
-			search_tbl->ant_type = ANT_AB;
-		else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
-			search_tbl->ant_type = ANT_AC;
-		else
-			search_tbl->ant_type = ANT_BC;
+		search_tbl->ant_type = ANT_AB;
 
 		if (!rs_is_valid_ant(valid_tx_ant,
 				     search_tbl->ant_type))
@@ -1461,30 +1270,11 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 			goto out;
 		}
 		break;
-
-	case IWL_LEGACY_SWITCH_MIMO3_ABC:
-		IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
-
-		/* Set up search table to try MIMO3 */
-		memcpy(search_tbl, tbl, sz);
-		search_tbl->is_SGI = 0;
-
-		search_tbl->ant_type = ANT_ABC;
-
-		if (!rs_is_valid_ant(valid_tx_ant,
-				     search_tbl->ant_type))
-			break;
-
-		ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
-					 search_tbl, index);
-		if (!ret) {
-			lq_sta->action_counter = 0;
-			goto out;
-		}
-		break;
+	default:
+		WARN_ON_ONCE(1);
 	}
 	tbl->action++;
-	if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
 		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 
 	if (tbl->action == start_action)
@@ -1496,7 +1286,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 out:
 	lq_sta->search_better_tbl = 1;
 	tbl->action++;
-	if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
 		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
@@ -1531,7 +1321,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
 		/* avoid antenna B unless MIMO */
 		if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
-			tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+			tbl->action = IWL_SISO_SWITCH_MIMO2;
 		break;
 	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
 	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1573,19 +1363,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 				goto out;
 			}
 			break;
-		case IWL_SISO_SWITCH_MIMO2_AB:
-		case IWL_SISO_SWITCH_MIMO2_AC:
-		case IWL_SISO_SWITCH_MIMO2_BC:
+		case IWL_SISO_SWITCH_MIMO2:
 			IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
 			memcpy(search_tbl, tbl, sz);
 			search_tbl->is_SGI = 0;
 
-			if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
-				search_tbl->ant_type = ANT_AB;
-			else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
-				search_tbl->ant_type = ANT_AC;
-			else
-				search_tbl->ant_type = ANT_BC;
+			search_tbl->ant_type = ANT_AB;
 
 			if (!rs_is_valid_ant(valid_tx_ant,
 					     search_tbl->ant_type))
@@ -1626,24 +1409,11 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 					index, is_green);
 			update_search_tbl_counter = 1;
 			goto out;
-		case IWL_SISO_SWITCH_MIMO3_ABC:
-			IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-			search_tbl->ant_type = ANT_ABC;
-
-			if (!rs_is_valid_ant(valid_tx_ant,
-					     search_tbl->ant_type))
-				break;
-
-			ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
-						 search_tbl, index);
-			if (!ret)
-				goto out;
-			break;
+		default:
+			WARN_ON_ONCE(1);
 		}
 		tbl->action++;
-		if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+		if (tbl->action > IWL_SISO_SWITCH_GI)
 			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 
 		if (tbl->action == start_action)
@@ -1655,7 +1425,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 out:
 	lq_sta->search_better_tbl = 1;
 	tbl->action++;
-	if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+	if (tbl->action > IWL_SISO_SWITCH_GI)
 		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
@@ -1696,8 +1466,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
 		break;
 	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
 		/* avoid antenna B unless MIMO */
-		if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
-		    tbl->action == IWL_MIMO2_SWITCH_SISO_C)
+		if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
 			tbl->action = IWL_MIMO2_SWITCH_SISO_A;
 		break;
 	default:
@@ -1730,7 +1499,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
 		break;
 	case IWL_MIMO2_SWITCH_SISO_A:
 	case IWL_MIMO2_SWITCH_SISO_B:
-	case IWL_MIMO2_SWITCH_SISO_C:
 		IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
 
 		/* Set up new search table for SISO */
@@ -1738,10 +1506,8 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
 
 		if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
 			search_tbl->ant_type = ANT_A;
-		else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+		else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
 			search_tbl->ant_type = ANT_B;
-		else
-			search_tbl->ant_type = ANT_C;
 
 		if (!rs_is_valid_ant(valid_tx_ant,
 				     search_tbl->ant_type))
@@ -1784,26 +1550,11 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
 					index, is_green);
 			update_search_tbl_counter = 1;
 			goto out;
-
-		case IWL_MIMO2_SWITCH_MIMO3_ABC:
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-			search_tbl->ant_type = ANT_ABC;
-
-			if (!rs_is_valid_ant(valid_tx_ant,
-					     search_tbl->ant_type))
-				break;
-
-			ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
-						 search_tbl, index);
-			if (!ret)
-				goto out;
-
-			break;
+		default:
+			WARN_ON_ONCE(1);
 		}
 		tbl->action++;
-		if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+		if (tbl->action > IWL_MIMO2_SWITCH_GI)
 			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
 
 		if (tbl->action == start_action)
@@ -1814,7 +1565,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
 out:
 	lq_sta->search_better_tbl = 1;
 	tbl->action++;
-	if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+	if (tbl->action > IWL_MIMO2_SWITCH_GI)
 		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
@@ -1823,171 +1574,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
 }
 
-/*
- * Try to switch to new modulation mode from MIMO3
- */
-static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
-				  struct iwl_lq_sta *lq_sta,
-				  struct ieee80211_sta *sta, int index)
-{
-	s8 is_green = lq_sta->is_green;
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	struct iwl_scale_tbl_info *search_tbl =
-		&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-	u8 start_action;
-	u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-	u8 tx_chains_num = num_of_ant(valid_tx_ant);
-	int ret;
-	u8 update_search_tbl_counter = 0;
-
-	switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
-	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
-		/* nothing */
-		break;
-	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
-	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
-		/* avoid antenna B and MIMO */
-		if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
-			tbl->action = IWL_MIMO3_SWITCH_SISO_A;
-		break;
-	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
-		/* avoid antenna B unless MIMO */
-		if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
-		    tbl->action == IWL_MIMO3_SWITCH_SISO_C)
-			tbl->action = IWL_MIMO3_SWITCH_SISO_A;
-		break;
-	default:
-		IWL_ERR(mvm, "Invalid BT load %d",
-			BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
-		break;
-	}
-
-	start_action = tbl->action;
-	while (1) {
-		lq_sta->action_counter++;
-		switch (tbl->action) {
-		case IWL_MIMO3_SWITCH_ANTENNA1:
-		case IWL_MIMO3_SWITCH_ANTENNA2:
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
-
-			if (tx_chains_num <= 3)
-				break;
-
-			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-				break;
-
-			memcpy(search_tbl, tbl, sz);
-			if (rs_toggle_antenna(valid_tx_ant,
-					      &search_tbl->current_rate,
-					      search_tbl))
-				goto out;
-			break;
-		case IWL_MIMO3_SWITCH_SISO_A:
-		case IWL_MIMO3_SWITCH_SISO_B:
-		case IWL_MIMO3_SWITCH_SISO_C:
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
-
-			/* Set up new search table for SISO */
-			memcpy(search_tbl, tbl, sz);
-
-			if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
-				search_tbl->ant_type = ANT_A;
-			else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
-				search_tbl->ant_type = ANT_B;
-			else
-				search_tbl->ant_type = ANT_C;
-
-			if (!rs_is_valid_ant(valid_tx_ant,
-					     search_tbl->ant_type))
-				break;
-
-			ret = rs_switch_to_siso(mvm, lq_sta, sta,
-						search_tbl, index);
-			if (!ret)
-				goto out;
-
-			break;
-
-		case IWL_MIMO3_SWITCH_MIMO2_AB:
-		case IWL_MIMO3_SWITCH_MIMO2_AC:
-		case IWL_MIMO3_SWITCH_MIMO2_BC:
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
-
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-			if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
-				search_tbl->ant_type = ANT_AB;
-			else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
-				search_tbl->ant_type = ANT_AC;
-			else
-				search_tbl->ant_type = ANT_BC;
-
-			if (!rs_is_valid_ant(valid_tx_ant,
-					     search_tbl->ant_type))
-				break;
-
-			ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
-						 search_tbl, index);
-			if (!ret)
-				goto out;
-
-			break;
-
-		case IWL_MIMO3_SWITCH_GI:
-			if (!tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_20))
-				break;
-			if (tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_40))
-				break;
-
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
-
-			/* Set up new search table for MIMO */
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = !tbl->is_SGI;
-			rs_set_expected_tpt_table(lq_sta, search_tbl);
-			/*
-			 * If active table already uses the fastest possible
-			 * modulation (dual stream with short guard interval),
-			 * and it's working well, there's no need to look
-			 * for a better type of modulation!
-			 */
-			if (tbl->is_SGI) {
-				s32 tpt = lq_sta->last_tpt / 100;
-				if (tpt >= search_tbl->expected_tpt[index])
-					break;
-			}
-			search_tbl->current_rate =
-				rate_n_flags_from_tbl(mvm, search_tbl,
-						      index, is_green);
-			update_search_tbl_counter = 1;
-			goto out;
-		}
-		tbl->action++;
-		if (tbl->action > IWL_MIMO3_SWITCH_GI)
-			tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
-
-		if (tbl->action == start_action)
-			break;
-	}
-	search_tbl->lq_type = LQ_NONE;
-	return 0;
- out:
-	lq_sta->search_better_tbl = 1;
-	tbl->action++;
-	if (tbl->action > IWL_MIMO3_SWITCH_GI)
-		tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
-	if (update_search_tbl_counter)
-		search_tbl->action = tbl->action;
-
-	return 0;
-}
-
 /*
  * Check whether we should continue using same modulation mode, or
  * begin search for a new mode, based on:
 * 1) # tx successes or failures while using this mode
@@ -2086,6 +1672,22 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
 	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
 }
 
+static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
+		     struct ieee80211_hdr *hdr)
+{
+	u8 tid = IWL_MAX_TID_COUNT;
+
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & 0xf;
+	}
+
+	if (unlikely(tid > IWL_MAX_TID_COUNT))
+		tid = IWL_MAX_TID_COUNT;
+
+	return tid;
+}
+
 /*
  * Do rate scaling and search for new modulation mode.
  */
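The new rs_get_tid() helper above replaces the old traffic-load bookkeeping with a bare TID lookup: the TID is the low four bits of the QoS control field, and anything non-QoS or out of range collapses to IWL_MAX_TID_COUNT. A self-contained sketch of the same extraction (plain C; the 802.11 header is reduced to a single QoS control byte, and has_qos stands in for ieee80211_is_data_qos()):

#include <stdio.h>

#define IWL_MAX_TID_COUNT 8

static unsigned char get_tid(int has_qos, unsigned char qos_ctl)
{
	unsigned char tid = IWL_MAX_TID_COUNT;

	if (has_qos)
		tid = qos_ctl & 0xf;	/* TID = low 4 bits of QoS control */

	if (tid > IWL_MAX_TID_COUNT)
		tid = IWL_MAX_TID_COUNT;

	return tid;
}

int main(void)
{
	printf("QoS frame, qos_ctl=0x25 -> TID %u\n", get_tid(1, 0x25)); /* 5 */
	printf("non-QoS frame          -> TID %u\n", get_tid(0, 0));     /* 8 */
	return 0;
}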
@@ -2129,7 +1731,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 
 	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
 
-	tid = rs_tl_add_packet(lq_sta, hdr);
+	tid = rs_get_tid(lq_sta, hdr);
 	if ((tid != IWL_MAX_TID_COUNT) &&
 	    (lq_sta->tx_agg_tid_en & (1 << tid))) {
 		tid_data = &sta_priv->tid_data[tid];
@@ -2377,8 +1979,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 		scale_action = 0;
 
 	if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
-	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
-	    (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
 		if (lq_sta->last_bt_traffic >
 		    BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
 			/*
@@ -2395,8 +1996,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 		BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
 
 	if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
-	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
-	    (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
 		/* search for a new modulation */
 		rs_stay_in_table(lq_sta, true);
 		goto lq_update;
@@ -2456,7 +2056,7 @@ lq_update:
 		else if (is_mimo2(tbl->lq_type))
 			rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
 		else
-			rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
+			WARN_ON_ONCE(1);
 
 		/* If new "search" mode was selected, set up in uCode table */
 		if (lq_sta->search_better_tbl) {
@@ -2621,11 +2221,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 		rate_idx -= IWL_FIRST_OFDM_RATE;
 		/* 6M and 9M shared same MCS index */
 		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+		WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
+			     IWL_RATE_MIMO3_6M_PLCP);
 		if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
-		    IWL_RATE_MIMO3_6M_PLCP)
-			rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
-		else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
-		    IWL_RATE_MIMO2_6M_PLCP)
+		    IWL_RATE_MIMO2_6M_PLCP)
 			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
 		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
 		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
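The rs_get_rate() hunk above drops the three-stream branch: HT MCS indices are laid out in per-stream blocks of MCS_INDEX_PER_STREAM entries, so a MIMO2 rate reports its mac80211 MCS as the SISO index plus one stream-block offset, while the old MIMO3 case (two blocks) is now only a WARN_ON_ONCE. A quick sketch of that offset arithmetic (standalone C; the value 8 for MCS_INDEX_PER_STREAM is the usual 802.11n per-stream spacing and is assumed here, not quoted from this patch):

#include <stdio.h>

#define MCS_INDEX_PER_STREAM 8	/* assumed HT MCS block size per stream */

/* Map a per-stream rate index to a flat mac80211 MCS index. */
static int flat_mcs(int rate_idx, int nstreams)
{
	return rate_idx + (nstreams - 1) * MCS_INDEX_PER_STREAM;
}

int main(void)
{
	/* SISO MCS 3 stays 3; the same per-stream index in MIMO2 is 11 */
	printf("SISO : %d\n", flat_mcs(3, 1));
	printf("MIMO2: %d\n", flat_mcs(3, 2));
	return 0;
}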
@@ -2688,9 +2287,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
 	lq_sta->flush_timer = 0;
 	lq_sta->supp_rates = sta->supp_rates[sband->band];
-	for (j = 0; j < LQ_SIZE; j++)
-		for (i = 0; i < IWL_RATE_COUNT; i++)
-			rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
 
 	IWL_DEBUG_RATE(mvm,
 		       "LQ: *** rate scale station global init for station %d ***\n",
@@ -2727,16 +2323,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	lq_sta->active_mimo2_rate &= ~((u16)0x2);
 	lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
 
-	lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
-	lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
-	lq_sta->active_mimo3_rate &= ~((u16)0x2);
-	lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
-
 	IWL_DEBUG_RATE(mvm,
-		       "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+		       "SISO-RATE=%X MIMO2-RATE=%X\n",
 		       lq_sta->active_siso_rate,
-		       lq_sta->active_mimo2_rate,
-		       lq_sta->active_mimo3_rate);
+		       lq_sta->active_mimo2_rate);
 
 	/* These values will be overridden later */
 	lq_sta->lq.single_stream_ant_msk =
@@ -2780,7 +2370,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
 	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
 
 	/* Override starting rate (index 0) if needed for debug purposes */
-	rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+	rs_dbgfs_set_mcs(lq_sta, &new_rate);
 
 	/* Interpret new_rate (rate_n_flags) */
 	rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2827,7 +2417,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
 	}
 
 	/* Override next rate if needed for debug purposes */
-	rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+	rs_dbgfs_set_mcs(lq_sta, &new_rate);
 
 	/* Fill next table entry */
 	lq_cmd->rs_table[index] =
@@ -2869,7 +2459,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
 		use_ht_possible = 0;
 
 	/* Override next rate if needed for debug purposes */
-	rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+	rs_dbgfs_set_mcs(lq_sta, &new_rate);
 
 	/* Fill next table entry */
 	lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
@@ -2914,7 +2504,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags, int index)
+			     u32 *rate_n_flags)
 {
 	struct iwl_mvm *mvm;
 	u8 valid_tx_ant;
@@ -2999,8 +2589,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 		       (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
 	if (is_Ht(tbl->lq_type)) {
 		desc += sprintf(buff+desc, " %s",
-				(is_siso(tbl->lq_type)) ? "SISO" :
-				((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+				(is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
 		desc += sprintf(buff+desc, " %s",
 				(tbl->is_ht40) ? "40MHz" : "20MHz");
 		desc += sprintf(buff+desc, " %s %s %s\n",
@@ -3100,32 +2689,6 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
 	.llseek = default_llseek,
 };
 
-static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
-			char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct iwl_lq_sta *lq_sta = file->private_data;
-	struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-	char buff[120];
-	int desc = 0;
-
-	if (is_Ht(tbl->lq_type))
-		desc += sprintf(buff+desc,
-				"Bit Rate= %d Mb/s\n",
-				tbl->expected_tpt[lq_sta->last_txrate_idx]);
-	else
-		desc += sprintf(buff+desc,
-				"Bit Rate= %d Mb/s\n",
-				iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
-	return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
-	.read = rs_sta_dbgfs_rate_scale_data_read,
-	.open = simple_open,
-	.llseek = default_llseek,
-};
-
 static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
 {
 	struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -3135,9 +2698,6 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
 	lq_sta->rs_sta_dbgfs_stats_table_file =
 		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
 				    lq_sta, &rs_sta_dbgfs_stats_table_ops);
-	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
-		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
-				    lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
 	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
 		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
 				  &lq_sta->tx_agg_tid_en);
@@ -3148,7 +2708,6 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
 	struct iwl_lq_sta *lq_sta = mvm_sta;
 	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
 	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
 	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
 }
 #endif
@@ -3159,8 +2718,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
  * station is added we ignore it.
  */
 static void rs_rate_init_stub(void *mvm_r,
-			      struct ieee80211_supported_band *sband,
-			      struct ieee80211_sta *sta, void *mvm_sta)
+			      struct ieee80211_supported_band *sband,
+			      struct cfg80211_chan_def *chandef,
+			      struct ieee80211_sta *sta, void *mvm_sta)
 {
 }
 static struct rate_control_ops rs_mvm_ops = {
@@ -3193,13 +2753,14 @@ void iwl_mvm_rate_control_unregister(void)
  * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
  * Tx protection, according to this request and previous requests,
  * and send the LQ command.
- * @lq: The LQ command
  * @mvmsta: The station
  * @enable: Enable Tx protection?
  */
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
-			  struct iwl_mvm_sta *mvmsta, bool enable)
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			  bool enable)
 {
+	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+
 	lockdep_assert_held(&mvm->mutex);
 
 	if (enable) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index cff4f6da7733..335cf1682902 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -38,14 +38,8 @@ struct iwl_rs_rate_info {
 	u8 plcp;	  /* uCode API:  IWL_RATE_6M_PLCP, etc. */
 	u8 plcp_siso;	  /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
 	u8 plcp_mimo2;	  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
-	u8 plcp_mimo3;	  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
-	u8 ieee;	  /* MAC header:  IWL_RATE_6M_IEEE, etc. */
-	u8 prev_ieee;	  /* previous rate in IEEE speeds */
-	u8 next_ieee;	  /* next rate in IEEE speeds */
 	u8 prev_rs;	  /* previous rate used in rs algo */
 	u8 next_rs;	  /* next rate used in rs algo */
-	u8 prev_rs_tgg;	  /* previous rate used in TGG rs algo */
-	u8 next_rs_tgg;	  /* next rate used in TGG rs algo */
 };
 
 #define IWL_RATE_60M_PLCP 3
@@ -120,23 +114,6 @@ enum {
 	IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
 };
 
-/* MAC header values for bit rates */
-enum {
-	IWL_RATE_6M_IEEE  = 12,
-	IWL_RATE_9M_IEEE  = 18,
-	IWL_RATE_12M_IEEE = 24,
-	IWL_RATE_18M_IEEE = 36,
-	IWL_RATE_24M_IEEE = 48,
-	IWL_RATE_36M_IEEE = 72,
-	IWL_RATE_48M_IEEE = 96,
-	IWL_RATE_54M_IEEE = 108,
-	IWL_RATE_60M_IEEE = 120,
-	IWL_RATE_1M_IEEE  = 2,
-	IWL_RATE_2M_IEEE  = 4,
-	IWL_RATE_5M_IEEE  = 11,
-	IWL_RATE_11M_IEEE = 22,
-};
-
 #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
 
 #define IWL_INVALID_VALUE    -1
@@ -165,47 +142,22 @@ enum {
 #define IWL_LEGACY_SWITCH_ANTENNA1      0
 #define IWL_LEGACY_SWITCH_ANTENNA2      1
 #define IWL_LEGACY_SWITCH_SISO          2
-#define IWL_LEGACY_SWITCH_MIMO2_AB      3
-#define IWL_LEGACY_SWITCH_MIMO2_AC      4
-#define IWL_LEGACY_SWITCH_MIMO2_BC      5
-#define IWL_LEGACY_SWITCH_MIMO3_ABC     6
+#define IWL_LEGACY_SWITCH_MIMO2         3
 
 /* possible actions when in siso mode */
 #define IWL_SISO_SWITCH_ANTENNA1        0
 #define IWL_SISO_SWITCH_ANTENNA2        1
-#define IWL_SISO_SWITCH_MIMO2_AB        2
-#define IWL_SISO_SWITCH_MIMO2_AC        3
-#define IWL_SISO_SWITCH_MIMO2_BC        4
-#define IWL_SISO_SWITCH_GI              5
-#define IWL_SISO_SWITCH_MIMO3_ABC       6
-
+#define IWL_SISO_SWITCH_MIMO2           2
+#define IWL_SISO_SWITCH_GI              3
 
 /* possible actions when in mimo mode */
 #define IWL_MIMO2_SWITCH_ANTENNA1       0
 #define IWL_MIMO2_SWITCH_ANTENNA2       1
#define IWL_MIMO2_SWITCH_SISO_A         2
 #define IWL_MIMO2_SWITCH_SISO_B         3
-#define IWL_MIMO2_SWITCH_SISO_C         4
-#define IWL_MIMO2_SWITCH_GI             5
-#define IWL_MIMO2_SWITCH_MIMO3_ABC      6
-
+#define IWL_MIMO2_SWITCH_GI             4
 
-/* possible actions when in mimo3 mode */
-#define IWL_MIMO3_SWITCH_ANTENNA1       0
-#define IWL_MIMO3_SWITCH_ANTENNA2       1
-#define IWL_MIMO3_SWITCH_SISO_A         2
-#define IWL_MIMO3_SWITCH_SISO_B         3
-#define IWL_MIMO3_SWITCH_SISO_C         4
-#define IWL_MIMO3_SWITCH_MIMO2_AB       5
-#define IWL_MIMO3_SWITCH_MIMO2_AC       6
-#define IWL_MIMO3_SWITCH_MIMO2_BC       7
-#define IWL_MIMO3_SWITCH_GI             8
-
-
-#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
-
-/*FIXME:RS:add possible actions for MIMO3*/
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
 
 #define IWL_ACTION_LIMIT		3	/* # possible actions */
 
@@ -240,15 +192,13 @@ enum iwl_table_type {
 	LQ_A,
 	LQ_SISO, /* high-throughput types */
 	LQ_MIMO2,
-	LQ_MIMO3,
 	LQ_MAX,
 };
 
 #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
 #define is_siso(tbl) ((tbl) == LQ_SISO)
 #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
-#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_mimo(tbl) is_mimo2(tbl)
 #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
 #define is_a_band(tbl) ((tbl) == LQ_A)
 #define is_g_and(tbl) ((tbl) == LQ_G)
@@ -290,17 +240,6 @@ struct iwl_scale_tbl_info {
 	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
 };
 
-struct iwl_traffic_load {
-	unsigned long time_stamp;	/* age of the oldest statistics */
-	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
-						 * slice */
-	u32 total;			/* total num of packets during the
-					 * last TID_MAX_TIME_DIFF */
-	u8 queue_count;			/* number of queues that has
-					 * been used since the last cleanup */
-	u8 head;			/* start of the circular buffer */
-};
-
 /**
  * struct iwl_lq_sta -- driver's rate scaling private structure
  *
@@ -331,18 +270,15 @@ struct iwl_lq_sta {
 	u16 active_legacy_rate;
 	u16 active_siso_rate;
 	u16 active_mimo2_rate;
-	u16 active_mimo3_rate;
 	s8 max_rate_idx;     /* Max rate set by user */
 	u8 missed_rate_counter;
 
 	struct iwl_lq_cmd lq;
 	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-	struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
 	u8 tx_agg_tid_en;
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct dentry *rs_sta_dbgfs_scale_table_file;
 	struct dentry *rs_sta_dbgfs_stats_table_file;
-	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
 	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
 	u32 dbg_fixed_rate;
 #endif
@@ -404,7 +340,7 @@ extern void iwl_mvm_rate_control_unregister(void);
 
 struct iwl_mvm_sta;
 
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
-			  struct iwl_mvm_sta *mvmsta, bool enable);
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			  bool enable);
 
 #endif /* __rs__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index e4930d5027d2..2a8cb5a60535 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -124,24 +124,15 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 	ieee80211_rx_ni(mvm->hw, skb);
 }
 
-/*
- * iwl_mvm_calc_rssi - calculate the rssi in dBm
- * @phy_info: the phy information for the coming packet
- */
-static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
-			     struct iwl_rx_phy_info *phy_info)
+static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
+			      struct iwl_rx_phy_info *phy_info,
+			      struct ieee80211_rx_status *rx_status)
 {
 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
 	int rssi_all_band_a, rssi_all_band_b;
 	u32 agc_a, agc_b, max_agc;
 	u32 val;
 
-	/* Find max rssi among 2 possible receivers.
-	 * These values are measured by the Digital Signal Processor (DSP).
-	 * They should stay fairly constant even as the signal strength varies,
-	 * if the radio's Automatic Gain Control (AGC) is working right.
-	 * AGC value (see below) will provide the "interesting" info.
-	 */
 	val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
 	agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
 	agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
@@ -166,7 +157,51 @@ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
 	IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
 			rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
 
-	return max_rssi_dbm;
+	rx_status->signal = max_rssi_dbm;
+	rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+			     RX_RES_PHY_FLAGS_ANTENNA)
+					>> RX_RES_PHY_FLAGS_ANTENNA_POS;
+	rx_status->chain_signal[0] = rssi_a_dbm;
+	rx_status->chain_signal[1] = rssi_b_dbm;
+}
+
+/*
+ * iwl_mvm_get_signal_strength - use new rx PHY INFO API
+ * values are reported by the fw as positive values - need to negate
+ * to obtain their dBM.  Account for missing antennas by replacing 0
+ * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
+ */
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+					struct iwl_rx_phy_info *phy_info,
+					struct ieee80211_rx_status *rx_status)
+{
+	int energy_a, energy_b, energy_c, max_energy;
+	u32 val;
+
+	val =
+	    le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+	energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+						IWL_RX_INFO_ENERGY_ANT_A_POS;
+	energy_a = energy_a ? -energy_a : -256;
+	energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
+						IWL_RX_INFO_ENERGY_ANT_B_POS;
+	energy_b = energy_b ? -energy_b : -256;
+	energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
+						IWL_RX_INFO_ENERGY_ANT_C_POS;
+	energy_c = energy_c ? -energy_c : -256;
+	max_energy = max(energy_a, energy_b);
+	max_energy = max(max_energy, energy_c);
+
+	IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
+			energy_a, energy_b, energy_c, max_energy);
+
+	rx_status->signal = max_energy;
+	rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+			     RX_RES_PHY_FLAGS_ANTENNA)
+					>> RX_RES_PHY_FLAGS_ANTENNA_POS;
+	rx_status->chain_signal[0] = energy_a;
+	rx_status->chain_signal[1] = energy_b;
+	rx_status->chain_signal[2] = energy_c;
 }
 
 /*
@@ -289,29 +324,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 	 */
 	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
 
-	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
-	rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info);
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
+		iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
+	else
+		iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
 
 	IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
 			      (unsigned long long)rx_status.mactime);
 
-	/*
-	 * "antenna number"
-	 *
-	 * It seems that the antenna field in the phy flags value
-	 * is actually a bit field. This is undefined by radiotap,
-	 * it wants an actual antenna number but I always get "7"
-	 * for most legacy frames I receive indicating that the
-	 * same frame was received on all three RX chains.
-	 *
-	 * I think this field should be removed in favor of a
-	 * new 802.11n radiotap field "RX chains" that is defined
-	 * as a bitmask.
-	 */
-	rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) &
-				RX_RES_PHY_FLAGS_ANTENNA)
-					>> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
 	/* set the preamble flag if appropriate */
 	if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
 		rx_status.flag |= RX_FLAG_SHORTPRE;
@@ -364,11 +384,74 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 	return 0;
 }
 
+static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
+					 struct iwl_notif_statistics *stats)
+{
+	/*
+	 * NOTE FW aggregates the statistics - BUT the statistics are cleared
+	 * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
+	 * bit set.
+	 */
+	lockdep_assert_held(&mvm->mutex);
+	memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
+}
+
+struct iwl_mvm_stat_data {
+	struct iwl_notif_statistics *stats;
+	struct iwl_mvm *mvm;
+};
+
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+				  struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_stat_data *data = _data;
+	struct iwl_notif_statistics *stats = data->stats;
+	struct iwl_mvm *mvm = data->mvm;
+	int sig = -stats->general.beacon_filter_average_energy;
+	int last_event;
+	int thold = vif->bss_conf.cqm_rssi_thold;
+	int hyst = vif->bss_conf.cqm_rssi_hyst;
+	u16 id = le32_to_cpu(stats->rx.general.mac_id);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (mvmvif->id != id)
+		return;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	mvmvif->bf_data.ave_beacon_signal = sig;
+
+	if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+		return;
+
+	/* CQM Notification */
+	last_event = mvmvif->bf_data.last_cqm_event;
+	if (thold && sig < thold && (last_event == 0 ||
+				     sig < last_event - hyst)) {
+		mvmvif->bf_data.last_cqm_event = sig;
+		IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
+			     sig);
+		ieee80211_cqm_rssi_notify(
+			vif,
+			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+			GFP_KERNEL);
+	} else if (sig > thold &&
+		   (last_event == 0 || sig > last_event + hyst)) {
+		mvmvif->bf_data.last_cqm_event = sig;
+		IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
+			     sig);
+		ieee80211_cqm_rssi_notify(
+			vif,
+			NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+			GFP_KERNEL);
+	}
+}
+
 /*
  * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
  *
 * TODO: This handler is implemented partially.
- * It only gets the NIC's temperature.
 */
 int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
 			  struct iwl_rx_cmd_buffer *rxb,
@@ -377,11 +460,20 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_notif_statistics *stats = (void *)&pkt->data;
 	struct mvm_statistics_general_common *common = &stats->general.common;
+	struct iwl_mvm_stat_data data = {
+		.stats = stats,
+		.mvm = mvm,
+	};
 
 	if (mvm->temperature != le32_to_cpu(common->temperature)) {
 		mvm->temperature = le32_to_cpu(common->temperature);
 		iwl_mvm_tt_handler(mvm);
 	}
+	iwl_mvm_update_rx_statistics(mvm, stats);
 
+	ieee80211_iterate_active_interfaces(mvm->hw,
+					    IEEE80211_IFACE_ITER_NORMAL,
+					    iwl_mvm_stat_iterator,
+					    &data);
 	return 0;
 }
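
The new RX energy API above extracts each antenna's energy as a positive 8-bit field, negates it to get dBm, and substitutes -256 dBm (an infeasible 8-bit value) for absent antennas. A minimal standalone sketch of that conversion, using hypothetical mask/shift values rather than the driver's real IWL_RX_INFO_* constants:

    #include <stdio.h>

    /* Hypothetical layout: one 8-bit energy field per antenna in a 32-bit word. */
    #define ENERGY_ANT_A_MSK 0x000000ff
    #define ENERGY_ANT_A_POS 0
    #define ENERGY_ANT_B_MSK 0x0000ff00
    #define ENERGY_ANT_B_POS 8

    static int energy_to_dbm(unsigned int val, unsigned int msk, unsigned int pos)
    {
        int energy = (int)((val & msk) >> pos);

        /* fw reports positive magnitudes; 0 means the antenna is missing */
        return energy ? -energy : -256;
    }

    int main(void)
    {
        unsigned int val = (68u << ENERGY_ANT_B_POS) | (42u << ENERGY_ANT_A_POS);

        printf("A: %d dBm\n", energy_to_dbm(val, ENERGY_ANT_A_MSK, ENERGY_ANT_A_POS)); /* -42 */
        printf("B: %d dBm\n", energy_to_dbm(val, ENERGY_ANT_B_MSK, ENERGY_ANT_B_POS)); /* -68 */
        return 0;
    }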
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index acdff6b67e04..9a7ab8495300 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -301,10 +301,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
 	 */
 	if (req->n_ssids > 0) {
 		cmd->passive2active = cpu_to_le16(1);
+		cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
 		ssid = req->ssids[0].ssid;
 		ssid_len = req->ssids[0].ssid_len;
 	} else {
 		cmd->passive2active = 0;
+		cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
 	}
 
 	iwl_mvm_scan_fill_ssids(cmd, req);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 563f559b902d..44add291531b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -826,8 +826,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 * method for HT traffic
 	 * this function also sends the LQ command
 	 */
-	return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
-				     mvmsta, true);
+	return iwl_mvm_tx_protection(mvm, mvmsta, true);
 	/*
 	 * TODO: remove the TLC_RTS flag when we tear down the last
 	 * AGG session (agg_tids_count in DVM)
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 7fd6fbfbc1b3..c17b74c31398 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -73,7 +73,6 @@
 #include "iwl-prph.h"
 
 /* A TimeUnit is 1024 microsecond */
-#define TU_TO_JIFFIES(_tu)	(usecs_to_jiffies((_tu) * 1024))
 #define MSEC_TO_TU(_msec)	(_msec*1000/1024)
 
 /*
@@ -185,7 +184,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
 		}
 	}
 
-	if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
+	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
 		IWL_DEBUG_TE(mvm,
 			     "TE ended - current time %lu, estimated end %lu\n",
 			     jiffies, te_data->end_jiffies);
@@ -202,10 +201,9 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
 		iwl_mvm_te_check_disconnect(mvm, te_data->vif,
 			"No assocation and the time event is over already...");
 		iwl_mvm_te_clear_data(mvm, te_data);
-	} else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
+	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
 		te_data->running = true;
-		te_data->end_jiffies = jiffies +
-			TU_TO_JIFFIES(te_data->duration);
+		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
 
 		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
 			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
@@ -270,10 +268,67 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
 	return true;
 }
 
+/* used to convert from time event API v2 to v1 */
+#define TE_V2_DEP_POLICY_MSK	(TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
+				 TE_V2_EVENT_SOCIOPATHIC)
+static inline u16 te_v2_get_notify(__le16 policy)
+{
+	return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
+}
+
+static inline u16 te_v2_get_dep_policy(__le16 policy)
+{
+	return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
+		TE_V2_PLACEMENT_POS;
+}
+
+static inline u16 te_v2_get_absence(__le16 policy)
+{
+	return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
+}
+
+static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
+				struct iwl_time_event_cmd_v1 *cmd_v1)
+{
+	cmd_v1->id_and_color = cmd_v2->id_and_color;
+	cmd_v1->action = cmd_v2->action;
+	cmd_v1->id = cmd_v2->id;
+	cmd_v1->apply_time = cmd_v2->apply_time;
+	cmd_v1->max_delay = cmd_v2->max_delay;
+	cmd_v1->depends_on = cmd_v2->depends_on;
+	cmd_v1->interval = cmd_v2->interval;
+	cmd_v1->duration = cmd_v2->duration;
+	if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
+		cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
+	else
+		cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
+	cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
+	cmd_v1->interval_reciprocal = 0; /* unused */
+
+	cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
+	cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
+	cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
+}
+
+static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
+				       const struct iwl_time_event_cmd_v2 *cmd)
+{
+	struct iwl_time_event_cmd_v1 cmd_v1;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
+		return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+					    sizeof(*cmd), cmd);
+
+	iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
+	return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+				    sizeof(cmd_v1), &cmd_v1);
+}
+
+
 static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
 				       struct ieee80211_vif *vif,
 				       struct iwl_mvm_time_event_data *te_data,
-				       struct iwl_time_event_cmd *te_cmd)
+				       struct iwl_time_event_cmd_v2 *te_cmd)
 {
 	static const u8 time_event_response[] = { TIME_EVENT_CMD };
 	struct iwl_notification_wait wait_time_event;
@@ -309,8 +364,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
 				   ARRAY_SIZE(time_event_response),
 				   iwl_mvm_time_event_response, te_data);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-				   sizeof(*te_cmd), te_cmd);
+	ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
 	if (ret) {
 		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
 		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -337,13 +391,12 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-	struct iwl_time_event_cmd time_cmd = {};
+	struct iwl_time_event_cmd_v2 time_cmd = {};
 
 	lockdep_assert_held(&mvm->mutex);
 
 	if (te_data->running &&
-	    time_after(te_data->end_jiffies,
-		       jiffies + TU_TO_JIFFIES(min_duration))) {
+	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
 		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
 			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
 		return;
@@ -372,17 +425,14 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 	time_cmd.apply_time =
 		cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
 
-	time_cmd.dep_policy = TE_INDEPENDENT;
-	time_cmd.is_present = cpu_to_le32(1);
-	time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
+	time_cmd.max_frags = TE_V2_FRAG_NONE;
 	time_cmd.max_delay = cpu_to_le32(500);
 	/* TODO: why do we need to interval = bi if it is not periodic? */
 	time_cmd.interval = cpu_to_le32(1);
-	time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
 	time_cmd.duration = cpu_to_le32(duration);
-	time_cmd.repeat = cpu_to_le32(1);
-	time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
-				      TE_NOTIF_HOST_EVENT_END);
+	time_cmd.repeat = 1;
+	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+				      TE_V2_NOTIF_HOST_EVENT_END);
 
 	iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
@@ -396,7 +446,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
 			       struct iwl_mvm_vif *mvmvif,
 			       struct iwl_mvm_time_event_data *te_data)
 {
-	struct iwl_time_event_cmd time_cmd = {};
+	struct iwl_time_event_cmd_v2 time_cmd = {};
 	u32 id, uid;
 	int ret;
 
@@ -433,8 +483,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 
 	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
-	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-				   sizeof(time_cmd), &time_cmd);
+	ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
 	if (WARN_ON(ret))
 		return;
 }
@@ -454,7 +503,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-	struct iwl_time_event_cmd time_cmd = {};
+	struct iwl_time_event_cmd_v2 time_cmd = {};
 
 	lockdep_assert_held(&mvm->mutex);
 	if (te_data->running) {
@@ -485,8 +534,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	}
 
 	time_cmd.apply_time = cpu_to_le32(0);
-	time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
-	time_cmd.is_present = cpu_to_le32(1);
 	time_cmd.interval = cpu_to_le32(1);
 
 	/*
@@ -495,12 +542,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 * scheduled. To improve the chances of it being scheduled, allow them
 	 * to be fragmented, and in addition allow them to be delayed.
 	 */
-	time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
+	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
 	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
 	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
-	time_cmd.repeat = cpu_to_le32(1);
-	time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
-				      TE_NOTIF_HOST_EVENT_END);
+	time_cmd.repeat = 1;
+	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+				      TE_V2_NOTIF_HOST_EVENT_END);
 
 	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
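
A TU is 1024 microseconds, so the integer macro above gives MSEC_TO_TU(500) = 500 * 1000 / 1024 = 488 TU, and the removed TU_TO_JIFFIES went the other way through usecs_to_jiffies(). The v2-to-v1 fallback likewise just unpacks one packed policy word into the three separate v1 fields. A worked sketch of both conversions under assumed bit positions (these are not the real TE_V2_* values):

    #include <stdio.h>

    #define MSEC_TO_TU(_msec)  ((_msec) * 1000 / 1024)  /* 1 TU = 1024 us */

    /* Hypothetical v2 policy layout: low byte notify, then placement, then absence. */
    #define NOTIF_MSK       0x00ff
    #define PLACEMENT_POS   8
    #define DEP_POLICY_MSK  0x0300
    #define ABSENCE_POS     10
    #define ABSENCE         (1 << ABSENCE_POS)

    int main(void)
    {
        unsigned int policy = 0x0042 | (1 << PLACEMENT_POS) | ABSENCE;

        printf("500 ms = %d TU\n", MSEC_TO_TU(500));          /* 488 */
        printf("notify     = 0x%02x\n", policy & NOTIF_MSK);  /* 0x42 */
        printf("dep_policy = %u\n",
               (policy & DEP_POLICY_MSK) >> PLACEMENT_POS);   /* 1 */
        printf("is_present = %u\n",
               !((policy & ABSENCE) >> ABSENCE_POS));         /* 0 */
        return 0;
    }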
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index d6ae7f16ac11..1f3282dff513 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -391,8 +391,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
 		mvmsta = (void *)sta->drv_priv;
 		if (enable == mvmsta->tt_tx_protection)
 			continue;
-		err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
-					    mvmsta, enable);
+		err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
 		if (err) {
 			IWL_ERR(mvm, "Failed to %s Tx protection\n",
 				enable ? "enable" : "disable");
@@ -513,12 +512,39 @@ static const struct iwl_tt_params iwl7000_tt_params = {
 	.support_tx_backoff = true,
 };
 
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+	.ct_kill_entry = 118,
+	.ct_kill_exit = 96,
+	.ct_kill_duration = 5,
+	.dynamic_smps_entry = 114,
+	.dynamic_smps_exit = 110,
+	.tx_protection_entry = 114,
+	.tx_protection_exit = 108,
+	.tx_backoff = {
+		{.temperature = 112, .backoff = 300},
+		{.temperature = 113, .backoff = 800},
+		{.temperature = 114, .backoff = 1500},
+		{.temperature = 115, .backoff = 3000},
+		{.temperature = 116, .backoff = 5000},
+		{.temperature = 117, .backoff = 10000},
+	},
+	.support_ct_kill = true,
+	.support_dynamic_smps = true,
+	.support_tx_protection = true,
+	.support_tx_backoff = true,
+};
+
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
 {
 	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
 
 	IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
-	tt->params = &iwl7000_tt_params;
+
+	if (mvm->cfg->high_temp)
+		tt->params = &iwl7000_high_temp_tt_params;
+	else
+		tt->params = &iwl7000_tt_params;
+
 	tt->throttle = false;
 	INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
 }
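
The high-temperature parameter set keeps the shape of the default one: each entry threshold sits above its exit threshold to give hysteresis, and tx_backoff is an ascending table where the hottest matching row wins. A sketch of that table walk under assumed types (this is not the driver's actual lookup helper):

    #include <stdio.h>

    struct tt_tx_backoff {
        int temperature;        /* entry threshold, degrees Celsius */
        unsigned int backoff;   /* tx backoff applied at/above it */
    };

    static const struct tt_tx_backoff backoff_tbl[] = {
        { .temperature = 112, .backoff = 300 },
        { .temperature = 114, .backoff = 1500 },
        { .temperature = 116, .backoff = 5000 },
    };

    /* Return the backoff of the hottest row at or below the current temperature. */
    static unsigned int tt_pick_backoff(int temp)
    {
        unsigned int backoff = 0;
        unsigned int i;

        for (i = 0; i < sizeof(backoff_tbl) / sizeof(backoff_tbl[0]); i++)
            if (temp >= backoff_tbl[i].temperature)
                backoff = backoff_tbl[i].backoff;

        return backoff;
    }

    int main(void)
    {
        printf("%u\n", tt_pick_backoff(115));   /* 1500 */
        return 0;
    }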
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index f0e96a927407..e05440d90319 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -91,11 +91,10 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 
 	/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
 	if (info->band == IEEE80211_BAND_2GHZ &&
-	    (skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
 	     is_multicast_ether_addr(hdr->addr1) ||
-	     ieee80211_is_back_req(fc) ||
-	     ieee80211_is_mgmt(fc)))
+	     ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
 		tx_flags |= TX_CMD_FLG_BT_DIS;
 
 	if (ieee80211_has_morefrags(fc))
@@ -123,6 +122,8 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 		 * it
 		 */
 		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+	} else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
 	} else {
 		tx_cmd->pm_frame_timeout = 0;
 	}
@@ -171,7 +172,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
 	}
 
 	/*
-	 * for data packets, rate info comes from the table inside he fw. This
+	 * for data packets, rate info comes from the table inside the fw. This
 	 * table is controlled by LINK_QUALITY commands
 	 */
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 1e1332839e4a..a9c357491434 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -453,6 +453,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
 }
 
+void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
+{
+	const struct fw_img *img;
+	int ofs, len = 0;
+	u8 *buf;
+
+	if (!mvm->ucode_loaded)
+		return;
+
+	img = &mvm->fw->img[mvm->cur_ucode];
+	ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+	len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
+	iwl_print_hex_error(mvm->trans, buf, len);
+
+	kfree(buf);
+}
+
 /**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @init: This command is sent as part of station initialization right
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index ff13458efc27..dc02cb9792af 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -273,9 +273,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 	{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
 	{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
 	{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
@@ -325,15 +325,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int ret;
 
 	iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
-	if (iwl_trans == NULL)
-		return -ENOMEM;
+	if (IS_ERR(iwl_trans))
+		return PTR_ERR(iwl_trans);
 
 	pci_set_drvdata(pdev, iwl_trans);
 
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
 	trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
 
-	if (IS_ERR_OR_NULL(trans_pcie->drv)) {
+	if (IS_ERR(trans_pcie->drv)) {
 		ret = PTR_ERR(trans_pcie->drv);
 		goto out_free_trans;
 	}
@@ -368,21 +368,19 @@ static void iwl_pci_remove(struct pci_dev *pdev)
 
 static int iwl_pci_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
-
 	/* Before you put code here, think about WoWLAN. You cannot check here
 	 * whether WoWLAN is enabled or not, and your code will run even if
 	 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
 	 */
 
-	return iwl_trans_suspend(iwl_trans);
+	return 0;
 }
 
 static int iwl_pci_resume(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
-	struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	bool hw_rfkill;
 
 	/* Before you put code here, think about WoWLAN. You cannot check here
 	 * whether WoWLAN is enabled or not, and your code will run even if
@@ -395,7 +393,15 @@ static int iwl_pci_resume(struct device *device)
 	 */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
-	return iwl_trans_resume(iwl_trans);
+	if (!trans->op_mode)
+		return 0;
+
+	iwl_enable_rfkill_int(trans);
+
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+	return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
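
The probe path now propagates a real errno out of iwl_trans_pcie_alloc() instead of collapsing every failure to NULL and a hard-coded -ENOMEM. The mechanism is the usual pointer-encoded errno idiom; a simplified sketch of the helpers, for illustration only (the kernel's versions live in include/linux/err.h):

    #define MAX_ERRNO	4095

    static inline void *ERR_PTR(long error)
    {
        return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
        return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

Because the last page of the address space is never a valid allocation, small negative errno values can be carried inside the pointer, so the caller above can return PTR_ERR(iwl_trans) and report the allocator's actual failure cause rather than a misleading -ENOMEM.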
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index b654dcdd048a..fa22639b63c9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -392,7 +392,6 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_pcie_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fd848cd1583e..3f237b42eb36 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -112,15 +112,16 @@
  */
 static int iwl_rxq_space(const struct iwl_rxq *rxq)
 {
-	int s = rxq->read - rxq->write;
-
-	if (s <= 0)
-		s += RX_QUEUE_SIZE;
-	/* keep some buffer to not confuse full and empty queue */
-	s -= 2;
-	if (s < 0)
-		s = 0;
-	return s;
+	/* Make sure RX_QUEUE_SIZE is a power of 2 */
+	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+	/*
+	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+	 * between empty and completely full queues.
+	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+	 * defined for negative dividends.
+	 */
+	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
 }
 
 /*
@@ -793,7 +794,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 	}
 
 	iwl_pcie_dump_csr(trans);
-	iwl_pcie_dump_fh(trans, NULL);
+	iwl_dump_fh(trans, NULL);
 
 	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
 	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -1120,6 +1121,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
 	struct iwl_trans *trans = data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 inta, inta_mask;
+	irqreturn_t ret = IRQ_NONE;
 
 	lockdep_assert_held(&trans_pcie->irq_lock);
 
@@ -1168,10 +1170,8 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
 	/* the thread will service interrupts and re-enable them */
 	if (likely(inta))
 		return IRQ_WAKE_THREAD;
-	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-		 !trans_pcie->inta)
-		iwl_enable_interrupts(trans);
-	return IRQ_HANDLED;
+
+	ret = IRQ_HANDLED;
 
 none:
 	/* re-enable interrupts here since we don't have anything to service. */
@@ -1180,7 +1180,7 @@ none:
 	    !trans_pcie->inta)
 		iwl_enable_interrupts(trans);
 
-	return IRQ_NONE;
+	return ret;
 }
 
 /* interrupt handler using ict table, with this interrupt driver will
@@ -1199,6 +1199,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 	u32 val = 0;
 	u32 read;
 	unsigned long flags;
+	irqreturn_t ret = IRQ_NONE;
 
 	if (!trans)
 		return IRQ_NONE;
@@ -1211,7 +1212,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 	 * use legacy interrupt.
 	 */
 	if (unlikely(!trans_pcie->use_ict)) {
-		irqreturn_t ret = iwl_pcie_isr(irq, data);
+		ret = iwl_pcie_isr(irq, data);
 		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 		return ret;
 	}
@@ -1280,17 +1281,9 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 	if (likely(inta)) {
 		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 		return IRQ_WAKE_THREAD;
-	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-		   !trans_pcie->inta) {
-		/* Allow interrupt if was disabled by this handler and
-		 * no tasklet was schedules, We should not enable interrupt,
-		 * tasklet will enable it.
-		 */
-		iwl_enable_interrupts(trans);
 	}
 
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-	return IRQ_HANDLED;
+	ret = IRQ_HANDLED;
 
 none:
 	/* re-enable interrupts here since we don't have anything to service.
@@ -1301,5 +1294,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 		iwl_enable_interrupts(trans);
 
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-	return IRQ_NONE;
+	return ret;
 }
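
With RX_QUEUE_SIZE a power of two, the masked subtraction above counts free slots while deliberately keeping one slot unused, so an empty queue (read == write) stays distinguishable from a full one, and unsigned wrap-around keeps the result correct after the write pointer laps the buffer. A worked standalone check of the same arithmetic:

    #include <assert.h>

    #define RX_QUEUE_SIZE 256	/* must be a power of 2 */

    static unsigned int rxq_space(unsigned int read, unsigned int write)
    {
        return (read - write - 1) & (RX_QUEUE_SIZE - 1);
    }

    int main(void)
    {
        assert(rxq_space(0, 0) == 255);	/* empty: one slot in reserve */
        assert(rxq_space(10, 9) == 0);	/* write one behind read: full */
        assert(rxq_space(3, 250) == 8);	/* still correct after wrap */
        return 0;
    }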
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 390e2f058aff..bad95d28d50d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -820,25 +820,6 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
 		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
-{
-	return 0;
-}
-
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
-{
-	bool hw_rfkill;
-
-	iwl_enable_rfkill_int(trans);
-
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-
-	return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
 					   unsigned long *flags)
 {
@@ -1038,71 +1019,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
 }
 
-static const char *get_fh_string(int cmd)
-{
-#define IWL_CMD(x) case x: return #x
-	switch (cmd) {
-	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
-	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
-	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
-	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
-	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
-	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
-	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
-	IWL_CMD(FH_TSSR_TX_STATUS_REG);
-	IWL_CMD(FH_TSSR_TX_ERROR_REG);
-	default:
-		return "UNKNOWN";
-	}
-#undef IWL_CMD
-}
-
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
-{
-	int i;
-	static const u32 fh_tbl[] = {
-		FH_RSCSR_CHNL0_STTS_WPTR_REG,
-		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-		FH_RSCSR_CHNL0_WPTR,
-		FH_MEM_RCSR_CHNL0_CONFIG_REG,
-		FH_MEM_RSSR_SHARED_CTRL_REG,
-		FH_MEM_RSSR_RX_STATUS_REG,
-		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
-		FH_TSSR_TX_STATUS_REG,
-		FH_TSSR_TX_ERROR_REG
-	};
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-	if (buf) {
-		int pos = 0;
-		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
-
-		*buf = kmalloc(bufsz, GFP_KERNEL);
-		if (!*buf)
-			return -ENOMEM;
-
-		pos += scnprintf(*buf + pos, bufsz - pos,
-				 "FH register values:\n");
-
-		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
-			pos += scnprintf(*buf + pos, bufsz - pos,
-					 " %34s: 0X%08x\n",
-					 get_fh_string(fh_tbl[i]),
-					 iwl_read_direct32(trans, fh_tbl[i]));
-
-		return pos;
-	}
-#endif
-
-	IWL_ERR(trans, "FH register values:\n");
-	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
-		IWL_ERR(trans, " %34s: 0X%08x\n",
-			get_fh_string(fh_tbl[i]),
-			iwl_read_direct32(trans, fh_tbl[i]));
-
-	return 0;
-}
-
 static const char *get_csr_string(int cmd)
 {
 #define IWL_CMD(x) case x: return #x
@@ -1183,18 +1099,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
 } while (0)
 
 /* file operation */
-#define DEBUGFS_READ_FUNC(name)						\
-static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
-					char __user *user_buf,		\
-					size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name)					\
-static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
-					const char __user *user_buf,	\
-					size_t count, loff_t *ppos);
-
 #define DEBUGFS_READ_FILE_OPS(name)					\
-	DEBUGFS_READ_FUNC(name);					\
 static const struct file_operations iwl_dbgfs_##name##_ops = {	\
 	.read = iwl_dbgfs_##name##_read,				\
 	.open = simple_open,						\
@@ -1202,7 +1107,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
 };
 
 #define DEBUGFS_WRITE_FILE_OPS(name)					\
-	DEBUGFS_WRITE_FUNC(name);					\
 static const struct file_operations iwl_dbgfs_##name##_ops = {	\
 	.write = iwl_dbgfs_##name##_write,				\
 	.open = simple_open,						\
@@ -1210,8 +1114,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
 };
 
 #define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
-	DEBUGFS_READ_FUNC(name);					\
-	DEBUGFS_WRITE_FUNC(name);					\
 static const struct file_operations iwl_dbgfs_##name##_ops = {	\
 	.write = iwl_dbgfs_##name##_write,				\
 	.read = iwl_dbgfs_##name##_read,				\
@@ -1395,7 +1297,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 	int pos = 0;
 	ssize_t ret = -EFAULT;
 
-	ret = pos = iwl_pcie_dump_fh(trans, &buf);
+	ret = pos = iwl_dump_fh(trans, &buf);
 	if (buf) {
 		ret = simple_read_from_buffer(user_buf,
 					      count, ppos, buf, pos);
@@ -1459,10 +1361,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 
 	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
 
-#ifdef CONFIG_PM_SLEEP
-	.suspend = iwl_trans_pcie_suspend,
-	.resume = iwl_trans_pcie_resume,
-#endif
 	.write8 = iwl_trans_pcie_write8,
 	.write32 = iwl_trans_pcie_write32,
 	.read32 = iwl_trans_pcie_read32,
@@ -1488,9 +1386,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 	trans = kzalloc(sizeof(struct iwl_trans) +
 			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-
-	if (!trans)
-		return NULL;
+	if (!trans) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -1502,15 +1401,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	spin_lock_init(&trans_pcie->reg_lock);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-	if (pci_enable_device(pdev)) {
-		err = -ENODEV;
-		goto out_no_pci;
+	if (!cfg->base_params->pcie_l1_allowed) {
+		/*
+		 * W/A - seems to solve weird behavior. We need to remove this
+		 * if we don't want to stay in L1 all the time. This wastes a
+		 * lot of power.
+		 */
+		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+				       PCIE_LINK_STATE_L1 |
+				       PCIE_LINK_STATE_CLKPM);
 	}
 
-	/* W/A - seems to solve weird behavior. We need to remove this if we
-	 * don't want to stay in L1 all the time. This wastes a lot of power */
-	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-			       PCIE_LINK_STATE_CLKPM);
+	err = pci_enable_device(pdev);
+	if (err)
+		goto out_no_pci;
 
 	pci_set_master(pdev);
 
@@ -1579,17 +1483,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 					  SLAB_HWCACHE_ALIGN,
 					  NULL);
 
-	if (!trans->dev_cmd_pool)
+	if (!trans->dev_cmd_pool) {
+		err = -ENOMEM;
 		goto out_pci_disable_msi;
+	}
 
 	trans_pcie->inta_mask = CSR_INI_SET_MASK;
 
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
 
-	if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
-				 iwl_pcie_irq_handler,
-				 IRQF_SHARED, DRV_NAME, trans)) {
+	err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+				   iwl_pcie_irq_handler,
+				   IRQF_SHARED, DRV_NAME, trans);
+	if (err) {
 		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
 		goto out_free_ict;
 	}
@@ -1608,5 +1515,6 @@ out_pci_disable_device:
 	pci_disable_device(pdev);
 out_no_pci:
 	kfree(trans);
-	return NULL;
+out:
+	return ERR_PTR(err);
 }
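
iwl_trans_pcie_alloc() now records an errno at every failure point and unwinds through goto labels in reverse order of acquisition before handing the code back via ERR_PTR(). A reduced sketch of the pattern with hypothetical resources (not the driver's actual setup steps):

    #include <stdlib.h>
    #include <errno.h>

    struct ctx {
        char *buf;
    };

    /* Two-step setup: each failure jumps to a label that undoes prior steps. */
    static struct ctx *ctx_alloc(void)
    {
        struct ctx *c;
        int err;

        c = calloc(1, sizeof(*c));
        if (!c) {
            err = -ENOMEM;
            goto out;
        }

        c->buf = malloc(4096);
        if (!c->buf) {
            err = -ENOMEM;
            goto out_free_ctx;	/* undo only what already succeeded */
        }

        return c;

    out_free_ctx:
        free(c);
    out:
        return (struct ctx *)(long)err;	/* stands in for ERR_PTR(err) */
    }

    int main(void)
    {
        struct ctx *c = ctx_alloc();

        if ((unsigned long)c >= (unsigned long)-4095L)	/* IS_ERR() equivalent */
            return 1;

        free(c->buf);
        free(c);
        return 0;
    }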
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index c47c92165aba..f45eb29c2ede 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -65,18 +65,30 @@
65 ***************************************************/ 65 ***************************************************/
66static int iwl_queue_space(const struct iwl_queue *q) 66static int iwl_queue_space(const struct iwl_queue *q)
67{ 67{
68 int s = q->read_ptr - q->write_ptr; 68 unsigned int max;
69 69 unsigned int used;
70 if (q->read_ptr > q->write_ptr) 70
71 s -= q->n_bd; 71 /*
72 72 * To avoid ambiguity between empty and completely full queues, there
73 if (s <= 0) 73 * should always be less than q->n_bd elements in the queue.
74 s += q->n_window; 74 * If q->n_window is smaller than q->n_bd, there is no need to reserve
75 /* keep some reserve to not confuse empty and full situations */ 75 * any queue entries for this purpose.
76 s -= 2; 76 */
77 if (s < 0) 77 if (q->n_window < q->n_bd)
78 s = 0; 78 max = q->n_window;
79 return s; 79 else
80 max = q->n_bd - 1;
81
82 /*
83 * q->n_bd is a power of 2, so the following is equivalent to modulo by
84 * q->n_bd and is well defined for negative dividends.
85 */
86 used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
87
88 if (WARN_ON(used > max))
89 return 0;
90
91 return max - used;
80} 92}
81 93
82/* 94/*
@@ -451,13 +463,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
451 return -EINVAL; 463 return -EINVAL;
452 } 464 }
453 465
454 if (WARN_ON(addr & ~DMA_BIT_MASK(36))) 466 if (WARN(addr & ~IWL_TX_DMA_MASK,
467 "Unaligned address = %llx\n", (unsigned long long)addr))
455 return -EINVAL; 468 return -EINVAL;
456 469
457 if (unlikely(addr & ~IWL_TX_DMA_MASK))
458 IWL_ERR(trans, "Unaligned address = %llx\n",
459 (unsigned long long)addr);
460
461 iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); 470 iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
462 471
463 return 0; 472 return 0;
@@ -829,7 +838,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
829 sizeof(struct iwl_txq), GFP_KERNEL); 838 sizeof(struct iwl_txq), GFP_KERNEL);
830 if (!trans_pcie->txq) { 839 if (!trans_pcie->txq) {
831 IWL_ERR(trans, "Not enough memory for txq\n"); 840 IWL_ERR(trans, "Not enough memory for txq\n");
832 ret = ENOMEM; 841 ret = -ENOMEM;
833 goto error; 842 goto error;
834 } 843 }
835 844
@@ -1153,10 +1162,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 /*
  * iwl_pcie_enqueue_hcmd - enqueue a uCode command
  * @priv: device private data point
- * @cmd: a point to the ucode command structure
+ * @cmd: a pointer to the ucode command structure
  *
- * The function returns < 0 values to indicate the operation is
- * failed. On success, it turns the index (> 0) of command in the
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of command in the
  * command queue.
  */
 static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
@@ -1619,10 +1628,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
 
-	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
-		WARN_ON_ONCE(1);
+	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+		      "TX on unused queue %d\n", txq_id))
 		return -EINVAL;
-	}
 
 	spin_lock(&txq->lock);
 
@@ -1632,7 +1640,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	 * Check here that the packets are in the right place on the ring.
 	 */
 	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-	WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
+	WARN_ONCE(txq->ampdu &&
 		  (wifi_seq & 0xff) != q->write_ptr,
 		  "Q: %d WiFi Seq %d tfdNum %d",
 		  txq_id, wifi_seq, q->write_ptr);
@@ -1664,7 +1672,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	 */
 	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
 	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
-	tb1_len = (len + 3) & ~3;
+	tb1_len = ALIGN(len, 4);
 
 	/* Tell NIC about any 2-byte padding after MAC header */
 	if (tb1_len != len)
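ALIGN(len, 4) is the kernel's named round-up-to-a-power-of-2 helper and expands to the same arithmetic as the open-coded mask it replaces. A standalone check of the equivalence (the macro is re-derived here, not taken from the kernel header):

	#include <assert.h>

	#define ALIGN_POW2(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int x;

		for (x = 0; x < 64; x++)
			assert(ALIGN_POW2(x, 4u) == ((x + 3) & ~3u));
		assert(ALIGN_POW2(5u, 4u) == 8);	/* rounds up */
		assert(ALIGN_POW2(8u, 4u) == 8);	/* already aligned */
		return 0;
	}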
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index efae07e05c80..6fef746345bc 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1017,7 +1017,7 @@ static int lbs_add_mesh(struct lbs_private *priv)
 
 	mesh_dev->netdev_ops = &mesh_netdev_ops;
 	mesh_dev->ethtool_ops = &lbs_ethtool_ops;
-	memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
+	eth_hw_addr_inherit(mesh_dev, priv->dev);
 
 	SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cb34c7895f2a..2cd3f54e1efa 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -867,7 +867,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 
 	if (WARN_ON(skb->len < 10)) {
 		/* Should not happen; just a sanity check for addr1 use */
-		dev_kfree_skb(skb);
+		ieee80211_free_txskb(hw, skb);
 		return;
 	}
 
@@ -884,13 +884,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 	}
 
 	if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
-		dev_kfree_skb(skb);
+		ieee80211_free_txskb(hw, skb);
 		return;
 	}
 
 	if (data->idle && !data->tmp_chan) {
 		wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
-		dev_kfree_skb(skb);
+		ieee80211_free_txskb(hw, skb);
 		return;
 	}
 
@@ -1364,6 +1364,7 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
 static int hwsim_fops_ps_write(void *dat, u64 val);
 
 static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
 					void *data, int len)
 {
 	struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -2309,7 +2310,9 @@ static int __init init_mac80211_hwsim(void)
 	hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
 
 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
-			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+			    WIPHY_FLAG_AP_UAPSD;
+	hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
 
 	/* ask mac80211 to reserve space for magic */
 	hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -2525,8 +2528,10 @@ static int __init init_mac80211_hwsim(void)
 	}
 
 	hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
-	if (hwsim_mon == NULL)
+	if (hwsim_mon == NULL) {
+		err = -ENOMEM;
 		goto failed;
+	}
 
 	rtnl_lock();
 
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 41e9d25a2d8e..0b803c05cab3 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -292,6 +292,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
 	struct mwifiex_ie_types_extcap *ext_cap;
 	int ret_len = 0;
 	struct ieee80211_supported_band *sband;
+	struct ieee_types_header *hdr;
 	u8 radio_type;
 
 	if (!buffer || !*buffer)
@@ -388,17 +389,24 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
 	}
 
 	if (bss_desc->bcn_ext_cap) {
+		hdr = (void *)bss_desc->bcn_ext_cap;
 		ext_cap = (struct mwifiex_ie_types_extcap *) *buffer;
 		memset(ext_cap, 0, sizeof(struct mwifiex_ie_types_extcap));
 		ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
-		ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
+		ext_cap->header.len = cpu_to_le16(hdr->len);
 
-		memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
+		memcpy((u8 *)ext_cap->ext_capab,
 		       bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
 		       le16_to_cpu(ext_cap->header.len));
 
-		*buffer += sizeof(struct mwifiex_ie_types_extcap);
-		ret_len += sizeof(struct mwifiex_ie_types_extcap);
+		if (hdr->len > 3 &&
+		    ext_cap->ext_capab[3] & WLAN_EXT_CAPA4_INTERWORKING_ENABLED)
+			priv->hs2_enabled = true;
+		else
+			priv->hs2_enabled = false;
+
+		*buffer += sizeof(struct mwifiex_ie_types_extcap) + hdr->len;
+		ret_len += sizeof(struct mwifiex_ie_types_extcap) + hdr->len;
 	}
 
 	return ret_len;
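The change above switches the extended-capabilities TLV from a fixed one-byte field to a flexible array sized by the beacon IE's own length, so the full capability bitmap is copied and the write pointer advances by header plus payload. A standalone sketch of that TLV layout (illustrative names; host-endian for brevity where the driver uses cpu_to_le16):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct tlv_header {
		uint16_t type;
		uint16_t len;		/* length of the payload in bytes */
	} __attribute__((packed));

	struct ext_cap_tlv {
		struct tlv_header header;
		uint8_t ext_capab[];	/* flexible array, sized by header.len */
	} __attribute__((packed));

	int main(void)
	{
		uint8_t buf[64];
		const uint8_t caps[4] = { 0x00, 0x00, 0x00, 0x80 };
		struct ext_cap_tlv *tlv = (struct ext_cap_tlv *)buf;

		tlv->header.type = 127;		/* extended capabilities IE id */
		tlv->header.len = sizeof(caps);
		memcpy(tlv->ext_capab, caps, sizeof(caps));

		/* Consumers advance by header size plus the advertised length. */
		printf("TLV occupies %zu bytes\n",
		       sizeof(struct tlv_header) + tlv->header.len);
		return 0;
	}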
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index a78e0651409c..21c688264708 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -69,7 +69,8 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
 	memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
 
 	/* Copy SNAP header */
-	snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
+	snap.snap_type =
+		le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset));
 	dt_offset += sizeof(u16);
 
 	memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
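The SNAP-type fix reads the 16-bit field through a __le16 view plus le16_to_cpu() so the result is identical on big- and little-endian hosts. The same idea in portable userspace C, composing the bytes explicitly (get_le16() is a stand-in, not a kernel API):

	#include <assert.h>
	#include <stdint.h>

	static uint16_t get_le16(const uint8_t *p)
	{
		/* Byte composition is endian-independent, unlike *(uint16_t *)p. */
		return (uint16_t)(p[0] | (p[1] << 8));
	}

	int main(void)
	{
		const uint8_t wire[2] = { 0x00, 0x08 };	/* little-endian 0x0800 */

		assert(get_le16(wire) == 0x0800);
		return 0;
	}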
@@ -189,7 +190,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
 	skb_src = skb_dequeue(&pra_list->skb_head);
 
-	pra_list->total_pkts_size -= skb_src->len;
+	pra_list->total_pkt_count--;
 
 	atomic_dec(&priv->wmm.tx_pkts_queued);
 
@@ -268,7 +269,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
 	skb_queue_tail(&pra_list->skb_head, skb_aggr);
 
-	pra_list->total_pkts_size += skb_aggr->len;
+	pra_list->total_pkt_count++;
 
 	atomic_inc(&priv->wmm.tx_pkts_queued);
 
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 89459db4c53b..fbad00a5abc8 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -25,7 +25,9 @@ module_param(reg_alpha2, charp, 0);
 
 static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
 	{
-		.max = 2, .types = BIT(NL80211_IFTYPE_STATION),
+		.max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+				   BIT(NL80211_IFTYPE_P2P_GO) |
+				   BIT(NL80211_IFTYPE_P2P_CLIENT),
 	},
 	{
 		.max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -189,6 +191,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	struct sk_buff *skb;
 	u16 pkt_len;
 	const struct ieee80211_mgmt *mgmt;
+	struct mwifiex_txinfo *tx_info;
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
 
 	if (!buf || !len) {
@@ -216,6 +219,10 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 		return -ENOMEM;
 	}
 
+	tx_info = MWIFIEX_SKB_TXCB(skb);
+	tx_info->bss_num = priv->bss_num;
+	tx_info->bss_type = priv->bss_type;
+
 	mwifiex_form_mgmt_frame(skb, buf, len);
 	mwifiex_queue_tx_pkt(priv, skb);
 
@@ -235,16 +242,20 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
 			       u16 frame_type, bool reg)
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+	u32 mask;
 
 	if (reg)
-		priv->mgmt_frame_mask |= BIT(frame_type >> 4);
+		mask = priv->mgmt_frame_mask | BIT(frame_type >> 4);
 	else
-		priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
-
-	mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
-			       HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
+		mask = priv->mgmt_frame_mask & ~BIT(frame_type >> 4);
 
-	wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+	if (mask != priv->mgmt_frame_mask) {
+		priv->mgmt_frame_mask = mask;
+		mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
+				       HostCmd_ACT_GEN_SET, 0,
+				       &priv->mgmt_frame_mask);
+		wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+	}
 }
 
 /*
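The reworked registration helper computes the new mask into a local and only issues the firmware command when the mask actually changes, avoiding redundant round trips. A userspace sketch of the pattern (send_cmd() and the cached global are stand-ins):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t cached_mask;

	static void send_cmd(uint32_t mask)
	{
		printf("command sent: 0x%x\n", mask);
	}

	static void register_frame(uint16_t frame_type, bool reg)
	{
		uint32_t mask;

		if (reg)
			mask = cached_mask | (1u << (frame_type >> 4));
		else
			mask = cached_mask & ~(1u << (frame_type >> 4));

		if (mask != cached_mask) {	/* skip redundant commands */
			cached_mask = mask;
			send_cmd(mask);
		}
	}

	int main(void)
	{
		register_frame(0x00d0, true);	/* sent */
		register_frame(0x00d0, true);	/* no-op: mask unchanged */
		return 0;
	}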
@@ -1497,6 +1508,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
1497 " reason code %d\n", priv->cfg_bssid, reason_code); 1508 " reason code %d\n", priv->cfg_bssid, reason_code);
1498 1509
1499 memset(priv->cfg_bssid, 0, ETH_ALEN); 1510 memset(priv->cfg_bssid, 0, ETH_ALEN);
1511 priv->hs2_enabled = false;
1500 1512
1501 return 0; 1513 return 0;
1502} 1514}
@@ -2296,10 +2308,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2296} 2308}
2297EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); 2309EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
2298 2310
2299#ifdef CONFIG_PM
2300static bool 2311static bool
2301mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat, 2312mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
2302 s8 *byte_seq) 2313 u8 max_byte_seq)
2303{ 2314{
2304 int j, k, valid_byte_cnt = 0; 2315 int j, k, valid_byte_cnt = 0;
2305 bool dont_care_byte = false; 2316 bool dont_care_byte = false;
@@ -2317,16 +2328,17 @@ mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
2317 dont_care_byte = true; 2328 dont_care_byte = true;
2318 } 2329 }
2319 2330
2320 if (valid_byte_cnt > MAX_BYTESEQ) 2331 if (valid_byte_cnt > max_byte_seq)
2321 return false; 2332 return false;
2322 } 2333 }
2323 } 2334 }
2324 2335
2325 byte_seq[MAX_BYTESEQ] = valid_byte_cnt; 2336 byte_seq[max_byte_seq] = valid_byte_cnt;
2326 2337
2327 return true; 2338 return true;
2328} 2339}
2329 2340
2341#ifdef CONFIG_PM
2330static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, 2342static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2331 struct cfg80211_wowlan *wowlan) 2343 struct cfg80211_wowlan *wowlan)
2332{ 2344{
@@ -2335,7 +2347,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2335 struct mwifiex_mef_entry *mef_entry; 2347 struct mwifiex_mef_entry *mef_entry;
2336 int i, filt_num = 0, ret; 2348 int i, filt_num = 0, ret;
2337 bool first_pat = true; 2349 bool first_pat = true;
2338 u8 byte_seq[MAX_BYTESEQ + 1]; 2350 u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
2339 const u8 ipv4_mc_mac[] = {0x33, 0x33}; 2351 const u8 ipv4_mc_mac[] = {0x33, 0x33};
2340 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e}; 2352 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
2341 struct mwifiex_private *priv = 2353 struct mwifiex_private *priv =
@@ -2365,7 +2377,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2365 for (i = 0; i < wowlan->n_patterns; i++) { 2377 for (i = 0; i < wowlan->n_patterns; i++) {
2366 memset(byte_seq, 0, sizeof(byte_seq)); 2378 memset(byte_seq, 0, sizeof(byte_seq));
2367 if (!mwifiex_is_pattern_supported(&wowlan->patterns[i], 2379 if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
2368 byte_seq)) { 2380 byte_seq,
2381 MWIFIEX_MEF_MAX_BYTESEQ)) {
2369 wiphy_err(wiphy, "Pattern not supported\n"); 2382 wiphy_err(wiphy, "Pattern not supported\n");
2370 kfree(mef_entry); 2383 kfree(mef_entry);
2371 return -EOPNOTSUPP; 2384 return -EOPNOTSUPP;
@@ -2373,16 +2386,16 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2373 2386
2374 if (!wowlan->patterns[i].pkt_offset) { 2387 if (!wowlan->patterns[i].pkt_offset) {
2375 if (!(byte_seq[0] & 0x01) && 2388 if (!(byte_seq[0] & 0x01) &&
2376 (byte_seq[MAX_BYTESEQ] == 1)) { 2389 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
2377 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST; 2390 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
2378 continue; 2391 continue;
2379 } else if (is_broadcast_ether_addr(byte_seq)) { 2392 } else if (is_broadcast_ether_addr(byte_seq)) {
2380 mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST; 2393 mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
2381 continue; 2394 continue;
2382 } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) && 2395 } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
2383 (byte_seq[MAX_BYTESEQ] == 2)) || 2396 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
2384 (!memcmp(byte_seq, ipv6_mc_mac, 3) && 2397 (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
2385 (byte_seq[MAX_BYTESEQ] == 3))) { 2398 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
2386 mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST; 2399 mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
2387 continue; 2400 continue;
2388 } 2401 }
@@ -2408,7 +2421,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2408 mef_entry->filter[filt_num].repeat = 16; 2421 mef_entry->filter[filt_num].repeat = 16;
2409 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr, 2422 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
2410 ETH_ALEN); 2423 ETH_ALEN);
2411 mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN; 2424 mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
2425 ETH_ALEN;
2412 mef_entry->filter[filt_num].offset = 14; 2426 mef_entry->filter[filt_num].offset = 14;
2413 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2427 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2414 if (filt_num) 2428 if (filt_num)
@@ -2442,6 +2456,119 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
 }
 #endif
 
+static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
+{
+	const u8 ipv4_mc_mac[] = {0x33, 0x33};
+	const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
+	const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff};
+
+	if ((byte_seq[0] & 0x01) &&
+	    (byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 1))
+		return PACKET_TYPE_UNICAST;
+	else if (!memcmp(byte_seq, bc_mac, 4))
+		return PACKET_TYPE_BROADCAST;
+	else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
+		  byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 2) ||
+		 (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
+		  byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 3))
+		return PACKET_TYPE_MULTICAST;
+
+	return 0;
+}
+
+static int
+mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
+				struct cfg80211_coalesce_rules *crule,
+				struct mwifiex_coalesce_rule *mrule)
+{
+	u8 byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ + 1];
+	struct filt_field_param *param;
+	int i;
+
+	mrule->max_coalescing_delay = crule->delay;
+
+	param = mrule->params;
+
+	for (i = 0; i < crule->n_patterns; i++) {
+		memset(byte_seq, 0, sizeof(byte_seq));
+		if (!mwifiex_is_pattern_supported(&crule->patterns[i],
+						  byte_seq,
+						  MWIFIEX_COALESCE_MAX_BYTESEQ)) {
+			dev_err(priv->adapter->dev, "Pattern not supported\n");
+			return -EOPNOTSUPP;
+		}
+
+		if (!crule->patterns[i].pkt_offset) {
+			u8 pkt_type;
+
+			pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
+			if (pkt_type && mrule->pkt_type) {
+				dev_err(priv->adapter->dev,
+					"Multiple packet types not allowed\n");
+				return -EOPNOTSUPP;
+			} else if (pkt_type) {
+				mrule->pkt_type = pkt_type;
+				continue;
+			}
+		}
+
+		if (crule->condition == NL80211_COALESCE_CONDITION_MATCH)
+			param->operation = RECV_FILTER_MATCH_TYPE_EQ;
+		else
+			param->operation = RECV_FILTER_MATCH_TYPE_NE;
+
+		param->operand_len = byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ];
+		memcpy(param->operand_byte_stream, byte_seq,
+		       param->operand_len);
+		param->offset = crule->patterns[i].pkt_offset;
+		param++;
+
+		mrule->num_of_fields++;
+	}
+
+	if (!mrule->pkt_type) {
+		dev_err(priv->adapter->dev,
+			"Packet type can not be determined\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
+					 struct cfg80211_coalesce *coalesce)
+{
+	struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+	int i, ret;
+	struct mwifiex_ds_coalesce_cfg coalesce_cfg;
+	struct mwifiex_private *priv =
+			mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+	memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
+	if (!coalesce) {
+		dev_dbg(adapter->dev,
+			"Disable coalesce and reset all previous rules\n");
+		return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+					     HostCmd_ACT_GEN_SET, 0,
+					     &coalesce_cfg);
+	}
+
+	coalesce_cfg.num_of_rules = coalesce->n_rules;
+	for (i = 0; i < coalesce->n_rules; i++) {
+		ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
+						      &coalesce_cfg.rule[i]);
+		if (ret) {
+			dev_err(priv->adapter->dev,
+				"Recheck the patterns provided for rule %d\n",
+				i + 1);
+			return ret;
+		}
+	}
+
+	return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+				     HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
+}
+
 /* station cfg80211 operations */
 static struct cfg80211_ops mwifiex_cfg80211_ops = {
 	.add_virtual_intf = mwifiex_add_virtual_intf,
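The classification helper added above keys off the first bytes of the match pattern. The general rule it builds on: the least-significant bit of the first address octet (the I/G bit) distinguishes group from individual MAC addresses, and all-ones means broadcast. A standalone refresher of that rule (generic helpers, distinct from the driver's firmware-defined return codes):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool is_group_addr(const unsigned char *addr)
	{
		return addr[0] & 0x01;	/* I/G bit: group (mc/bc) when set */
	}

	static bool is_broadcast_addr(const unsigned char *addr)
	{
		static const unsigned char bc[6] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		return !memcmp(addr, bc, sizeof(bc));
	}

	int main(void)
	{
		const unsigned char mc[6] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};

		printf("group=%d broadcast=%d\n",
		       is_group_addr(mc), is_broadcast_addr(mc));
		return 0;
	}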
@@ -2476,12 +2603,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
 	.resume = mwifiex_cfg80211_resume,
 	.set_wakeup = mwifiex_cfg80211_set_wakeup,
 #endif
+	.set_coalesce = mwifiex_cfg80211_set_coalesce,
 };
 
 #ifdef CONFIG_PM
 static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
 	.flags = WIPHY_WOWLAN_MAGIC_PKT,
-	.n_patterns = MWIFIEX_MAX_FILTERS,
+	.n_patterns = MWIFIEX_MEF_MAX_FILTERS,
 	.pattern_min_len = 1,
 	.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
 	.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
@@ -2499,6 +2627,15 @@ static bool mwifiex_is_valid_alpha2(const char *alpha2)
 	return false;
 }
 
+static const struct wiphy_coalesce_support mwifiex_coalesce_support = {
+	.n_rules = MWIFIEX_COALESCE_MAX_RULES,
+	.max_delay = MWIFIEX_MAX_COALESCING_DELAY,
+	.n_patterns = MWIFIEX_COALESCE_MAX_FILTERS,
+	.pattern_min_len = 1,
+	.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
+	.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+};
+
 /*
  * This function registers the device with CFG802.11 subsystem.
  *
@@ -2560,6 +2697,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 	wiphy->wowlan = &mwifiex_wowlan_support;
 #endif
 
+	wiphy->coalesce = &mwifiex_coalesce_support;
+
 	wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
 				    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
 				    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 5178c4630d89..9eefacbc844b 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -404,11 +404,43 @@ mwifiex_is_rate_auto(struct mwifiex_private *priv)
 	return false;
 }
 
-/*
- * This function gets the supported data rates.
- *
- * The function works in both Ad-Hoc and infra mode by printing the
- * band and returning the data rates.
+/* This function gets the supported data rates from bitmask inside
+ * cfg80211_scan_request.
+ */
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+				    u8 *rates, u8 radio_type)
+{
+	struct wiphy *wiphy = priv->adapter->wiphy;
+	struct cfg80211_scan_request *request = priv->scan_request;
+	u32 num_rates, rate_mask;
+	struct ieee80211_supported_band *sband;
+	int i;
+
+	if (radio_type) {
+		sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+		if (WARN_ON_ONCE(!sband))
+			return 0;
+		rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+	} else {
+		sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+		if (WARN_ON_ONCE(!sband))
+			return 0;
+		rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+	}
+
+	num_rates = 0;
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if ((BIT(i) & rate_mask) == 0)
+			continue; /* skip rate */
+		rates[num_rates++] = (u8)(sband->bitrates[i].bitrate / 5);
+	}
+
+	return num_rates;
+}
+
+/* This function gets the supported data rates. The function works in
+ * both Ad-Hoc and infra mode by printing the band and returning the
+ * data rates.
  */
 u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
 {
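The `bitrate / 5` in the new helper is a unit conversion: cfg80211 stores ieee80211_rate.bitrate in units of 100 kb/s, while 802.11 supported-rates entries use units of 500 kb/s. A one-assert check of the arithmetic:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t bitrate_100kbps = 540;	/* 54 Mb/s, cfg80211 units */
		uint8_t rate_500kbps = (uint8_t)(bitrate_100kbps / 5);

		assert(rate_500kbps == 108);	/* 108 * 500 kb/s = 54 Mb/s */
		return 0;
	}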
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 94cc09d48444..5c85d7803d00 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -26,6 +26,7 @@
 #include <linux/wait.h>
 #include <linux/timer.h>
 #include <linux/ieee80211.h>
+#include <uapi/linux/if_arp.h>
 #include <net/mac80211.h>
 
 
@@ -75,7 +76,8 @@
 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT	BIT(0)
 #define MWIFIEX_BUF_FLAG_BRIDGED_PKT	BIT(1)
 
-#define MWIFIEX_BRIDGED_PKTS_THRESHOLD	1024
+#define MWIFIEX_BRIDGED_PKTS_THR_HIGH	1024
+#define MWIFIEX_BRIDGED_PKTS_THR_LOW	128
 
 enum mwifiex_bss_type {
 	MWIFIEX_BSS_TYPE_STA = 0,
@@ -151,4 +153,12 @@ struct mwifiex_types_wmm_info {
 	u8 reserved;
 	struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
 } __packed;
+
+struct mwifiex_arp_eth_header {
+	struct arphdr hdr;
+	u8 ar_sha[ETH_ALEN];
+	u8 ar_sip[4];
+	u8 ar_tha[ETH_ALEN];
+	u8 ar_tip[4];
+} __packed;
 #endif /* !_MWIFIEX_DECL_H_ */
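The new mwifiex_arp_eth_header is declared __packed so it overlays the 28-byte IPv4-over-Ethernet ARP body without compiler padding between fields. A standalone size check of an equivalent layout (simplified arphdr, illustrative names):

	#include <assert.h>
	#include <stdint.h>

	struct arphdr_demo {			/* fixed 8-byte ARP header */
		uint16_t ar_hrd, ar_pro;
		uint8_t  ar_hln, ar_pln;
		uint16_t ar_op;
	} __attribute__((packed));

	struct arp_eth_demo {
		struct arphdr_demo hdr;
		uint8_t ar_sha[6];		/* sender MAC */
		uint8_t ar_sip[4];		/* sender IPv4 */
		uint8_t ar_tha[6];		/* target MAC */
		uint8_t ar_tip[4];		/* target IPv4 */
	} __attribute__((packed));

	int main(void)
	{
		/* 8 + 6 + 4 + 6 + 4 = 28 bytes, the exact on-wire size */
		assert(sizeof(struct arp_eth_demo) == 28);
		return 0;
	}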
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 1b45aa533300..f80f30b6160e 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -85,9 +85,6 @@ enum KEY_TYPE_ID {
 #define WAPI_KEY_LEN			50
 
 #define MAX_POLL_TRIES			100
-
-#define MAX_MULTI_INTERFACE_POLL_TRIES	1000
-
 #define MAX_FIRMWARE_POLL_TRIES		100
 
 #define FIRMWARE_READY_SDIO		0xfedc
@@ -156,6 +153,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -297,6 +295,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_CAU_REG_ACCESS                    0x00ed
 #define HostCmd_CMD_SET_BSS_MODE                      0x00f7
 #define HostCmd_CMD_PCIE_DESC_DETAILS                 0x00fa
+#define HostCmd_CMD_COALESCE_CFG                      0x010a
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                          0x0112
@@ -453,7 +452,7 @@ enum P2P_MODES {
 	(((event_cause) >> 24) & 0x00ff)
 
 #define MWIFIEX_MAX_PATTERN_LEN		20
-#define MWIFIEX_MAX_OFFSET_LEN		50
+#define MWIFIEX_MAX_OFFSET_LEN		100
 #define STACK_NBYTES			100
 #define TYPE_DNUM			1
 #define TYPE_BYTESEQ			2
@@ -1331,7 +1330,7 @@ struct mwifiex_ie_types_2040bssco {
 
 struct mwifiex_ie_types_extcap {
 	struct mwifiex_ie_types_header header;
-	u8 ext_cap;
+	u8 ext_capab[0];
 } __packed;
 
 struct host_cmd_ds_mac_reg_access {
@@ -1369,11 +1368,6 @@ struct host_cmd_ds_802_11_eeprom_access {
 	u8 value;
 } __packed;
 
-struct host_cmd_tlv {
-	__le16 type;
-	__le16 len;
-} __packed;
-
 struct mwifiex_assoc_event {
 	u8 sta_addr[ETH_ALEN];
 	__le16 type;
@@ -1399,99 +1393,99 @@ struct host_cmd_11ac_vht_cfg {
 } __packed;
 
 struct host_cmd_tlv_akmp {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	__le16 key_mgmt;
 	__le16 key_mgmt_operation;
 } __packed;
 
 struct host_cmd_tlv_pwk_cipher {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	__le16 proto;
 	u8 cipher;
 	u8 reserved;
 } __packed;
 
 struct host_cmd_tlv_gwk_cipher {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 cipher;
 	u8 reserved;
 } __packed;
 
 struct host_cmd_tlv_passphrase {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 passphrase[0];
 } __packed;
 
 struct host_cmd_tlv_wep_key {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 key_index;
 	u8 is_default;
 	u8 key[1];
 };
 
 struct host_cmd_tlv_auth_type {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 auth_type;
 } __packed;
 
 struct host_cmd_tlv_encrypt_protocol {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	__le16 proto;
 } __packed;
 
 struct host_cmd_tlv_ssid {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 ssid[0];
 } __packed;
 
 struct host_cmd_tlv_rates {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 rates[0];
 } __packed;
 
 struct host_cmd_tlv_bcast_ssid {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 bcast_ctl;
 } __packed;
 
 struct host_cmd_tlv_beacon_period {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	__le16 period;
 } __packed;
 
 struct host_cmd_tlv_dtim_period {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 period;
 } __packed;
 
 struct host_cmd_tlv_frag_threshold {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	__le16 frag_thr;
 } __packed;
 
 struct host_cmd_tlv_rts_threshold {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	__le16 rts_thr;
 } __packed;
 
 struct host_cmd_tlv_retry_limit {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 limit;
 } __packed;
 
 struct host_cmd_tlv_mac_addr {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 mac_addr[ETH_ALEN];
 } __packed;
 
 struct host_cmd_tlv_channel_band {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	u8 band_config;
 	u8 channel;
 } __packed;
 
 struct host_cmd_tlv_ageout_timer {
-	struct host_cmd_tlv tlv;
+	struct mwifiex_ie_types_header header;
 	__le32 sta_ao_timer;
 } __packed;
 
@@ -1604,6 +1598,27 @@ struct host_cmd_ds_802_11_cfg_data {
 	__le16 data_len;
 } __packed;
 
+struct coalesce_filt_field_param {
+	u8 operation;
+	u8 operand_len;
+	__le16 offset;
+	u8 operand_byte_stream[4];
+};
+
+struct coalesce_receive_filt_rule {
+	struct mwifiex_ie_types_header header;
+	u8 num_of_fields;
+	u8 pkt_type;
+	__le16 max_coalescing_delay;
+	struct coalesce_filt_field_param params[0];
+} __packed;
+
+struct host_cmd_ds_coalesce_cfg {
+	__le16 action;
+	__le16 num_of_rules;
+	struct coalesce_receive_filt_rule rule[0];
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -1664,6 +1679,7 @@ struct host_cmd_ds_command {
 		struct host_cmd_ds_sta_deauth sta_deauth;
 		struct host_cmd_11ac_vht_cfg vht_cfg;
 		struct host_cmd_ds_802_11_cfg_data cfg_data;
+		struct host_cmd_ds_coalesce_cfg coalesce_cfg;
 	} params;
 } __packed;
 
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index e38342f86c51..220af4fe0fc6 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -87,7 +87,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
 	u8 *tmp;
 
 	input_len = le16_to_cpu(ie_list->len);
-	travel_len = sizeof(struct host_cmd_tlv);
+	travel_len = sizeof(struct mwifiex_ie_types_header);
 
 	ie_list->len = 0;
 
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 2cf8b964e966..6499117fce43 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -135,6 +135,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
 
 	priv->csa_chan = 0;
 	priv->csa_expire_time = 0;
+	priv->del_list_idx = 0;
+	priv->hs2_enabled = false;
 
 	return mwifiex_add_bss_prio_tbl(priv);
 }
@@ -377,18 +379,11 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
 static void
 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 {
-	int i;
-
 	if (!adapter) {
 		pr_err("%s: adapter is NULL\n", __func__);
 		return;
 	}
 
-	for (i = 0; i < adapter->priv_num; i++) {
-		if (adapter->priv[i])
-			del_timer_sync(&adapter->priv[i]->scan_delay_timer);
-	}
-
 	mwifiex_cancel_all_pending_cmd(adapter);
 
 	/* Free lock variables */
@@ -398,13 +393,8 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 	dev_dbg(adapter->dev, "info: free cmd buffer\n");
 	mwifiex_free_cmd_buffer(adapter);
 
-	del_timer(&adapter->cmd_timer);
-
 	dev_dbg(adapter->dev, "info: free scan table\n");
 
-	if (adapter->if_ops.cleanup_if)
-		adapter->if_ops.cleanup_if(adapter);
-
 	if (adapter->sleep_cfm)
 		dev_kfree_skb_any(adapter->sleep_cfm);
 }
@@ -702,7 +692,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
 		if (!adapter->winner) {
 			dev_notice(adapter->dev,
 				   "FW already running! Skip FW dnld\n");
-			poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
 			goto poll_fw;
 		}
 	}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 7f27e45680b5..00a95f4c6a6c 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -362,13 +362,13 @@ struct mwifiex_ds_misc_subsc_evt {
 	struct subsc_evt_cfg bcn_h_rssi_cfg;
 };
 
-#define MAX_BYTESEQ		6	/* non-adjustable */
-#define MWIFIEX_MAX_FILTERS	10
+#define MWIFIEX_MEF_MAX_BYTESEQ		6	/* non-adjustable */
+#define MWIFIEX_MEF_MAX_FILTERS		10
 
 struct mwifiex_mef_filter {
 	u16 repeat;
 	u16 offset;
-	s8 byte_seq[MAX_BYTESEQ + 1];
+	s8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
 	u8 filt_type;
 	u8 filt_action;
 };
@@ -376,7 +376,7 @@ struct mwifiex_mef_filter {
 struct mwifiex_mef_entry {
 	u8 mode;
 	u8 action;
-	struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS];
+	struct mwifiex_mef_filter filter[MWIFIEX_MEF_MAX_FILTERS];
 };
 
 struct mwifiex_ds_mef_cfg {
@@ -397,4 +397,39 @@ enum {
 	MWIFIEX_FUNC_SHUTDOWN,
 };
 
+enum COALESCE_OPERATION {
+	RECV_FILTER_MATCH_TYPE_EQ = 0x80,
+	RECV_FILTER_MATCH_TYPE_NE,
+};
+
+enum COALESCE_PACKET_TYPE {
+	PACKET_TYPE_UNICAST = 1,
+	PACKET_TYPE_MULTICAST = 2,
+	PACKET_TYPE_BROADCAST = 3
+};
+
+#define MWIFIEX_COALESCE_MAX_RULES	8
+#define MWIFIEX_COALESCE_MAX_BYTESEQ	4	/* non-adjustable */
+#define MWIFIEX_COALESCE_MAX_FILTERS	4
+#define MWIFIEX_MAX_COALESCING_DELAY	100	/* in msecs */
+
+struct filt_field_param {
+	u8 operation;
+	u8 operand_len;
+	u16 offset;
+	u8 operand_byte_stream[MWIFIEX_COALESCE_MAX_BYTESEQ];
+};
+
+struct mwifiex_coalesce_rule {
+	u16 max_coalescing_delay;
+	u8 num_of_fields;
+	u8 pkt_type;
+	struct filt_field_param params[MWIFIEX_COALESCE_MAX_FILTERS];
+};
+
+struct mwifiex_ds_coalesce_cfg {
+	u16 num_of_rules;
+	struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
+};
+
 #endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 12e778159ec5..9d7c0e6c4fc7 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1427,6 +1427,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
 
 	switch (priv->bss_mode) {
 	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
 		return mwifiex_deauthenticate_infra(priv, mac);
 	case NL80211_IFTYPE_ADHOC:
 		return mwifiex_send_cmd_sync(priv,
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 1753431de361..fd778337deee 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -191,12 +191,16 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
 {
 	s32 i;
 
+	if (adapter->if_ops.cleanup_if)
+		adapter->if_ops.cleanup_if(adapter);
+
 	del_timer(&adapter->cmd_timer);
 
 	/* Free private structures */
 	for (i = 0; i < adapter->priv_num; i++) {
 		if (adapter->priv[i]) {
 			mwifiex_free_curr_bcn(adapter->priv[i]);
+			del_timer_sync(&adapter->priv[i]->scan_delay_timer);
 			kfree(adapter->priv[i]);
 		}
 	}
@@ -386,6 +390,17 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
 }
 
 /*
+ * This function cancels all works in the queue and destroys
+ * the main workqueue.
+ */
+static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
+{
+	flush_workqueue(adapter->workqueue);
+	destroy_workqueue(adapter->workqueue);
+	adapter->workqueue = NULL;
+}
+
+/*
  * This function gets firmware and initializes it.
  *
  * The main initialization steps followed are -
@@ -394,16 +409,18 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
  */
 static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 {
-	int ret;
+	int ret, i;
 	char fmt[64];
 	struct mwifiex_private *priv;
 	struct mwifiex_adapter *adapter = context;
 	struct mwifiex_fw_image fw;
+	struct semaphore *sem = adapter->card_sem;
+	bool init_failed = false;
 
 	if (!firmware) {
 		dev_err(adapter->dev,
 			"Failed to get firmware %s\n", adapter->fw_name);
-		goto done;
+		goto err_dnld_fw;
 	}
 
 	memset(&fw, 0, sizeof(struct mwifiex_fw_image));
@@ -416,7 +433,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 	else
 		ret = mwifiex_dnld_fw(adapter, &fw);
 	if (ret == -1)
-		goto done;
+		goto err_dnld_fw;
 
 	dev_notice(adapter->dev, "WLAN FW is active\n");
 
@@ -428,13 +445,15 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 	}
 
 	/* enable host interrupt after fw dnld is successful */
-	if (adapter->if_ops.enable_int)
-		adapter->if_ops.enable_int(adapter);
+	if (adapter->if_ops.enable_int) {
+		if (adapter->if_ops.enable_int(adapter))
+			goto err_dnld_fw;
+	}
 
 	adapter->init_wait_q_woken = false;
 	ret = mwifiex_init_fw(adapter);
 	if (ret == -1) {
-		goto done;
+		goto err_init_fw;
 	} else if (!ret) {
 		adapter->hw_status = MWIFIEX_HW_STATUS_READY;
 		goto done;
@@ -443,12 +462,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 	wait_event_interruptible(adapter->init_wait_q,
 				 adapter->init_wait_q_woken);
 	if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
-		goto done;
+		goto err_init_fw;
 
 	priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
 	if (mwifiex_register_cfg80211(adapter)) {
 		dev_err(adapter->dev, "cannot register with cfg80211\n");
-		goto err_init_fw;
+		goto err_register_cfg80211;
 	}
 
 	rtnl_lock();
@@ -458,20 +477,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 		dev_err(adapter->dev, "cannot create default STA interface\n");
 		goto err_add_intf;
 	}
-
-	/* Create AP interface by default */
-	if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
-				      NL80211_IFTYPE_AP, NULL, NULL)) {
-		dev_err(adapter->dev, "cannot create default AP interface\n");
-		goto err_add_intf;
-	}
-
-	/* Create P2P interface by default */
-	if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
-				      NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
-		dev_err(adapter->dev, "cannot create default P2P interface\n");
-		goto err_add_intf;
-	}
 	rtnl_unlock();
 
 	mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -479,20 +484,52 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 	goto done;
 
 err_add_intf:
-	mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+	for (i = 0; i < adapter->priv_num; i++) {
+		priv = adapter->priv[i];
+
+		if (!priv)
+			continue;
+
+		if (priv->wdev && priv->netdev)
+			mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+	}
 	rtnl_unlock();
+err_register_cfg80211:
+	wiphy_unregister(adapter->wiphy);
+	wiphy_free(adapter->wiphy);
 err_init_fw:
 	if (adapter->if_ops.disable_int)
 		adapter->if_ops.disable_int(adapter);
+err_dnld_fw:
 	pr_debug("info: %s: unregister device\n", __func__);
-	adapter->if_ops.unregister_dev(adapter);
+	if (adapter->if_ops.unregister_dev)
+		adapter->if_ops.unregister_dev(adapter);
+
+	if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
+	    (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
+		pr_debug("info: %s: shutdown mwifiex\n", __func__);
+		adapter->init_wait_q_woken = false;
+
+		if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+			wait_event_interruptible(adapter->init_wait_q,
+						 adapter->init_wait_q_woken);
+	}
+	adapter->surprise_removed = true;
+	mwifiex_terminate_workqueue(adapter);
+	init_failed = true;
 done:
 	if (adapter->cal_data) {
 		release_firmware(adapter->cal_data);
 		adapter->cal_data = NULL;
 	}
-	release_firmware(adapter->firmware);
+	if (adapter->firmware) {
+		release_firmware(adapter->firmware);
+		adapter->firmware = NULL;
+	}
 	complete(&adapter->fw_load);
+	if (init_failed)
+		mwifiex_free_adapter(adapter);
+	up(sem);
 	return;
 }
 
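The reshuffled error path follows the kernel's goto-ladder idiom: each label undoes only the steps that succeeded before the failure, in reverse order, which is why adding err_register_cfg80211/err_dnld_fw and relocating the cleanups changes exactly what runs on each failure. A toy model of the idiom (simulated resources, not driver code):

	#include <stdio.h>
	#include <stdlib.h>

	static int setup(void)
	{
		char *a, *b;

		a = malloc(16);
		if (!a)
			goto err_a;
		b = malloc(16);
		if (!b)
			goto err_b;

		/* ... further init steps would chain more labels ... */
		free(b);
		free(a);
		return 0;

	err_b:
		free(a);	/* unwind only what succeeded before the failure */
	err_a:
		return -1;
	}

	int main(void)
	{
		printf("setup -> %d\n", setup());
		return 0;
	}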
@@ -803,18 +840,6 @@ static void mwifiex_main_work_queue(struct work_struct *work)
 }
 
 /*
- * This function cancels all works in the queue and destroys
- * the main workqueue.
- */
-static void
-mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
-{
-	flush_workqueue(adapter->workqueue);
-	destroy_workqueue(adapter->workqueue);
-	adapter->workqueue = NULL;
-}
-
-/*
  * This function adds the card.
  *
  * This function follows the following major steps to set up the device -
@@ -842,6 +867,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
 	}
 
 	adapter->iface_type = iface_type;
+	adapter->card_sem = sem;
 
 	adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
 	adapter->surprise_removed = false;
@@ -872,17 +898,12 @@ mwifiex_add_card(void *card, struct semaphore *sem,
 		goto err_init_fw;
 	}
 
-	up(sem);
 	return 0;
 
 err_init_fw:
 	pr_debug("info: %s: unregister device\n", __func__);
 	if (adapter->if_ops.unregister_dev)
 		adapter->if_ops.unregister_dev(adapter);
-err_registerdev:
-	adapter->surprise_removed = true;
-	mwifiex_terminate_workqueue(adapter);
-err_kmalloc:
 	if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
 	    (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
 		pr_debug("info: %s: shutdown mwifiex\n", __func__);
@@ -892,7 +913,10 @@ err_kmalloc:
 		wait_event_interruptible(adapter->init_wait_q,
 					 adapter->init_wait_q_woken);
 	}
-
+err_registerdev:
+	adapter->surprise_removed = true;
+	mwifiex_terminate_workqueue(adapter);
+err_kmalloc:
 	mwifiex_free_adapter(adapter);
 
 err_init_sw:
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 253e0bd38e25..1d72f13adb9d 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -204,11 +204,11 @@ struct mwifiex_ra_list_tbl {
 	struct list_head list;
 	struct sk_buff_head skb_head;
 	u8 ra[ETH_ALEN];
-	u32 total_pkts_size;
 	u32 is_11n_enabled;
 	u16 max_amsdu;
-	u16 pkt_count;
+	u16 ba_pkt_count;
 	u8 ba_packet_thr;
+	u16 total_pkt_count;
 };
 
 struct mwifiex_tid_tbl {
@@ -515,6 +515,8 @@ struct mwifiex_private {
 	bool scan_aborting;
 	u8 csa_chan;
 	unsigned long csa_expire_time;
+	u8 del_list_idx;
+	bool hs2_enabled;
 };
 
 enum mwifiex_ba_status {
@@ -748,6 +750,7 @@ struct mwifiex_adapter {
 
 	atomic_t is_tx_received;
 	atomic_t pending_bridged_pkts;
+	struct semaphore *card_sem;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -900,6 +903,8 @@ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
 u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
 				  u8 *rates);
 u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+				    u8 *rates, u8 radio_type);
 u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
 extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
 void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
@@ -1021,7 +1026,7 @@ mwifiex_netdev_get_priv(struct net_device *dev)
  */
 static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
 {
-	return (*(u32 *)skb->data == PKT_TYPE_MGMT);
+	return (le32_to_cpu(*(__le32 *)skb->data) == PKT_TYPE_MGMT);
 }
 
 /* This function retrieves channel closed for operation by Channel
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 20c9c4c7b0b2..52da8ee7599a 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -76,7 +76,7 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
 	return false;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 /*
  * Kernel needs to suspend all functions separately. Therefore all
  * registered functions must have drivers with suspend and resume
@@ -85,11 +85,12 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
  * If already not suspended, this function allocates and sends a host
  * sleep activate request to the firmware and turns off the traffic.
  */
-static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+static int mwifiex_pcie_suspend(struct device *dev)
 {
 	struct mwifiex_adapter *adapter;
 	struct pcie_service_card *card;
 	int hs_actived;
+	struct pci_dev *pdev = to_pci_dev(dev);
 
 	if (pdev) {
 		card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -120,10 +121,11 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
  * If already not resumed, this function turns on the traffic and
  * sends a host sleep cancel request to the firmware.
  */
-static int mwifiex_pcie_resume(struct pci_dev *pdev)
+static int mwifiex_pcie_resume(struct device *dev)
 {
 	struct mwifiex_adapter *adapter;
 	struct pcie_service_card *card;
+	struct pci_dev *pdev = to_pci_dev(dev);
 
 	if (pdev) {
 		card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -211,9 +213,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
 	wait_for_completion(&adapter->fw_load);
 
 	if (user_rmmod) {
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 		if (adapter->is_suspended)
-			mwifiex_pcie_resume(pdev);
+			mwifiex_pcie_resume(&pdev->dev);
 #endif
 
 		for (i = 0; i < adapter->priv_num; i++)
@@ -233,6 +235,14 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
 	kfree(card);
 }
 
+static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
+{
+	user_rmmod = 1;
+	mwifiex_pcie_remove(pdev);
+
+	return;
+}
+
 static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
 	{
 		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
@@ -249,17 +259,24 @@ static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
 
 MODULE_DEVICE_TABLE(pci, mwifiex_ids);
 
+#ifdef CONFIG_PM_SLEEP
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
+			 mwifiex_pcie_resume);
+#endif
+
 /* PCI Device Driver */
 static struct pci_driver __refdata mwifiex_pcie = {
 	.name     = "mwifiex_pcie",
 	.id_table = mwifiex_ids,
 	.probe    = mwifiex_pcie_probe,
 	.remove   = mwifiex_pcie_remove,
-#ifdef CONFIG_PM
-	/* Power Management Hooks */
-	.suspend  = mwifiex_pcie_suspend,
-	.resume   = mwifiex_pcie_resume,
+#ifdef CONFIG_PM_SLEEP
+	.driver   = {
+		.pm = &mwifiex_pcie_pm_ops,
+	},
 #endif
+	.shutdown = mwifiex_pcie_shutdown,
 };
 
 /*
@@ -1925,7 +1942,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
1925 ret = 0; 1942 ret = 0;
1926 break; 1943 break;
1927 } else { 1944 } else {
1928 mdelay(100); 1945 msleep(100);
1929 ret = -1; 1946 ret = -1;
1930 } 1947 }
1931 } 1948 }
@@ -1937,12 +1954,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
1937 else if (!winner_status) { 1954 else if (!winner_status) {
1938 dev_err(adapter->dev, "PCI-E is the winner\n"); 1955 dev_err(adapter->dev, "PCI-E is the winner\n");
1939 adapter->winner = 1; 1956 adapter->winner = 1;
1940 ret = -1;
1941 } else { 1957 } else {
1942 dev_err(adapter->dev, 1958 dev_err(adapter->dev,
1943 "PCI-E is not the winner <%#x,%d>, exit dnld\n", 1959 "PCI-E is not the winner <%#x,%d>, exit dnld\n",
1944 ret, adapter->winner); 1960 ret, adapter->winner);
1945 ret = 0;
1946 } 1961 }
1947 } 1962 }
1948 1963
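The pcie.c hunks above convert the driver from the legacy pci_driver .suspend/.resume callbacks to dev_pm_ops, gated on CONFIG_PM_SLEEP rather than CONFIG_PM. A condensed sketch of the same conversion for a hypothetical driver (probe/id_table omitted; all names are illustrative):

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev); /* legacy hooks got this directly */

        (void)pdev;     /* quiesce hardware here; PCI core manages the power state */
        return 0;
}

static int example_resume(struct device *dev)
{
        return 0;
}

/* expands to a struct dev_pm_ops covering the system-sleep callbacks */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
        .name   = "example_pcie",
        .driver = { .pm = &example_pm_ops, },
};
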
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index c447d9bd1aa9..8cf7d50a7603 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -543,6 +543,37 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
         return chan_idx;
 }
 
+/* This function appends rate TLV to scan config command. */
+static int
+mwifiex_append_rate_tlv(struct mwifiex_private *priv,
+                        struct mwifiex_scan_cmd_config *scan_cfg_out,
+                        u8 radio)
+{
+        struct mwifiex_ie_types_rates_param_set *rates_tlv;
+        u8 rates[MWIFIEX_SUPPORTED_RATES], *tlv_pos;
+        u32 rates_size;
+
+        memset(rates, 0, sizeof(rates));
+
+        tlv_pos = (u8 *)scan_cfg_out->tlv_buf + scan_cfg_out->tlv_buf_len;
+
+        if (priv->scan_request)
+                rates_size = mwifiex_get_rates_from_cfg80211(priv, rates,
+                                                             radio);
+        else
+                rates_size = mwifiex_get_supported_rates(priv, rates);
+
+        dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+                rates_size);
+        rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
+        rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
+        rates_tlv->header.len = cpu_to_le16((u16) rates_size);
+        memcpy(rates_tlv->rates, rates, rates_size);
+        scan_cfg_out->tlv_buf_len += sizeof(rates_tlv->header) + rates_size;
+
+        return rates_size;
+}
+
 /*
  * This function constructs and sends multiple scan config commands to
  * the firmware.
@@ -564,9 +595,10 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
         struct mwifiex_chan_scan_param_set *tmp_chan_list;
         struct mwifiex_chan_scan_param_set *start_chan;
 
-        u32 tlv_idx;
+        u32 tlv_idx, rates_size;
         u32 total_scan_time;
         u32 done_early;
+        u8 radio_type;
 
         if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
                 dev_dbg(priv->adapter->dev,
@@ -591,6 +623,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
 
         tlv_idx = 0;
         total_scan_time = 0;
+        radio_type = 0;
         chan_tlv_out->header.len = 0;
         start_chan = tmp_chan_list;
         done_early = false;
@@ -612,6 +645,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                         continue;
                 }
 
+                radio_type = tmp_chan_list->radio_type;
                 dev_dbg(priv->adapter->dev,
                         "info: Scan: Chan(%3d), Radio(%d),"
                         " Mode(%d, %d), Dur(%d)\n",
@@ -692,6 +726,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                         break;
                 }
 
+                rates_size = mwifiex_append_rate_tlv(priv, scan_cfg_out,
+                                                     radio_type);
+
                 priv->adapter->scan_channels = start_chan;
 
                 /* Send the scan command to the firmware with the specified
@@ -699,6 +736,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
                                              HostCmd_ACT_GEN_SET, 0,
                                              scan_cfg_out);
+
+                /* rate IE is updated per scan command but same starting
+                 * pointer is used each time so that rate IE from earlier
+                 * scan_cfg_out->buf is overwritten with new one.
+                 */
+                scan_cfg_out->tlv_buf_len -=
+                        sizeof(struct mwifiex_ie_types_header) + rates_size;
+
                 if (ret)
                         break;
         }
@@ -741,7 +786,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
         struct mwifiex_adapter *adapter = priv->adapter;
         struct mwifiex_ie_types_num_probes *num_probes_tlv;
         struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
-        struct mwifiex_ie_types_rates_param_set *rates_tlv;
         u8 *tlv_pos;
         u32 num_probes;
         u32 ssid_len;
@@ -753,8 +797,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
         u8 radio_type;
         int i;
         u8 ssid_filter;
-        u8 rates[MWIFIEX_SUPPORTED_RATES];
-        u32 rates_size;
         struct mwifiex_ie_types_htcap *ht_cap;
 
         /* The tlv_buf_len is calculated for each scan command. The TLVs added
@@ -889,19 +931,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 
         }
 
-        /* Append rates tlv */
-        memset(rates, 0, sizeof(rates));
-
-        rates_size = mwifiex_get_supported_rates(priv, rates);
-
-        rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
-        rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
-        rates_tlv->header.len = cpu_to_le16((u16) rates_size);
-        memcpy(rates_tlv->rates, rates, rates_size);
-        tlv_pos += sizeof(rates_tlv->header) + rates_size;
-
-        dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
-
         if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
             (priv->adapter->config_bands & BAND_GN ||
              priv->adapter->config_bands & BAND_AN)) {
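
The scan.c change moves the rates TLV from the one-shot scan config into the per-command loop, so each HostCmd_CMD_802_11_SCAN carries rates matching the radio type of the channels it covers; subtracting the TLV size after the send rewinds tlv_buf_len so the next iteration overwrites the stale rate IE in place. A toy model of that append/rewind bookkeeping (layout and names are illustrative, not the driver's structures):

#include <string.h>

/* Toy TLV scratch buffer reused across loop iterations. */
struct tlv_buf {
        unsigned char data[256];
        unsigned int len;               /* bytes currently in use */
};

/* Appends one type/len/value record; returns the bytes consumed so the
 * caller can subtract them again to rewind (as the scan loop does). */
static unsigned int append_tlv(struct tlv_buf *b, unsigned short type,
                               const void *payload, unsigned short plen)
{
        unsigned char *pos = b->data + b->len;

        if (b->len + 4u + plen > sizeof(b->data))
                return 0;               /* no room; nothing appended */

        memcpy(pos, &type, 2);          /* the real code uses cpu_to_le16() */
        memcpy(pos + 2, &plen, 2);
        memcpy(pos + 4, payload, plen);
        b->len += 4u + plen;
        return 4u + plen;
}
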
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 09185c963248..1576104e3d95 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -50,9 +50,6 @@ static struct mwifiex_if_ops sdio_ops;
 
 static struct semaphore add_remove_card_sem;
 
-static int mwifiex_sdio_resume(struct device *dev);
-static void mwifiex_sdio_interrupt(struct sdio_func *func);
-
 /*
  * SDIO probe.
  *
@@ -113,6 +110,51 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
 }
 
 /*
+ * SDIO resume.
+ *
+ * Kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that the kernel simply removes the whole card.
+ *
+ * If already not resumed, this function turns on the traffic and
+ * sends a host sleep cancel request to the firmware.
+ */
+static int mwifiex_sdio_resume(struct device *dev)
+{
+        struct sdio_func *func = dev_to_sdio_func(dev);
+        struct sdio_mmc_card *card;
+        struct mwifiex_adapter *adapter;
+        mmc_pm_flag_t pm_flag = 0;
+
+        if (func) {
+                pm_flag = sdio_get_host_pm_caps(func);
+                card = sdio_get_drvdata(func);
+                if (!card || !card->adapter) {
+                        pr_err("resume: invalid card or adapter\n");
+                        return 0;
+                }
+        } else {
+                pr_err("resume: sdio_func is not specified\n");
+                return 0;
+        }
+
+        adapter = card->adapter;
+
+        if (!adapter->is_suspended) {
+                dev_warn(adapter->dev, "device already resumed\n");
+                return 0;
+        }
+
+        adapter->is_suspended = false;
+
+        /* Disable Host Sleep */
+        mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+                          MWIFIEX_ASYNC_CMD);
+
+        return 0;
+}
+
+/*
  * SDIO remove.
  *
  * This function removes the interface and frees up the card structure.
@@ -212,51 +254,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
         return ret;
 }
 
-/*
- * SDIO resume.
- *
- * Kernel needs to suspend all functions separately. Therefore all
- * registered functions must have drivers with suspend and resume
- * methods. Failing that the kernel simply removes the whole card.
- *
- * If already not resumed, this function turns on the traffic and
- * sends a host sleep cancel request to the firmware.
- */
-static int mwifiex_sdio_resume(struct device *dev)
-{
-        struct sdio_func *func = dev_to_sdio_func(dev);
-        struct sdio_mmc_card *card;
-        struct mwifiex_adapter *adapter;
-        mmc_pm_flag_t pm_flag = 0;
-
-        if (func) {
-                pm_flag = sdio_get_host_pm_caps(func);
-                card = sdio_get_drvdata(func);
-                if (!card || !card->adapter) {
-                        pr_err("resume: invalid card or adapter\n");
-                        return 0;
-                }
-        } else {
-                pr_err("resume: sdio_func is not specified\n");
-                return 0;
-        }
-
-        adapter = card->adapter;
-
-        if (!adapter->is_suspended) {
-                dev_warn(adapter->dev, "device already resumed\n");
-                return 0;
-        }
-
-        adapter->is_suspended = false;
-
-        /* Disable Host Sleep */
-        mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
-                          MWIFIEX_ASYNC_CMD);
-
-        return 0;
-}
-
 /* Device ID for SD8786 */
 #define SDIO_DEVICE_ID_MARVELL_8786 (0x9116)
 /* Device ID for SD8787 */
@@ -707,6 +704,65 @@ static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
 }
 
 /*
+ * This function reads the interrupt status from card.
+ */
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+{
+        struct sdio_mmc_card *card = adapter->card;
+        u8 sdio_ireg;
+        unsigned long flags;
+
+        if (mwifiex_read_data_sync(adapter, card->mp_regs,
+                                   card->reg->max_mp_regs,
+                                   REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
+                dev_err(adapter->dev, "read mp_regs failed\n");
+                return;
+        }
+
+        sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
+        if (sdio_ireg) {
+                /*
+                 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
+                 * For SDIO new mode CMD port interrupts
+                 *        DN_LD_CMD_PORT_HOST_INT_STATUS and/or
+                 *        UP_LD_CMD_PORT_HOST_INT_STATUS
+                 * Clear the interrupt status register
+                 */
+                dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+                spin_lock_irqsave(&adapter->int_lock, flags);
+                adapter->int_status |= sdio_ireg;
+                spin_unlock_irqrestore(&adapter->int_lock, flags);
+        }
+}
+
+/*
+ * SDIO interrupt handler.
+ *
+ * This function reads the interrupt status from firmware and handles
+ * the interrupt in current thread (ksdioirqd) right away.
+ */
+static void
+mwifiex_sdio_interrupt(struct sdio_func *func)
+{
+        struct mwifiex_adapter *adapter;
+        struct sdio_mmc_card *card;
+
+        card = sdio_get_drvdata(func);
+        if (!card || !card->adapter) {
+                pr_debug("int: func=%p card=%p adapter=%p\n",
+                         func, card, card ? card->adapter : NULL);
+                return;
+        }
+        adapter = card->adapter;
+
+        if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
+                adapter->ps_state = PS_STATE_AWAKE;
+
+        mwifiex_interrupt_status(adapter);
+        mwifiex_main_process(adapter);
+}
+
+/*
  * This function enables the host interrupt.
  *
  * The host interrupt enable mask is written to the card
@@ -944,7 +1000,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
                         ret = 0;
                         break;
                 } else {
-                        mdelay(100);
+                        msleep(100);
                         ret = -1;
                 }
         }
@@ -963,65 +1019,6 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
 }
 
 /*
- * This function reads the interrupt status from card.
- */
-static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
-{
-        struct sdio_mmc_card *card = adapter->card;
-        u8 sdio_ireg;
-        unsigned long flags;
-
-        if (mwifiex_read_data_sync(adapter, card->mp_regs,
-                                   card->reg->max_mp_regs,
-                                   REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
-                dev_err(adapter->dev, "read mp_regs failed\n");
-                return;
-        }
-
-        sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
-        if (sdio_ireg) {
-                /*
-                 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
-                 * For SDIO new mode CMD port interrupts
-                 *        DN_LD_CMD_PORT_HOST_INT_STATUS and/or
-                 *        UP_LD_CMD_PORT_HOST_INT_STATUS
-                 * Clear the interrupt status register
-                 */
-                dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
-                spin_lock_irqsave(&adapter->int_lock, flags);
-                adapter->int_status |= sdio_ireg;
-                spin_unlock_irqrestore(&adapter->int_lock, flags);
-        }
-}
-
-/*
- * SDIO interrupt handler.
- *
- * This function reads the interrupt status from firmware and handles
- * the interrupt in current thread (ksdioirqd) right away.
- */
-static void
-mwifiex_sdio_interrupt(struct sdio_func *func)
-{
-        struct mwifiex_adapter *adapter;
-        struct sdio_mmc_card *card;
-
-        card = sdio_get_drvdata(func);
-        if (!card || !card->adapter) {
-                pr_debug("int: func=%p card=%p adapter=%p\n",
-                         func, card, card ? card->adapter : NULL);
-                return;
-        }
-        adapter = card->adapter;
-
-        if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
-                adapter->ps_state = PS_STATE_AWAKE;
-
-        mwifiex_interrupt_status(adapter);
-        mwifiex_main_process(adapter);
-}
-
 /*
  * This function decodes a received packet.
  *
  * Based on the type, the packet is treated as either a data, or
@@ -1065,7 +1062,7 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
 
         case MWIFIEX_TYPE_EVENT:
                 dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
-                adapter->event_cause = *(u32 *) skb->data;
+                adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
 
                 if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
                         memcpy(adapter->event_body,
@@ -1210,8 +1207,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
 
                         /* get curr PKT len & type */
-                        pkt_len = *(u16 *) &curr_ptr[0];
-                        pkt_type = *(u16 *) &curr_ptr[2];
+                        pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
+                        pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
 
                         /* copy pkt to deaggr buf */
                         skb_deaggr = card->mpa_rx.skb_arr[pind];
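
Both check_fw_status pollers now call msleep() instead of mdelay(): the 100 ms wait runs in process context, so busy-spinning the CPU was pure waste; mdelay() is only warranted where sleeping is illegal (atomic context). A short sketch of the polling idiom, with a hypothetical ready-flag callback standing in for the register read:

#include <linux/delay.h>
#include <linux/errno.h>

static int example_wait_fw_ready(bool (*fw_ready)(void), unsigned int tries)
{
        while (tries--) {
                if (fw_ready())
                        return 0;
                msleep(100);    /* yields the CPU; mdelay(100) would spin */
        }
        return -ETIMEDOUT;
}
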
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 8ece48580642..c0268b597748 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -707,8 +707,9 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
         if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
                 tlv_mac = (void *)((u8 *)&key_material->key_param_set +
                                    key_param_len);
-                tlv_mac->tlv.type = cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
-                tlv_mac->tlv.len = cpu_to_le16(ETH_ALEN);
+                tlv_mac->header.type =
+                                cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
+                tlv_mac->header.len = cpu_to_le16(ETH_ALEN);
                 memcpy(tlv_mac->mac_addr, enc_key->mac_addr, ETH_ALEN);
                 cmd_size = key_param_len + S_DS_GEN +
                            sizeof(key_material->action) +
@@ -1069,7 +1070,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
         int i, byte_len;
         u8 *stack_ptr = *buffer;
 
-        for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) {
+        for (i = 0; i < MWIFIEX_MEF_MAX_FILTERS; i++) {
                 filter = &mef_entry->filter[i];
                 if (!filter->filt_type)
                         break;
@@ -1078,7 +1079,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
                 *stack_ptr = TYPE_DNUM;
                 stack_ptr += 1;
 
-                byte_len = filter->byte_seq[MAX_BYTESEQ];
+                byte_len = filter->byte_seq[MWIFIEX_MEF_MAX_BYTESEQ];
                 memcpy(stack_ptr, filter->byte_seq, byte_len);
                 stack_ptr += byte_len;
                 *stack_ptr = byte_len;
@@ -1183,6 +1184,70 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
         return 0;
 }
 
+static int
+mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
+                         struct host_cmd_ds_command *cmd,
+                         u16 cmd_action, void *data_buf)
+{
+        struct host_cmd_ds_coalesce_cfg *coalesce_cfg =
+                                                &cmd->params.coalesce_cfg;
+        struct mwifiex_ds_coalesce_cfg *cfg = data_buf;
+        struct coalesce_filt_field_param *param;
+        u16 cnt, idx, length;
+        struct coalesce_receive_filt_rule *rule;
+
+        cmd->command = cpu_to_le16(HostCmd_CMD_COALESCE_CFG);
+        cmd->size = cpu_to_le16(S_DS_GEN);
+
+        coalesce_cfg->action = cpu_to_le16(cmd_action);
+        coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules);
+        rule = coalesce_cfg->rule;
+
+        for (cnt = 0; cnt < cfg->num_of_rules; cnt++) {
+                rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE);
+                rule->max_coalescing_delay =
+                        cpu_to_le16(cfg->rule[cnt].max_coalescing_delay);
+                rule->pkt_type = cfg->rule[cnt].pkt_type;
+                rule->num_of_fields = cfg->rule[cnt].num_of_fields;
+
+                length = 0;
+
+                param = rule->params;
+                for (idx = 0; idx < cfg->rule[cnt].num_of_fields; idx++) {
+                        param->operation = cfg->rule[cnt].params[idx].operation;
+                        param->operand_len =
+                                cfg->rule[cnt].params[idx].operand_len;
+                        param->offset =
+                                cpu_to_le16(cfg->rule[cnt].params[idx].offset);
+                        memcpy(param->operand_byte_stream,
+                               cfg->rule[cnt].params[idx].operand_byte_stream,
+                               param->operand_len);
+
+                        length += sizeof(struct coalesce_filt_field_param);
+
+                        param++;
+                }
+
+                /* Total rule length is sizeof max_coalescing_delay(u16),
+                 * num_of_fields(u8), pkt_type(u8) and total length of the all
+                 * params
+                 */
+                rule->header.len = cpu_to_le16(length + sizeof(u16) +
+                                               sizeof(u8) + sizeof(u8));
+
+                /* Add the rule length to the command size*/
+                le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) +
+                             sizeof(struct mwifiex_ie_types_header));
+
+                rule = (void *)((u8 *)rule->params + length);
+        }
+
+        /* Add sizeof action, num_of_rules to total command length */
+        le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
+
+        return 0;
+}
+
 /*
  * This function prepares the commands before sending them to the firmware.
  *
@@ -1406,6 +1471,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
         case HostCmd_CMD_MEF_CFG:
                 ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf);
                 break;
+        case HostCmd_CMD_COALESCE_CFG:
+                ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
+                                               data_buf);
+                break;
         default:
                 dev_err(priv->adapter->dev,
                         "PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index d85df158cc6c..58a6013712d2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -280,7 +280,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
 
         tlv_buf = ((u8 *)rate_cfg) +
                         sizeof(struct host_cmd_ds_tx_rate_cfg);
-        tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
+        tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16)));
 
         while (tlv_buf && tlv_buf_len > 0) {
                 tlv = (*tlv_buf);
@@ -997,6 +997,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                 break;
         case HostCmd_CMD_MEF_CFG:
                 break;
+        case HostCmd_CMD_COALESCE_CFG:
+                break;
         default:
                 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
                         resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index ea265ec0e522..8b057524b252 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -201,6 +201,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
         case EVENT_DEAUTHENTICATED:
                 dev_dbg(adapter->dev, "event: Deauthenticated\n");
+                if (priv->wps.session_enable) {
+                        dev_dbg(adapter->dev,
+                                "info: receive deauth event in wps session\n");
+                        break;
+                }
                 adapter->dbg.num_event_deauth++;
                 if (priv->media_connected) {
                         reason_code =
@@ -211,6 +216,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
         case EVENT_DISASSOCIATED:
                 dev_dbg(adapter->dev, "event: Disassociated\n");
+                if (priv->wps.session_enable) {
+                        dev_dbg(adapter->dev,
+                                "info: receive disassoc event in wps session\n");
+                        break;
+                }
                 adapter->dbg.num_event_disassoc++;
                 if (priv->media_connected) {
                         reason_code =
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 8af97abf7108..f084412eee0b 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -797,15 +797,16 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
                                u8 *ie_data_ptr, u16 ie_len)
 {
         if (ie_len) {
-                priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
-                if (!priv->wps_ie)
-                        return -ENOMEM;
-                if (ie_len > sizeof(priv->wps_ie)) {
+                if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
                         dev_dbg(priv->adapter->dev,
                                 "info: failed to copy WPS IE, too big\n");
-                        kfree(priv->wps_ie);
                         return -1;
                 }
+
+                priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
+                if (!priv->wps_ie)
+                        return -ENOMEM;
+
                 memcpy(priv->wps_ie, ie_data_ptr, ie_len);
                 priv->wps_ie_len = ie_len;
                 dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
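
The mwifiex_set_wps_ie() rework fixes two bugs at once: the old code leaked the fresh allocation on the error path, and its length check compared against sizeof(priv->wps_ie), the size of the pointer rather than of the buffer. Validating against the real capacity before allocating removes both failure modes. A condensed sketch of the validate-then-allocate pattern (constant name illustrative):

#include <linux/slab.h>
#include <linux/string.h>

#define EXAMPLE_MAX_IE_LEN 256  /* stands in for MWIFIEX_MAX_VSIE_LEN */

/* Reject oversized input before touching the allocator, so no error
 * path ever has to free. */
static u8 *example_copy_ie(const u8 *src, u16 len)
{
        u8 *buf;

        if (!len || len > EXAMPLE_MAX_IE_LEN)
                return NULL;

        buf = kzalloc(EXAMPLE_MAX_IE_LEN, GFP_KERNEL);
        if (!buf)
                return NULL;

        memcpy(buf, src, len);
        return buf;
}
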
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index b5c109504393..bb22664923ef 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -17,6 +17,8 @@
  * this warranty disclaimer.
  */
 
+#include <uapi/linux/ipv6.h>
+#include <net/ndisc.h>
 #include "decl.h"
 #include "ioctl.h"
 #include "util.h"
@@ -25,6 +27,46 @@
 #include "11n_aggr.h"
 #include "11n_rxreorder.h"
 
+/* This function checks if a frame is IPv4 ARP or IPv6 Neighbour advertisement
+ * frame. If frame has both source and destination mac address as same, this
+ * function drops such gratuitous frames.
+ */
+static bool
+mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv,
+                               struct sk_buff *skb)
+{
+        const struct mwifiex_arp_eth_header *arp;
+        struct ethhdr *eth_hdr;
+        struct ipv6hdr *ipv6;
+        struct icmp6hdr *icmpv6;
+
+        eth_hdr = (struct ethhdr *)skb->data;
+        switch (ntohs(eth_hdr->h_proto)) {
+        case ETH_P_ARP:
+                arp = (void *)(skb->data + sizeof(struct ethhdr));
+                if (arp->hdr.ar_op == htons(ARPOP_REPLY) ||
+                    arp->hdr.ar_op == htons(ARPOP_REQUEST)) {
+                        if (!memcmp(arp->ar_sip, arp->ar_tip, 4))
+                                return true;
+                }
+                break;
+        case ETH_P_IPV6:
+                ipv6 = (void *)(skb->data + sizeof(struct ethhdr));
+                icmpv6 = (void *)(skb->data + sizeof(struct ethhdr) +
+                                  sizeof(struct ipv6hdr));
+                if (NDISC_NEIGHBOUR_ADVERTISEMENT == icmpv6->icmp6_type) {
+                        if (!memcmp(&ipv6->saddr, &ipv6->daddr,
+                                    sizeof(struct in6_addr)))
+                                return true;
+                }
+                break;
+        default:
+                break;
+        }
+
+        return false;
+}
+
 /*
  * This function processes the received packet and forwards it
  * to kernel/upper layer.
@@ -90,6 +132,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
    either the reconstructed EthII frame or the 802.2/llc/snap frame */
         skb_pull(skb, hdr_chop);
 
+        if (priv->hs2_enabled &&
+            mwifiex_discard_gratuitous_arp(priv, skb)) {
+                dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
+                dev_kfree_skb_any(skb);
+                return 0;
+        }
+
         priv->rxpd_rate = local_rx_pd->rx_rate;
 
         priv->rxpd_htinfo = local_rx_pd->ht_info;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 2de882dead0f..64424c81b44f 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -293,9 +293,9 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
         u8 *tlv = *tlv_buf;
 
         tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
-        tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
-        tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
-                                        sizeof(struct host_cmd_tlv));
+        tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+        tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+                                        sizeof(struct mwifiex_ie_types_header));
         tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
         tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
         cmd_size += sizeof(struct host_cmd_tlv_akmp);
@@ -303,10 +303,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
         if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
                 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
-                pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
-                pwk_cipher->tlv.len =
+                pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+                pwk_cipher->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
                 pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
                 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -315,10 +315,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
         if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
                 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
-                pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
-                pwk_cipher->tlv.len =
+                pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+                pwk_cipher->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
                 pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
                 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -327,10 +327,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
         if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
                 gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
-                gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
-                gwk_cipher->tlv.len =
+                gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+                gwk_cipher->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
                 cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
                 tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
@@ -338,13 +338,15 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
         if (bss_cfg->wpa_cfg.length) {
                 passphrase = (struct host_cmd_tlv_passphrase *)tlv;
-                passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
-                passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
+                passphrase->header.type =
+                                cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+                passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
                 memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
                        bss_cfg->wpa_cfg.length);
-                cmd_size += sizeof(struct host_cmd_tlv) +
+                cmd_size += sizeof(struct mwifiex_ie_types_header) +
                             bss_cfg->wpa_cfg.length;
-                tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
+                tlv += sizeof(struct mwifiex_ie_types_header) +
+                       bss_cfg->wpa_cfg.length;
         }
 
         *param_size = cmd_size;
@@ -403,16 +405,17 @@ mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
                     (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
                      bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
                         wep_key = (struct host_cmd_tlv_wep_key *)tlv;
-                        wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
-                        wep_key->tlv.len =
+                        wep_key->header.type =
+                                cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
+                        wep_key->header.len =
                                 cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
                         wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
                         wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
                         memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
                                bss_cfg->wep_cfg[i].length);
-                        cmd_size += sizeof(struct host_cmd_tlv) + 2 +
+                        cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
                                     bss_cfg->wep_cfg[i].length;
-                        tlv += sizeof(struct host_cmd_tlv) + 2 +
+                        tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
                                     bss_cfg->wep_cfg[i].length;
                 }
         }
@@ -449,16 +452,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 
         if (bss_cfg->ssid.ssid_len) {
                 ssid = (struct host_cmd_tlv_ssid *)tlv;
-                ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
-                ssid->tlv.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
+                ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+                ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
                 memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
-                cmd_size += sizeof(struct host_cmd_tlv) +
+                cmd_size += sizeof(struct mwifiex_ie_types_header) +
                             bss_cfg->ssid.ssid_len;
-                tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+                tlv += sizeof(struct mwifiex_ie_types_header) +
+                       bss_cfg->ssid.ssid_len;
 
                 bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
-                bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
-                bcast_ssid->tlv.len =
+                bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+                bcast_ssid->header.len =
                         cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
                 bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
                 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
@@ -466,13 +470,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
         }
         if (bss_cfg->rates[0]) {
                 tlv_rates = (struct host_cmd_tlv_rates *)tlv;
-                tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
+                tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
 
                 for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
                      i++)
                         tlv_rates->rates[i] = bss_cfg->rates[i];
 
-                tlv_rates->tlv.len = cpu_to_le16(i);
+                tlv_rates->header.len = cpu_to_le16(i);
                 cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
                 tlv += sizeof(struct host_cmd_tlv_rates) + i;
         }
@@ -482,10 +486,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
             (bss_cfg->band_cfg == BAND_CONFIG_A &&
              bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
                 chan_band = (struct host_cmd_tlv_channel_band *)tlv;
-                chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
-                chan_band->tlv.len =
+                chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
+                chan_band->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 chan_band->band_config = bss_cfg->band_cfg;
                 chan_band->channel = bss_cfg->channel;
                 cmd_size += sizeof(struct host_cmd_tlv_channel_band);
@@ -494,11 +498,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
         if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
             bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
                 beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
-                beacon_period->tlv.type =
+                beacon_period->header.type =
                         cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
-                beacon_period->tlv.len =
+                beacon_period->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
                 cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
                 tlv += sizeof(struct host_cmd_tlv_beacon_period);
@@ -506,21 +510,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
         if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
             bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
                 dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
-                dtim_period->tlv.type = cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
-                dtim_period->tlv.len =
+                dtim_period->header.type =
+                        cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
+                dtim_period->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 dtim_period->period = bss_cfg->dtim_period;
                 cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
                 tlv += sizeof(struct host_cmd_tlv_dtim_period);
         }
         if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
                 rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
-                rts_threshold->tlv.type =
+                rts_threshold->header.type =
                         cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
-                rts_threshold->tlv.len =
+                rts_threshold->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
                 cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
                 tlv += sizeof(struct host_cmd_tlv_frag_threshold);
@@ -528,21 +533,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
         if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
             (bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
                 frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
-                frag_threshold->tlv.type =
+                frag_threshold->header.type =
                         cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
-                frag_threshold->tlv.len =
+                frag_threshold->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
                 cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
                 tlv += sizeof(struct host_cmd_tlv_frag_threshold);
         }
         if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
                 retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
-                retry_limit->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
-                retry_limit->tlv.len =
+                retry_limit->header.type =
+                        cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
+                retry_limit->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 retry_limit->limit = (u8)bss_cfg->retry_limit;
                 cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
                 tlv += sizeof(struct host_cmd_tlv_retry_limit);
@@ -557,21 +563,21 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
         if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
             (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
                 auth_type = (struct host_cmd_tlv_auth_type *)tlv;
-                auth_type->tlv.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
-                auth_type->tlv.len =
+                auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+                auth_type->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
-                                    sizeof(struct host_cmd_tlv));
+                                    sizeof(struct mwifiex_ie_types_header));
                 auth_type->auth_type = (u8)bss_cfg->auth_mode;
                 cmd_size += sizeof(struct host_cmd_tlv_auth_type);
                 tlv += sizeof(struct host_cmd_tlv_auth_type);
         }
         if (bss_cfg->protocol) {
                 encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
-                encrypt_protocol->tlv.type =
+                encrypt_protocol->header.type =
                         cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
-                encrypt_protocol->tlv.len =
+                encrypt_protocol->header.len =
                         cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
-                                    - sizeof(struct host_cmd_tlv));
+                                    - sizeof(struct mwifiex_ie_types_header));
                 encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
                 cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
                 tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
@@ -608,9 +614,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 
         if (bss_cfg->sta_ao_timer) {
                 ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
-                ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
-                ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
-                                                sizeof(struct host_cmd_tlv));
+                ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
+                ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
+                                        sizeof(struct mwifiex_ie_types_header));
                 ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
                 cmd_size += sizeof(*ao_timer);
                 tlv += sizeof(*ao_timer);
@@ -618,9 +624,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 
         if (bss_cfg->ps_sta_ao_timer) {
                 ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
-                ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
-                ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
-                                                   sizeof(struct host_cmd_tlv));
+                ps_ao_timer->header.type =
+                        cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
+                ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
+                                sizeof(struct mwifiex_ie_types_header));
                 ps_ao_timer->sta_ao_timer =
                         cpu_to_le32(bss_cfg->ps_sta_ao_timer);
                 cmd_size += sizeof(*ps_ao_timer);
@@ -636,16 +643,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
 {
         struct mwifiex_ie_list *ap_ie = cmd_buf;
-        struct host_cmd_tlv *tlv_ie = (struct host_cmd_tlv *)tlv;
+        struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
 
         if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
                 return -1;
 
-        *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct host_cmd_tlv);
+        *ie_size += le16_to_cpu(ap_ie->len) +
+                    sizeof(struct mwifiex_ie_types_header);
 
         tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
         tlv_ie->len = ap_ie->len;
-        tlv += sizeof(struct host_cmd_tlv);
+        tlv += sizeof(struct mwifiex_ie_types_header);
 
         memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
 
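Every uap_cmd.c hunk above is the same mechanical substitution: the driver-private struct host_cmd_tlv duplicated the generic TLV header, so each tlv.type/tlv.len access and each sizeof(struct host_cmd_tlv) is rewritten against the shared mwifiex_ie_types_header. A sketch of the header shape these TLVs share (a packed little-endian type/length pair, matching the cpu_to_le16() stores above; names here are illustrative):

#include <linux/types.h>

struct example_ie_types_header {
        __le16 type;
        __le16 len;     /* payload length, excluding this header */
} __packed;

/* A concrete TLV then embeds the header followed by its payload: */
struct example_tlv_ssid {
        struct example_ie_types_header header;
        u8 ssid[];      /* variable-length payload */
} __packed;
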
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index a018e42d117e..1cfe5a738c47 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -24,6 +24,69 @@
 #include "11n_aggr.h"
 #include "11n_rxreorder.h"
 
+/* This function checks if particular RA list has packets more than low bridge
+ * packet threshold and then deletes packet from this RA list.
+ * Function deletes packets from such RA list and returns true. If no such list
+ * is found, false is returned.
+ */
+static bool
+mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
+                                  struct list_head *ra_list_head)
+{
+        struct mwifiex_ra_list_tbl *ra_list;
+        struct sk_buff *skb, *tmp;
+        bool pkt_deleted = false;
+        struct mwifiex_txinfo *tx_info;
+        struct mwifiex_adapter *adapter = priv->adapter;
+
+        list_for_each_entry(ra_list, ra_list_head, list) {
+                if (skb_queue_empty(&ra_list->skb_head))
+                        continue;
+
+                skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
+                        tx_info = MWIFIEX_SKB_TXCB(skb);
+                        if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
+                                __skb_unlink(skb, &ra_list->skb_head);
+                                mwifiex_write_data_complete(adapter, skb, 0,
+                                                            -1);
+                                atomic_dec(&priv->wmm.tx_pkts_queued);
+                                pkt_deleted = true;
+                        }
+                        if ((atomic_read(&adapter->pending_bridged_pkts) <=
+                             MWIFIEX_BRIDGED_PKTS_THR_LOW))
+                                break;
+                }
+        }
+
+        return pkt_deleted;
+}
+
+/* This function deletes packets from particular RA List. RA list index
+ * from which packets are deleted is preserved so that packets from next RA
+ * list are deleted upon subsequent call thus maintaining fairness.
+ */
+static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
+{
+        unsigned long flags;
+        struct list_head *ra_list;
+        int i;
+
+        spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+        for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
+                if (priv->del_list_idx == MAX_NUM_TID)
+                        priv->del_list_idx = 0;
+                ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
+                if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list)) {
+                        priv->del_list_idx++;
+                        break;
+                }
+        }
+
+        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+
 static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
                                           struct sk_buff *skb)
 {
@@ -40,10 +103,11 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
         rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
 
         if ((atomic_read(&adapter->pending_bridged_pkts) >=
-                                             MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+                                             MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
                 dev_err(priv->adapter->dev,
                         "Tx: Bridge packet limit reached. Drop packet!\n");
                 kfree_skb(skb);
+                mwifiex_uap_cleanup_tx_queues(priv);
                 return;
         }
 
@@ -95,10 +159,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
         atomic_inc(&adapter->tx_pending);
         atomic_inc(&adapter->pending_bridged_pkts);
 
-        if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
-                mwifiex_set_trans_start(priv->netdev);
-                mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
-        }
         return;
 }
 
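The bridged-traffic change replaces the single hard drop limit with a high/low watermark pair: crossing MWIFIEX_BRIDGED_PKTS_THR_HIGH triggers a purge that walks the RA lists round-robin (del_list_idx persists between calls, so each TID takes its turn) and stops once the backlog falls to MWIFIEX_BRIDGED_PKTS_THR_LOW. A toy model of that hysteresis, with illustrative constants and callbacks:

#define THR_HIGH 1024
#define THR_LOW   128

struct toy_queue {
        int pending;            /* stands in for pending_bridged_pkts */
        int del_list_idx;       /* round-robin cursor kept across calls */
        int num_lists;
};

/* drop_some(list) drops bridged packets from one list, returning the count */
static void toy_purge(struct toy_queue *q, int (*drop_some)(int list))
{
        int i;

        if (q->pending < THR_HIGH)
                return;                         /* below the high watermark */

        for (i = 0; i < q->num_lists && q->pending > THR_LOW; i++) {
                if (q->del_list_idx == q->num_lists)
                        q->del_list_idx = 0;
                q->pending -= drop_some(q->del_list_idx);
                q->del_list_idx++;              /* resume here next time */
        }
}
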
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index f90fe21e5bfd..2472d4b7f00e 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -24,9 +24,9 @@
24 24
25static const char usbdriver_name[] = "usb8797"; 25static const char usbdriver_name[] = "usb8797";
26 26
27static u8 user_rmmod;
28static struct mwifiex_if_ops usb_ops; 27static struct mwifiex_if_ops usb_ops;
29static struct semaphore add_remove_card_sem; 28static struct semaphore add_remove_card_sem;
29static struct usb_card_rec *usb_card;
30 30
31static struct usb_device_id mwifiex_usb_table[] = { 31static struct usb_device_id mwifiex_usb_table[] = {
32 {USB_DEVICE(USB8797_VID, USB8797_PID_1)}, 32 {USB_DEVICE(USB8797_VID, USB8797_PID_1)},
@@ -350,6 +350,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
350 350
351 card->udev = udev; 351 card->udev = udev;
352 card->intf = intf; 352 card->intf = intf;
353 usb_card = card;
353 354
354 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n", 355 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n",
355 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass, 356 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
@@ -532,7 +533,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
532{ 533{
533 struct usb_card_rec *card = usb_get_intfdata(intf); 534 struct usb_card_rec *card = usb_get_intfdata(intf);
534 struct mwifiex_adapter *adapter; 535 struct mwifiex_adapter *adapter;
535 int i;
536 536
537 if (!card || !card->adapter) { 537 if (!card || !card->adapter) {
538 pr_err("%s: card or card->adapter is NULL\n", __func__); 538 pr_err("%s: card or card->adapter is NULL\n", __func__);
@@ -543,27 +543,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
543 if (!adapter->priv_num) 543 if (!adapter->priv_num)
544 return; 544 return;
545 545
546 /* In case driver is removed when asynchronous FW downloading is
547 * in progress
548 */
549 wait_for_completion(&adapter->fw_load);
550
551 if (user_rmmod) {
552#ifdef CONFIG_PM
553 if (adapter->is_suspended)
554 mwifiex_usb_resume(intf);
555#endif
556 for (i = 0; i < adapter->priv_num; i++)
557 if ((GET_BSS_ROLE(adapter->priv[i]) ==
558 MWIFIEX_BSS_ROLE_STA) &&
559 adapter->priv[i]->media_connected)
560 mwifiex_deauthenticate(adapter->priv[i], NULL);
561
562 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
563 MWIFIEX_BSS_ROLE_ANY),
564 MWIFIEX_FUNC_SHUTDOWN);
565 }
566
567 mwifiex_usb_free(card); 546 mwifiex_usb_free(card);
568 547
569 dev_dbg(adapter->dev, "%s: removing card\n", __func__); 548 dev_dbg(adapter->dev, "%s: removing card\n", __func__);
@@ -786,6 +765,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
786 return 0; 765 return 0;
787} 766}
788 767
768static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
769{
770 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
771
772 usb_set_intfdata(card->intf, NULL);
773}
774
789static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, 775static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
790 struct mwifiex_fw_image *fw) 776 struct mwifiex_fw_image *fw)
791{ 777{
@@ -978,6 +964,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
978 964
979static struct mwifiex_if_ops usb_ops = { 965static struct mwifiex_if_ops usb_ops = {
980 .register_dev = mwifiex_register_dev, 966 .register_dev = mwifiex_register_dev,
967 .unregister_dev = mwifiex_unregister_dev,
981 .wakeup = mwifiex_pm_wakeup_card, 968 .wakeup = mwifiex_pm_wakeup_card,
982 .wakeup_complete = mwifiex_pm_wakeup_card_complete, 969 .wakeup_complete = mwifiex_pm_wakeup_card_complete,
983 970
@@ -1024,8 +1011,29 @@ static void mwifiex_usb_cleanup_module(void)
1024 if (!down_interruptible(&add_remove_card_sem)) 1011 if (!down_interruptible(&add_remove_card_sem))
1025 up(&add_remove_card_sem); 1012 up(&add_remove_card_sem);
1026 1013
1027 /* set the flag as user is removing this module */ 1014 if (usb_card) {
1028 user_rmmod = 1; 1015 struct mwifiex_adapter *adapter = usb_card->adapter;
1016 int i;
1017
1018 /* In case driver is removed when asynchronous FW downloading is
1019 * in progress
1020 */
1021 wait_for_completion(&adapter->fw_load);
1022
1023#ifdef CONFIG_PM
1024 if (adapter->is_suspended)
1025 mwifiex_usb_resume(usb_card->intf);
1026#endif
1027 for (i = 0; i < adapter->priv_num; i++)
1028 if ((GET_BSS_ROLE(adapter->priv[i]) ==
1029 MWIFIEX_BSS_ROLE_STA) &&
1030 adapter->priv[i]->media_connected)
1031 mwifiex_deauthenticate(adapter->priv[i], NULL);
1032
1033 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
1034 MWIFIEX_BSS_ROLE_ANY),
1035 MWIFIEX_FUNC_SHUTDOWN);
1036 }
1029 1037
1030 usb_deregister(&mwifiex_usb_driver); 1038 usb_deregister(&mwifiex_usb_driver);
1031} 1039}
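
Taken together, the usb.c hunks move the rmmod-time teardown out of the disconnect handler: instead of a user_rmmod flag set at module exit and tested in disconnect(), probe() records the active card in the module-scope usb_card pointer, and the cleanup routine itself waits for any in-flight firmware download, deauthenticates STA interfaces and shuts the firmware down while the device is still present. A rough standalone model of that control flow, with all names hypothetical:

#include <stdio.h>
#include <stddef.h>

struct card {
	int fw_running;
};

static struct card *usb_card;	/* set in probe, read at module exit */

static void probe(struct card *c)
{
	usb_card = c;		/* remember the active card */
}

static void disconnect(struct card *c)
{
	/* Hot-unplug path: only release transport state here; the
	 * firmware shutdown now lives in the module exit routine. */
	if (usb_card == c)
		usb_card = NULL;
}

static void cleanup_module_fn(void)
{
	/* rmmod path: the card is still plugged in, so we can talk
	 * to the firmware before deregistering the driver. */
	if (usb_card && usb_card->fw_running) {
		printf("deauth + firmware shutdown\n");
		usb_card->fw_running = 0;
	}
}

int main(void)
{
	struct card c = { .fw_running = 1 };

	probe(&c);
	cleanup_module_fn();	/* driver removal with device present */
	disconnect(&c);
	return 0;
}
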
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index e57ac0dd3ab5..5d9e150f4111 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -171,8 +171,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
171 rx_pd->rx_pkt_length = cpu_to_le16(pkt_len); 171 rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
172 172
173 cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq, 173 cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
174 CAL_RSSI(rx_pd->snr, rx_pd->nf), 174 CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
175 skb->data, pkt_len, GFP_ATOMIC); 175 0, GFP_ATOMIC);
176 176
177 return 0; 177 return 0;
178} 178}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 944e8846f6fc..2e8f9cdea54d 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -120,7 +120,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
120 120
121 memcpy(ra_list->ra, ra, ETH_ALEN); 121 memcpy(ra_list->ra, ra, ETH_ALEN);
122 122
123 ra_list->total_pkts_size = 0; 123 ra_list->total_pkt_count = 0;
124 124
125 dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list); 125 dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
126 126
@@ -188,7 +188,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
188 ra_list, ra_list->is_11n_enabled); 188 ra_list, ra_list->is_11n_enabled);
189 189
190 if (ra_list->is_11n_enabled) { 190 if (ra_list->is_11n_enabled) {
191 ra_list->pkt_count = 0; 191 ra_list->ba_pkt_count = 0;
192 ra_list->ba_packet_thr = 192 ra_list->ba_packet_thr =
193 mwifiex_get_random_ba_threshold(); 193 mwifiex_get_random_ba_threshold();
194 } 194 }
@@ -679,8 +679,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
679 679
680 skb_queue_tail(&ra_list->skb_head, skb); 680 skb_queue_tail(&ra_list->skb_head, skb);
681 681
682 ra_list->total_pkts_size += skb->len; 682 ra_list->ba_pkt_count++;
683 ra_list->pkt_count++; 683 ra_list->total_pkt_count++;
684 684
685 if (atomic_read(&priv->wmm.highest_queued_prio) < 685 if (atomic_read(&priv->wmm.highest_queued_prio) <
686 tos_to_tid_inv[tid_down]) 686 tos_to_tid_inv[tid_down])
@@ -1037,7 +1037,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
1037 tx_info = MWIFIEX_SKB_TXCB(skb); 1037 tx_info = MWIFIEX_SKB_TXCB(skb);
1038 dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb); 1038 dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
1039 1039
1040 ptr->total_pkts_size -= skb->len; 1040 ptr->total_pkt_count--;
1041 1041
1042 if (!skb_queue_empty(&ptr->skb_head)) 1042 if (!skb_queue_empty(&ptr->skb_head))
1043 skb_next = skb_peek(&ptr->skb_head); 1043 skb_next = skb_peek(&ptr->skb_head);
@@ -1062,8 +1062,8 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
1062 1062
1063 skb_queue_tail(&ptr->skb_head, skb); 1063 skb_queue_tail(&ptr->skb_head, skb);
1064 1064
1065 ptr->total_pkts_size += skb->len; 1065 ptr->total_pkt_count++;
1066 ptr->pkt_count++; 1066 ptr->ba_pkt_count++;
1067 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; 1067 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1068 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1068 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1069 ra_list_flags); 1069 ra_list_flags);
@@ -1224,7 +1224,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1224 mwifiex_send_single_packet() */ 1224 mwifiex_send_single_packet() */
1225 } else { 1225 } else {
1226 if (mwifiex_is_ampdu_allowed(priv, tid) && 1226 if (mwifiex_is_ampdu_allowed(priv, tid) &&
1227 ptr->pkt_count > ptr->ba_packet_thr) { 1227 ptr->ba_pkt_count > ptr->ba_packet_thr) {
1228 if (mwifiex_space_avail_for_new_ba_stream(adapter)) { 1228 if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1229 mwifiex_create_ba_tbl(priv, ptr->ra, tid, 1229 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1230 BA_SETUP_INPROGRESS); 1230 BA_SETUP_INPROGRESS);
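
The wmm.c hunks are a rename with a semantic shift folded in: the byte counter total_pkts_size (+= skb->len) is replaced by a per-skb total_pkt_count, and pkt_count becomes ba_pkt_count, since its only job is feeding the block-ack setup heuristic compared against ba_packet_thr in mwifiex_dequeue_tx_packet(). A standalone sketch of that heuristic, with the randomized threshold range assumed purely for illustration:

#include <stdio.h>
#include <stdlib.h>

struct ra_list {
	unsigned int total_pkt_count;	/* packets currently queued */
	unsigned int ba_pkt_count;	/* packets seen since creation */
	unsigned int ba_packet_thr;	/* randomized BA-setup threshold */
};

/* Stand-in for mwifiex_get_random_ba_threshold(); range assumed. */
static unsigned int random_ba_threshold(void)
{
	return 16 + (unsigned int)(rand() % 48);
}

static int should_setup_ba(const struct ra_list *ra)
{
	return ra->ba_pkt_count > ra->ba_packet_thr;
}

int main(void)
{
	struct ra_list ra = { .ba_packet_thr = random_ba_threshold() };

	for (int i = 0; i < 100; i++) {
		ra.total_pkt_count++;	/* enqueue */
		ra.ba_pkt_count++;
		if (should_setup_ba(&ra)) {
			printf("start BA setup after %u packets\n",
			       ra.ba_pkt_count);
			break;
		}
	}
	return 0;
}
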
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 3e60a31582f8..68dbbb9c6d12 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -166,6 +166,12 @@ config RT2800USB_RT35XX
166 rt2800usb driver. 166 rt2800usb driver.
167 Supported chips: RT3572 167 Supported chips: RT3572
168 168
169config RT2800USB_RT3573
170 bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)"
171 ---help---
172 This enables support for RT3573 chipset based wireless USB devices
173 in the rt2800usb driver.
174
169config RT2800USB_RT53XX 175config RT2800USB_RT53XX
170 bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)" 176 bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
171 ---help--- 177 ---help---
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index d78c495a86a0..fa33b5edf931 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -88,6 +88,7 @@
88#define REV_RT3071E 0x0211 88#define REV_RT3071E 0x0211
89#define REV_RT3090E 0x0211 89#define REV_RT3090E 0x0211
90#define REV_RT3390E 0x0211 90#define REV_RT3390E 0x0211
91#define REV_RT3593E 0x0211
91#define REV_RT5390F 0x0502 92#define REV_RT5390F 0x0502
92#define REV_RT5390R 0x1502 93#define REV_RT5390R 0x1502
93#define REV_RT5592C 0x0221 94#define REV_RT5592C 0x0221
@@ -1082,6 +1083,15 @@
1082#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000) 1083#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
1083#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000) 1084#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
1084#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000) 1085#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
1086/* bits for 3T devices */
1087#define TX_PWR_CFG_0_CCK1_CH0 FIELD32(0x0000000f)
1088#define TX_PWR_CFG_0_CCK1_CH1 FIELD32(0x000000f0)
1089#define TX_PWR_CFG_0_CCK5_CH0 FIELD32(0x00000f00)
1090#define TX_PWR_CFG_0_CCK5_CH1 FIELD32(0x0000f000)
1091#define TX_PWR_CFG_0_OFDM6_CH0 FIELD32(0x000f0000)
1092#define TX_PWR_CFG_0_OFDM6_CH1 FIELD32(0x00f00000)
1093#define TX_PWR_CFG_0_OFDM12_CH0 FIELD32(0x0f000000)
1094#define TX_PWR_CFG_0_OFDM12_CH1 FIELD32(0xf0000000)
1085 1095
1086/* 1096/*
1087 * TX_PWR_CFG_1: 1097 * TX_PWR_CFG_1:
@@ -1095,6 +1105,15 @@
1095#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000) 1105#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
1096#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000) 1106#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
1097#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000) 1107#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
1108/* bits for 3T devices */
1109#define TX_PWR_CFG_1_OFDM24_CH0 FIELD32(0x0000000f)
1110#define TX_PWR_CFG_1_OFDM24_CH1 FIELD32(0x000000f0)
1111#define TX_PWR_CFG_1_OFDM48_CH0 FIELD32(0x00000f00)
1112#define TX_PWR_CFG_1_OFDM48_CH1 FIELD32(0x0000f000)
1113#define TX_PWR_CFG_1_MCS0_CH0 FIELD32(0x000f0000)
1114#define TX_PWR_CFG_1_MCS0_CH1 FIELD32(0x00f00000)
1115#define TX_PWR_CFG_1_MCS2_CH0 FIELD32(0x0f000000)
1116#define TX_PWR_CFG_1_MCS2_CH1 FIELD32(0xf0000000)
1098 1117
1099/* 1118/*
1100 * TX_PWR_CFG_2: 1119 * TX_PWR_CFG_2:
@@ -1108,6 +1127,15 @@
1108#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000) 1127#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
1109#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000) 1128#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
1110#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000) 1129#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
1130/* bits for 3T devices */
1131#define TX_PWR_CFG_2_MCS4_CH0 FIELD32(0x0000000f)
1132#define TX_PWR_CFG_2_MCS4_CH1 FIELD32(0x000000f0)
1133#define TX_PWR_CFG_2_MCS6_CH0 FIELD32(0x00000f00)
1134#define TX_PWR_CFG_2_MCS6_CH1 FIELD32(0x0000f000)
1135#define TX_PWR_CFG_2_MCS8_CH0 FIELD32(0x000f0000)
1136#define TX_PWR_CFG_2_MCS8_CH1 FIELD32(0x00f00000)
1137#define TX_PWR_CFG_2_MCS10_CH0 FIELD32(0x0f000000)
1138#define TX_PWR_CFG_2_MCS10_CH1 FIELD32(0xf0000000)
1111 1139
1112/* 1140/*
1113 * TX_PWR_CFG_3: 1141 * TX_PWR_CFG_3:
@@ -1121,6 +1149,15 @@
1121#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000) 1149#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
1122#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000) 1150#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
1123#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000) 1151#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
1152/* bits for 3T devices */
1153#define TX_PWR_CFG_3_MCS12_CH0 FIELD32(0x0000000f)
1154#define TX_PWR_CFG_3_MCS12_CH1 FIELD32(0x000000f0)
1155#define TX_PWR_CFG_3_MCS14_CH0 FIELD32(0x00000f00)
1156#define TX_PWR_CFG_3_MCS14_CH1 FIELD32(0x0000f000)
1157#define TX_PWR_CFG_3_STBC0_CH0 FIELD32(0x000f0000)
1158#define TX_PWR_CFG_3_STBC0_CH1 FIELD32(0x00f00000)
1159#define TX_PWR_CFG_3_STBC2_CH0 FIELD32(0x0f000000)
1160#define TX_PWR_CFG_3_STBC2_CH1 FIELD32(0xf0000000)
1124 1161
1125/* 1162/*
1126 * TX_PWR_CFG_4: 1163 * TX_PWR_CFG_4:
@@ -1130,6 +1167,11 @@
1130#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0) 1167#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
1131#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00) 1168#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
1132#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000) 1169#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
1170/* bits for 3T devices */
1171#define TX_PWR_CFG_3_STBC4_CH0 FIELD32(0x0000000f)
1172#define TX_PWR_CFG_3_STBC4_CH1 FIELD32(0x000000f0)
1173#define TX_PWR_CFG_3_STBC6_CH0 FIELD32(0x00000f00)
1174#define TX_PWR_CFG_3_STBC6_CH1 FIELD32(0x0000f000)
1133 1175
1134/* 1176/*
1135 * TX_PIN_CFG: 1177 * TX_PIN_CFG:
@@ -1451,6 +1493,81 @@
1451 */ 1493 */
1452#define EXP_ACK_TIME 0x1380 1494#define EXP_ACK_TIME 0x1380
1453 1495
1496/* TX_PWR_CFG_5 */
1497#define TX_PWR_CFG_5 0x1384
1498#define TX_PWR_CFG_5_MCS16_CH0 FIELD32(0x0000000f)
1499#define TX_PWR_CFG_5_MCS16_CH1 FIELD32(0x000000f0)
1500#define TX_PWR_CFG_5_MCS16_CH2 FIELD32(0x00000f00)
1501#define TX_PWR_CFG_5_MCS18_CH0 FIELD32(0x000f0000)
1502#define TX_PWR_CFG_5_MCS18_CH1 FIELD32(0x00f00000)
1503#define TX_PWR_CFG_5_MCS18_CH2 FIELD32(0x0f000000)
1504
1505/* TX_PWR_CFG_6 */
1506#define TX_PWR_CFG_6 0x1388
1507#define TX_PWR_CFG_6_MCS20_CH0 FIELD32(0x0000000f)
1508#define TX_PWR_CFG_6_MCS20_CH1 FIELD32(0x000000f0)
1509#define TX_PWR_CFG_6_MCS20_CH2 FIELD32(0x00000f00)
1510#define TX_PWR_CFG_6_MCS22_CH0 FIELD32(0x000f0000)
1511#define TX_PWR_CFG_6_MCS22_CH1 FIELD32(0x00f00000)
1512#define TX_PWR_CFG_6_MCS22_CH2 FIELD32(0x0f000000)
1513
1514/* TX_PWR_CFG_0_EXT */
1515#define TX_PWR_CFG_0_EXT 0x1390
1516#define TX_PWR_CFG_0_EXT_CCK1_CH2 FIELD32(0x0000000f)
1517#define TX_PWR_CFG_0_EXT_CCK5_CH2 FIELD32(0x00000f00)
1518#define TX_PWR_CFG_0_EXT_OFDM6_CH2 FIELD32(0x000f0000)
1519#define TX_PWR_CFG_0_EXT_OFDM12_CH2 FIELD32(0x0f000000)
1520
1521/* TX_PWR_CFG_1_EXT */
1522#define TX_PWR_CFG_1_EXT 0x1394
1523#define TX_PWR_CFG_1_EXT_OFDM24_CH2 FIELD32(0x0000000f)
1524#define TX_PWR_CFG_1_EXT_OFDM48_CH2 FIELD32(0x00000f00)
1525#define TX_PWR_CFG_1_EXT_MCS0_CH2 FIELD32(0x000f0000)
1526#define TX_PWR_CFG_1_EXT_MCS2_CH2 FIELD32(0x0f000000)
1527
1528/* TX_PWR_CFG_2_EXT */
1529#define TX_PWR_CFG_2_EXT 0x1398
1530#define TX_PWR_CFG_2_EXT_MCS4_CH2 FIELD32(0x0000000f)
1531#define TX_PWR_CFG_2_EXT_MCS6_CH2 FIELD32(0x00000f00)
1532#define TX_PWR_CFG_2_EXT_MCS8_CH2 FIELD32(0x000f0000)
1533#define TX_PWR_CFG_2_EXT_MCS10_CH2 FIELD32(0x0f000000)
1534
1535/* TX_PWR_CFG_3_EXT */
1536#define TX_PWR_CFG_3_EXT 0x139c
1537#define TX_PWR_CFG_3_EXT_MCS12_CH2 FIELD32(0x0000000f)
1538#define TX_PWR_CFG_3_EXT_MCS14_CH2 FIELD32(0x00000f00)
1539#define TX_PWR_CFG_3_EXT_STBC0_CH2 FIELD32(0x000f0000)
1540#define TX_PWR_CFG_3_EXT_STBC2_CH2 FIELD32(0x0f000000)
1541
1542/* TX_PWR_CFG_4_EXT */
1543#define TX_PWR_CFG_4_EXT 0x13a0
1544#define TX_PWR_CFG_4_EXT_STBC4_CH2 FIELD32(0x0000000f)
1545#define TX_PWR_CFG_4_EXT_STBC6_CH2 FIELD32(0x00000f00)
1546
1547/* TX_PWR_CFG_7 */
1548#define TX_PWR_CFG_7 0x13d4
1549#define TX_PWR_CFG_7_OFDM54_CH0 FIELD32(0x0000000f)
1550#define TX_PWR_CFG_7_OFDM54_CH1 FIELD32(0x000000f0)
1551#define TX_PWR_CFG_7_OFDM54_CH2 FIELD32(0x00000f00)
1552#define TX_PWR_CFG_7_MCS7_CH0 FIELD32(0x000f0000)
1553#define TX_PWR_CFG_7_MCS7_CH1 FIELD32(0x00f00000)
1554#define TX_PWR_CFG_7_MCS7_CH2 FIELD32(0x0f000000)
1555
1556/* TX_PWR_CFG_8 */
1557#define TX_PWR_CFG_8 0x13d8
1558#define TX_PWR_CFG_8_MCS15_CH0 FIELD32(0x0000000f)
1559#define TX_PWR_CFG_8_MCS15_CH1 FIELD32(0x000000f0)
1560#define TX_PWR_CFG_8_MCS15_CH2 FIELD32(0x00000f00)
1561#define TX_PWR_CFG_8_MCS23_CH0 FIELD32(0x000f0000)
1562#define TX_PWR_CFG_8_MCS23_CH1 FIELD32(0x00f00000)
1563#define TX_PWR_CFG_8_MCS23_CH2 FIELD32(0x0f000000)
1564
1565/* TX_PWR_CFG_9 */
1566#define TX_PWR_CFG_9 0x13dc
1567#define TX_PWR_CFG_9_STBC7_CH0 FIELD32(0x0000000f)
1568#define TX_PWR_CFG_9_STBC7_CH1 FIELD32(0x000000f0)
1569#define TX_PWR_CFG_9_STBC7_CH2 FIELD32(0x00000f00)
1570
1454/* 1571/*
1455 * RX_FILTER_CFG: RX configuration register. 1572 * RX_FILTER_CFG: RX configuration register.
1456 */ 1573 */
@@ -1902,11 +2019,13 @@ struct mac_iveiv_entry {
1902#define HW_BEACON_BASE6 0x5dc0 2019#define HW_BEACON_BASE6 0x5dc0
1903#define HW_BEACON_BASE7 0x5bc0 2020#define HW_BEACON_BASE7 0x5bc0
1904 2021
1905#define HW_BEACON_OFFSET(__index) \ 2022#define HW_BEACON_BASE(__index) \
1906 (((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \ 2023 (((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \
1907 (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \ 2024 (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \
1908 (HW_BEACON_BASE6 - ((__index - 6) * 0x0200)))) 2025 (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))))
1909 2026
2027#define BEACON_BASE_TO_OFFSET(_base) (((_base) - 0x4000) / 64)
2028
1910/* 2029/*
1911 * BBP registers. 2030 * BBP registers.
1912 * The wordsize of the BBP is 8 bits. 2031 * The wordsize of the BBP is 8 bits.
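
HW_BEACON_OFFSET is renamed to HW_BEACON_BASE above because it yields a register base address rather than an offset, and the new BEACON_BASE_TO_OFFSET helper converts such a base back into a beacon-memory slot index: shared beacon memory starts at 0x4000 and is addressed in 64-byte units. A compilable check using the two base values visible in this hunk:

#include <stdio.h>

/* Values taken from the hunk above. */
#define HW_BEACON_BASE6	0x5dc0
#define HW_BEACON_BASE7	0x5bc0

#define BEACON_BASE_TO_OFFSET(_base)	(((_base) - 0x4000) / 64)

int main(void)
{
	printf("base6 -> slot %d\n", BEACON_BASE_TO_OFFSET(HW_BEACON_BASE6));	/* 119 */
	printf("base7 -> slot %d\n", BEACON_BASE_TO_OFFSET(HW_BEACON_BASE7));	/* 111 */
	return 0;
}
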
@@ -1975,6 +2094,10 @@ struct mac_iveiv_entry {
1975#define BBP109_TX0_POWER FIELD8(0x0f) 2094#define BBP109_TX0_POWER FIELD8(0x0f)
1976#define BBP109_TX1_POWER FIELD8(0xf0) 2095#define BBP109_TX1_POWER FIELD8(0xf0)
1977 2096
2097/* BBP 110 */
2098#define BBP110_TX2_POWER FIELD8(0x0f)
2099
2100
1978/* 2101/*
1979 * BBP 138: Unknown 2102 * BBP 138: Unknown
1980 */ 2103 */
@@ -2024,6 +2147,12 @@ struct mac_iveiv_entry {
2024#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80) 2147#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
2025/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */ 2148/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
2026#define RFCSR3_VCOCAL_EN FIELD8(0x80) 2149#define RFCSR3_VCOCAL_EN FIELD8(0x80)
2150/* Bits for RF3050 */
2151#define RFCSR3_BIT1 FIELD8(0x02)
2152#define RFCSR3_BIT2 FIELD8(0x04)
2153#define RFCSR3_BIT3 FIELD8(0x08)
2154#define RFCSR3_BIT4 FIELD8(0x10)
2155#define RFCSR3_BIT5 FIELD8(0x20)
2027 2156
2028/* 2157/*
 2029 * RFCSR 5: 2158 * RFCSR 5:
@@ -2036,6 +2165,8 @@ struct mac_iveiv_entry {
2036#define RFCSR6_R1 FIELD8(0x03) 2165#define RFCSR6_R1 FIELD8(0x03)
2037#define RFCSR6_R2 FIELD8(0x40) 2166#define RFCSR6_R2 FIELD8(0x40)
2038#define RFCSR6_TXDIV FIELD8(0x0c) 2167#define RFCSR6_TXDIV FIELD8(0x0c)
2168/* bits for RF3053 */
2169#define RFCSR6_VCO_IC FIELD8(0xc0)
2039 2170
2040/* 2171/*
2041 * RFCSR 7: 2172 * RFCSR 7:
@@ -2060,7 +2191,12 @@ struct mac_iveiv_entry {
2060 * RFCSR 11: 2191 * RFCSR 11:
2061 */ 2192 */
2062#define RFCSR11_R FIELD8(0x03) 2193#define RFCSR11_R FIELD8(0x03)
2194#define RFCSR11_PLL_MOD FIELD8(0x0c)
2063#define RFCSR11_MOD FIELD8(0xc0) 2195#define RFCSR11_MOD FIELD8(0xc0)
2196/* bits for RF3053 */
2197/* TODO: verify RFCSR11_MOD usage on other chips */
2198#define RFCSR11_PLL_IDOH FIELD8(0x40)
2199
2064 2200
2065/* 2201/*
2066 * RFCSR 12: 2202 * RFCSR 12:
@@ -2092,6 +2228,10 @@ struct mac_iveiv_entry {
2092#define RFCSR17_R FIELD8(0x20) 2228#define RFCSR17_R FIELD8(0x20)
2093#define RFCSR17_CODE FIELD8(0x7f) 2229#define RFCSR17_CODE FIELD8(0x7f)
2094 2230
2231/* RFCSR 18 */
2232#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
2233
2234
2095/* 2235/*
2096 * RFCSR 20: 2236 * RFCSR 20:
2097 */ 2237 */
@@ -2152,6 +2292,12 @@ struct mac_iveiv_entry {
2152#define RFCSR31_RX_H20M FIELD8(0x20) 2292#define RFCSR31_RX_H20M FIELD8(0x20)
2153#define RFCSR31_RX_CALIB FIELD8(0x7f) 2293#define RFCSR31_RX_CALIB FIELD8(0x7f)
2154 2294
2295/* RFCSR 32 bits for RF3053 */
2296#define RFCSR32_TX_AGC_FC FIELD8(0xf8)
2297
2298/* RFCSR 36 bits for RF3053 */
2299#define RFCSR36_RF_BS FIELD8(0x80)
2300
2155/* 2301/*
2156 * RFCSR 38: 2302 * RFCSR 38:
2157 */ 2303 */
@@ -2160,6 +2306,7 @@ struct mac_iveiv_entry {
2160/* 2306/*
2161 * RFCSR 39: 2307 * RFCSR 39:
2162 */ 2308 */
2309#define RFCSR39_RX_DIV FIELD8(0x40)
2163#define RFCSR39_RX_LO2_EN FIELD8(0x80) 2310#define RFCSR39_RX_LO2_EN FIELD8(0x80)
2164 2311
2165/* 2312/*
@@ -2167,12 +2314,36 @@ struct mac_iveiv_entry {
2167 */ 2314 */
2168#define RFCSR49_TX FIELD8(0x3f) 2315#define RFCSR49_TX FIELD8(0x3f)
2169#define RFCSR49_EP FIELD8(0xc0) 2316#define RFCSR49_EP FIELD8(0xc0)
2317/* bits for RT3593 */
2318#define RFCSR49_TX_LO1_IC FIELD8(0x1c)
2319#define RFCSR49_TX_DIV FIELD8(0x20)
2170 2320
2171/* 2321/*
2172 * RFCSR 50: 2322 * RFCSR 50:
2173 */ 2323 */
2174#define RFCSR50_TX FIELD8(0x3f) 2324#define RFCSR50_TX FIELD8(0x3f)
2175#define RFCSR50_EP FIELD8(0xc0) 2325#define RFCSR50_EP FIELD8(0xc0)
2326/* bits for RT3593 */
2327#define RFCSR50_TX_LO1_EN FIELD8(0x20)
2328#define RFCSR50_TX_LO2_EN FIELD8(0x10)
2329
2330/* RFCSR 51 */
2331/* bits for RT3593 */
2332#define RFCSR51_BITS01 FIELD8(0x03)
2333#define RFCSR51_BITS24 FIELD8(0x1c)
2334#define RFCSR51_BITS57 FIELD8(0xe0)
2335
2336#define RFCSR53_TX_POWER FIELD8(0x3f)
2337#define RFCSR53_UNKNOWN FIELD8(0xc0)
2338
2339#define RFCSR54_TX_POWER FIELD8(0x3f)
2340#define RFCSR54_UNKNOWN FIELD8(0xc0)
2341
2342#define RFCSR55_TX_POWER FIELD8(0x3f)
2343#define RFCSR55_UNKNOWN FIELD8(0xc0)
2344
2345#define RFCSR57_DRV_CC FIELD8(0xfc)
2346
2176 2347
2177/* 2348/*
2178 * RF registers 2349 * RF registers
@@ -2206,28 +2377,67 @@ struct mac_iveiv_entry {
2206 * The wordsize of the EEPROM is 16 bits. 2377 * The wordsize of the EEPROM is 16 bits.
2207 */ 2378 */
2208 2379
2209/* 2380enum rt2800_eeprom_word {
2210 * Chip ID 2381 EEPROM_CHIP_ID = 0,
2211 */ 2382 EEPROM_VERSION,
2212#define EEPROM_CHIP_ID 0x0000 2383 EEPROM_MAC_ADDR_0,
2384 EEPROM_MAC_ADDR_1,
2385 EEPROM_MAC_ADDR_2,
2386 EEPROM_NIC_CONF0,
2387 EEPROM_NIC_CONF1,
2388 EEPROM_FREQ,
2389 EEPROM_LED_AG_CONF,
2390 EEPROM_LED_ACT_CONF,
2391 EEPROM_LED_POLARITY,
2392 EEPROM_NIC_CONF2,
2393 EEPROM_LNA,
2394 EEPROM_RSSI_BG,
2395 EEPROM_RSSI_BG2,
2396 EEPROM_TXMIXER_GAIN_BG,
2397 EEPROM_RSSI_A,
2398 EEPROM_RSSI_A2,
2399 EEPROM_TXMIXER_GAIN_A,
2400 EEPROM_EIRP_MAX_TX_POWER,
2401 EEPROM_TXPOWER_DELTA,
2402 EEPROM_TXPOWER_BG1,
2403 EEPROM_TXPOWER_BG2,
2404 EEPROM_TSSI_BOUND_BG1,
2405 EEPROM_TSSI_BOUND_BG2,
2406 EEPROM_TSSI_BOUND_BG3,
2407 EEPROM_TSSI_BOUND_BG4,
2408 EEPROM_TSSI_BOUND_BG5,
2409 EEPROM_TXPOWER_A1,
2410 EEPROM_TXPOWER_A2,
2411 EEPROM_TSSI_BOUND_A1,
2412 EEPROM_TSSI_BOUND_A2,
2413 EEPROM_TSSI_BOUND_A3,
2414 EEPROM_TSSI_BOUND_A4,
2415 EEPROM_TSSI_BOUND_A5,
2416 EEPROM_TXPOWER_BYRATE,
2417 EEPROM_BBP_START,
2418
2419 /* IDs for extended EEPROM format used by three-chain devices */
2420 EEPROM_EXT_LNA2,
2421 EEPROM_EXT_TXPOWER_BG3,
2422 EEPROM_EXT_TXPOWER_A3,
2423
2424 /* New values must be added before this */
2425 EEPROM_WORD_COUNT
2426};
2213 2427
2214/* 2428/*
2215 * EEPROM Version 2429 * EEPROM Version
2216 */ 2430 */
2217#define EEPROM_VERSION 0x0001
2218#define EEPROM_VERSION_FAE FIELD16(0x00ff) 2431#define EEPROM_VERSION_FAE FIELD16(0x00ff)
2219#define EEPROM_VERSION_VERSION FIELD16(0xff00) 2432#define EEPROM_VERSION_VERSION FIELD16(0xff00)
2220 2433
2221/* 2434/*
2222 * HW MAC address. 2435 * HW MAC address.
2223 */ 2436 */
2224#define EEPROM_MAC_ADDR_0 0x0002
2225#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) 2437#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
2226#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) 2438#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
2227#define EEPROM_MAC_ADDR_1 0x0003
2228#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) 2439#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
2229#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) 2440#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
2230#define EEPROM_MAC_ADDR_2 0x0004
2231#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) 2441#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
2232#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) 2442#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
2233 2443
@@ -2237,7 +2447,6 @@ struct mac_iveiv_entry {
2237 * TXPATH: 1: 1T, 2: 2T, 3: 3T 2447 * TXPATH: 1: 1T, 2: 2T, 3: 3T
2238 * RF_TYPE: RFIC type 2448 * RF_TYPE: RFIC type
2239 */ 2449 */
2240#define EEPROM_NIC_CONF0 0x001a
2241#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f) 2450#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
2242#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0) 2451#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
2243#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00) 2452#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
@@ -2261,7 +2470,6 @@ struct mac_iveiv_entry {
2261 * BT_COEXIST: 0: disable, 1: enable 2470 * BT_COEXIST: 0: disable, 1: enable
2262 * DAC_TEST: 0: disable, 1: enable 2471 * DAC_TEST: 0: disable, 1: enable
2263 */ 2472 */
2264#define EEPROM_NIC_CONF1 0x001b
2265#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001) 2473#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
2266#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002) 2474#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
2267#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004) 2475#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
@@ -2281,7 +2489,6 @@ struct mac_iveiv_entry {
2281/* 2489/*
2282 * EEPROM frequency 2490 * EEPROM frequency
2283 */ 2491 */
2284#define EEPROM_FREQ 0x001d
2285#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) 2492#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
2286#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00) 2493#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
2287#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000) 2494#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
@@ -2298,9 +2505,6 @@ struct mac_iveiv_entry {
2298 * POLARITY_GPIO_4: Polarity GPIO4 setting. 2505 * POLARITY_GPIO_4: Polarity GPIO4 setting.
2299 * LED_MODE: Led mode. 2506 * LED_MODE: Led mode.
2300 */ 2507 */
2301#define EEPROM_LED_AG_CONF 0x001e
2302#define EEPROM_LED_ACT_CONF 0x001f
2303#define EEPROM_LED_POLARITY 0x0020
2304#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001) 2508#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
2305#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) 2509#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
2306#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) 2510#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
@@ -2317,7 +2521,6 @@ struct mac_iveiv_entry {
2317 * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream 2521 * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
2318 * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved 2522 * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
2319 */ 2523 */
2320#define EEPROM_NIC_CONF2 0x0021
2321#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f) 2524#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
2322#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0) 2525#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
2323#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600) 2526#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
@@ -2325,54 +2528,46 @@ struct mac_iveiv_entry {
2325/* 2528/*
2326 * EEPROM LNA 2529 * EEPROM LNA
2327 */ 2530 */
2328#define EEPROM_LNA 0x0022
2329#define EEPROM_LNA_BG FIELD16(0x00ff) 2531#define EEPROM_LNA_BG FIELD16(0x00ff)
2330#define EEPROM_LNA_A0 FIELD16(0xff00) 2532#define EEPROM_LNA_A0 FIELD16(0xff00)
2331 2533
2332/* 2534/*
2333 * EEPROM RSSI BG offset 2535 * EEPROM RSSI BG offset
2334 */ 2536 */
2335#define EEPROM_RSSI_BG 0x0023
2336#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff) 2537#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
2337#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00) 2538#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)
2338 2539
2339/* 2540/*
2340 * EEPROM RSSI BG2 offset 2541 * EEPROM RSSI BG2 offset
2341 */ 2542 */
2342#define EEPROM_RSSI_BG2 0x0024
2343#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff) 2543#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
2344#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00) 2544#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
2345 2545
2346/* 2546/*
2347 * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2). 2547 * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
2348 */ 2548 */
2349#define EEPROM_TXMIXER_GAIN_BG 0x0024
2350#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007) 2549#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
2351 2550
2352/* 2551/*
2353 * EEPROM RSSI A offset 2552 * EEPROM RSSI A offset
2354 */ 2553 */
2355#define EEPROM_RSSI_A 0x0025
2356#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff) 2554#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
2357#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00) 2555#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)
2358 2556
2359/* 2557/*
2360 * EEPROM RSSI A2 offset 2558 * EEPROM RSSI A2 offset
2361 */ 2559 */
2362#define EEPROM_RSSI_A2 0x0026
2363#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff) 2560#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
2364#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) 2561#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
2365 2562
2366/* 2563/*
2367 * EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2). 2564 * EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2).
2368 */ 2565 */
2369#define EEPROM_TXMIXER_GAIN_A 0x0026
2370#define EEPROM_TXMIXER_GAIN_A_VAL FIELD16(0x0007) 2566#define EEPROM_TXMIXER_GAIN_A_VAL FIELD16(0x0007)
2371 2567
2372/* 2568/*
2373 * EEPROM EIRP Maximum TX power values(unit: dbm) 2569 * EEPROM EIRP Maximum TX power values(unit: dbm)
2374 */ 2570 */
2375#define EEPROM_EIRP_MAX_TX_POWER 0x0027
2376#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff) 2571#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff)
2377#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00) 2572#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00)
2378 2573
@@ -2383,7 +2578,6 @@ struct mac_iveiv_entry {
2383 * TYPE: 1: Plus the delta value, 0: minus the delta value 2578 * TYPE: 1: Plus the delta value, 0: minus the delta value
2384 * ENABLE: enable tx power compensation for 40BW 2579 * ENABLE: enable tx power compensation for 40BW
2385 */ 2580 */
2386#define EEPROM_TXPOWER_DELTA 0x0028
2387#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f) 2581#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f)
2388#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040) 2582#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040)
2389#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080) 2583#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
@@ -2394,8 +2588,6 @@ struct mac_iveiv_entry {
2394/* 2588/*
2395 * EEPROM TXPOWER 802.11BG 2589 * EEPROM TXPOWER 802.11BG
2396 */ 2590 */
2397#define EEPROM_TXPOWER_BG1 0x0029
2398#define EEPROM_TXPOWER_BG2 0x0030
2399#define EEPROM_TXPOWER_BG_SIZE 7 2591#define EEPROM_TXPOWER_BG_SIZE 7
2400#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff) 2592#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
2401#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00) 2593#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
@@ -2407,7 +2599,6 @@ struct mac_iveiv_entry {
2407 * MINUS3: If the actual TSSI is below this boundary, tx power needs to be 2599 * MINUS3: If the actual TSSI is below this boundary, tx power needs to be
2408 * reduced by (agc_step * -3) 2600 * reduced by (agc_step * -3)
2409 */ 2601 */
2410#define EEPROM_TSSI_BOUND_BG1 0x0037
2411#define EEPROM_TSSI_BOUND_BG1_MINUS4 FIELD16(0x00ff) 2602#define EEPROM_TSSI_BOUND_BG1_MINUS4 FIELD16(0x00ff)
2412#define EEPROM_TSSI_BOUND_BG1_MINUS3 FIELD16(0xff00) 2603#define EEPROM_TSSI_BOUND_BG1_MINUS3 FIELD16(0xff00)
2413 2604
@@ -2418,7 +2609,6 @@ struct mac_iveiv_entry {
2418 * MINUS1: If the actual TSSI is below this boundary, tx power needs to be 2609 * MINUS1: If the actual TSSI is below this boundary, tx power needs to be
2419 * reduced by (agc_step * -1) 2610 * reduced by (agc_step * -1)
2420 */ 2611 */
2421#define EEPROM_TSSI_BOUND_BG2 0x0038
2422#define EEPROM_TSSI_BOUND_BG2_MINUS2 FIELD16(0x00ff) 2612#define EEPROM_TSSI_BOUND_BG2_MINUS2 FIELD16(0x00ff)
2423#define EEPROM_TSSI_BOUND_BG2_MINUS1 FIELD16(0xff00) 2613#define EEPROM_TSSI_BOUND_BG2_MINUS1 FIELD16(0xff00)
2424 2614
@@ -2428,7 +2618,6 @@ struct mac_iveiv_entry {
2428 * PLUS1: If the actual TSSI is above this boundary, tx power needs to be 2618 * PLUS1: If the actual TSSI is above this boundary, tx power needs to be
2429 * increased by (agc_step * 1) 2619 * increased by (agc_step * 1)
2430 */ 2620 */
2431#define EEPROM_TSSI_BOUND_BG3 0x0039
2432#define EEPROM_TSSI_BOUND_BG3_REF FIELD16(0x00ff) 2621#define EEPROM_TSSI_BOUND_BG3_REF FIELD16(0x00ff)
2433#define EEPROM_TSSI_BOUND_BG3_PLUS1 FIELD16(0xff00) 2622#define EEPROM_TSSI_BOUND_BG3_PLUS1 FIELD16(0xff00)
2434 2623
@@ -2439,7 +2628,6 @@ struct mac_iveiv_entry {
2439 * PLUS3: If the actual TSSI is above this boundary, tx power needs to be 2628 * PLUS3: If the actual TSSI is above this boundary, tx power needs to be
2440 * increased by (agc_step * 3) 2629 * increased by (agc_step * 3)
2441 */ 2630 */
2442#define EEPROM_TSSI_BOUND_BG4 0x003a
2443#define EEPROM_TSSI_BOUND_BG4_PLUS2 FIELD16(0x00ff) 2631#define EEPROM_TSSI_BOUND_BG4_PLUS2 FIELD16(0x00ff)
2444#define EEPROM_TSSI_BOUND_BG4_PLUS3 FIELD16(0xff00) 2632#define EEPROM_TSSI_BOUND_BG4_PLUS3 FIELD16(0xff00)
2445 2633
@@ -2449,19 +2637,20 @@ struct mac_iveiv_entry {
2449 * increased by (agc_step * 4) 2637 * increased by (agc_step * 4)
2450 * AGC_STEP: Temperature compensation step. 2638 * AGC_STEP: Temperature compensation step.
2451 */ 2639 */
2452#define EEPROM_TSSI_BOUND_BG5 0x003b
2453#define EEPROM_TSSI_BOUND_BG5_PLUS4 FIELD16(0x00ff) 2640#define EEPROM_TSSI_BOUND_BG5_PLUS4 FIELD16(0x00ff)
2454#define EEPROM_TSSI_BOUND_BG5_AGC_STEP FIELD16(0xff00) 2641#define EEPROM_TSSI_BOUND_BG5_AGC_STEP FIELD16(0xff00)
2455 2642
2456/* 2643/*
2457 * EEPROM TXPOWER 802.11A 2644 * EEPROM TXPOWER 802.11A
2458 */ 2645 */
2459#define EEPROM_TXPOWER_A1 0x003c
2460#define EEPROM_TXPOWER_A2 0x0053
2461#define EEPROM_TXPOWER_A_SIZE 6 2646#define EEPROM_TXPOWER_A_SIZE 6
2462#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) 2647#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
2463#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) 2648#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
2464 2649
2650/* EEPROM_TXPOWER_{A,G} fields for RT3593 */
2651#define EEPROM_TXPOWER_ALC FIELD8(0x1f)
2652#define EEPROM_TXPOWER_FINE_CTRL FIELD8(0xe0)
2653
2465/* 2654/*
2466 * EEPROM temperature compensation boundaries 802.11A 2655 * EEPROM temperature compensation boundaries 802.11A
2467 * MINUS4: If the actual TSSI is below this boundary, tx power needs to be 2656 * MINUS4: If the actual TSSI is below this boundary, tx power needs to be
@@ -2469,7 +2658,6 @@ struct mac_iveiv_entry {
2469 * MINUS3: If the actual TSSI is below this boundary, tx power needs to be 2658 * MINUS3: If the actual TSSI is below this boundary, tx power needs to be
2470 * reduced by (agc_step * -3) 2659 * reduced by (agc_step * -3)
2471 */ 2660 */
2472#define EEPROM_TSSI_BOUND_A1 0x006a
2473#define EEPROM_TSSI_BOUND_A1_MINUS4 FIELD16(0x00ff) 2661#define EEPROM_TSSI_BOUND_A1_MINUS4 FIELD16(0x00ff)
2474#define EEPROM_TSSI_BOUND_A1_MINUS3 FIELD16(0xff00) 2662#define EEPROM_TSSI_BOUND_A1_MINUS3 FIELD16(0xff00)
2475 2663
@@ -2480,7 +2668,6 @@ struct mac_iveiv_entry {
2480 * MINUS1: If the actual TSSI is below this boundary, tx power needs to be 2668 * MINUS1: If the actual TSSI is below this boundary, tx power needs to be
2481 * reduced by (agc_step * -1) 2669 * reduced by (agc_step * -1)
2482 */ 2670 */
2483#define EEPROM_TSSI_BOUND_A2 0x006b
2484#define EEPROM_TSSI_BOUND_A2_MINUS2 FIELD16(0x00ff) 2671#define EEPROM_TSSI_BOUND_A2_MINUS2 FIELD16(0x00ff)
2485#define EEPROM_TSSI_BOUND_A2_MINUS1 FIELD16(0xff00) 2672#define EEPROM_TSSI_BOUND_A2_MINUS1 FIELD16(0xff00)
2486 2673
@@ -2490,7 +2677,6 @@ struct mac_iveiv_entry {
2490 * PLUS1: If the actual TSSI is above this boundary, tx power needs to be 2677 * PLUS1: If the actual TSSI is above this boundary, tx power needs to be
2491 * increased by (agc_step * 1) 2678 * increased by (agc_step * 1)
2492 */ 2679 */
2493#define EEPROM_TSSI_BOUND_A3 0x006c
2494#define EEPROM_TSSI_BOUND_A3_REF FIELD16(0x00ff) 2680#define EEPROM_TSSI_BOUND_A3_REF FIELD16(0x00ff)
2495#define EEPROM_TSSI_BOUND_A3_PLUS1 FIELD16(0xff00) 2681#define EEPROM_TSSI_BOUND_A3_PLUS1 FIELD16(0xff00)
2496 2682
@@ -2501,7 +2687,6 @@ struct mac_iveiv_entry {
2501 * PLUS3: If the actual TSSI is above this boundary, tx power needs to be 2687 * PLUS3: If the actual TSSI is above this boundary, tx power needs to be
2502 * increased by (agc_step * 3) 2688 * increased by (agc_step * 3)
2503 */ 2689 */
2504#define EEPROM_TSSI_BOUND_A4 0x006d
2505#define EEPROM_TSSI_BOUND_A4_PLUS2 FIELD16(0x00ff) 2690#define EEPROM_TSSI_BOUND_A4_PLUS2 FIELD16(0x00ff)
2506#define EEPROM_TSSI_BOUND_A4_PLUS3 FIELD16(0xff00) 2691#define EEPROM_TSSI_BOUND_A4_PLUS3 FIELD16(0xff00)
2507 2692
@@ -2511,14 +2696,12 @@ struct mac_iveiv_entry {
2511 * increased by (agc_step * 4) 2696 * increased by (agc_step * 4)
2512 * AGC_STEP: Temperature compensation step. 2697 * AGC_STEP: Temperature compensation step.
2513 */ 2698 */
2514#define EEPROM_TSSI_BOUND_A5 0x006e
2515#define EEPROM_TSSI_BOUND_A5_PLUS4 FIELD16(0x00ff) 2699#define EEPROM_TSSI_BOUND_A5_PLUS4 FIELD16(0x00ff)
2516#define EEPROM_TSSI_BOUND_A5_AGC_STEP FIELD16(0xff00) 2700#define EEPROM_TSSI_BOUND_A5_AGC_STEP FIELD16(0xff00)
2517 2701
2518/* 2702/*
2519 * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode 2703 * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode
2520 */ 2704 */
2521#define EEPROM_TXPOWER_BYRATE 0x006f
2522#define EEPROM_TXPOWER_BYRATE_SIZE 9 2705#define EEPROM_TXPOWER_BYRATE_SIZE 9
2523 2706
2524#define EEPROM_TXPOWER_BYRATE_RATE0 FIELD16(0x000f) 2707#define EEPROM_TXPOWER_BYRATE_RATE0 FIELD16(0x000f)
@@ -2529,11 +2712,14 @@ struct mac_iveiv_entry {
2529/* 2712/*
2530 * EEPROM BBP. 2713 * EEPROM BBP.
2531 */ 2714 */
2532#define EEPROM_BBP_START 0x0078
2533#define EEPROM_BBP_SIZE 16 2715#define EEPROM_BBP_SIZE 16
2534#define EEPROM_BBP_VALUE FIELD16(0x00ff) 2716#define EEPROM_BBP_VALUE FIELD16(0x00ff)
2535#define EEPROM_BBP_REG_ID FIELD16(0xff00) 2717#define EEPROM_BBP_REG_ID FIELD16(0xff00)
2536 2718
2719/* EEPROM_EXT_LNA2 */
2720#define EEPROM_EXT_LNA2_A1 FIELD16(0x00ff)
2721#define EEPROM_EXT_LNA2_A2 FIELD16(0xff00)
2722
2537/* 2723/*
2538 * EEPROM IQ Calibration, unlike other entries those are byte addresses. 2724 * EEPROM IQ Calibration, unlike other entries those are byte addresses.
2539 */ 2725 */
@@ -2610,6 +2796,7 @@ struct mac_iveiv_entry {
2610#define MCU_RADAR 0x60 2796#define MCU_RADAR 0x60
2611#define MCU_BOOT_SIGNAL 0x72 2797#define MCU_BOOT_SIGNAL 0x72
2612#define MCU_ANT_SELECT 0X73 2798#define MCU_ANT_SELECT 0X73
2799#define MCU_FREQ_OFFSET 0x74
2613#define MCU_BBP_SIGNAL 0x80 2800#define MCU_BBP_SIGNAL 0x80
2614#define MCU_POWER_SAVE 0x83 2801#define MCU_POWER_SAVE 0x83
2615#define MCU_BAND_SELECT 0x91 2802#define MCU_BAND_SELECT 0x91
@@ -2630,6 +2817,7 @@ struct mac_iveiv_entry {
2630#define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32)) 2817#define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
2631 2818
2632#define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32)) 2819#define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
2820#define RXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
2633#define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32)) 2821#define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32))
2634 2822
2635/* 2823/*
@@ -2750,18 +2938,15 @@ struct mac_iveiv_entry {
2750#define MAX_A_TXPOWER 15 2938#define MAX_A_TXPOWER 15
2751#define DEFAULT_TXPOWER 5 2939#define DEFAULT_TXPOWER 5
2752 2940
2941#define MIN_A_TXPOWER_3593 0
2942#define MAX_A_TXPOWER_3593 31
2943
2753#define TXPOWER_G_FROM_DEV(__txpower) \ 2944#define TXPOWER_G_FROM_DEV(__txpower) \
2754 ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) 2945 ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
2755 2946
2756#define TXPOWER_G_TO_DEV(__txpower) \
2757 clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
2758
2759#define TXPOWER_A_FROM_DEV(__txpower) \ 2947#define TXPOWER_A_FROM_DEV(__txpower) \
2760 ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) 2948 ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
2761 2949
2762#define TXPOWER_A_TO_DEV(__txpower) \
2763 clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
2764
2765/* 2950/*
 2766 * Board's maximum TX power limitation 2951 * Board's maximum TX power limitation
2767 */ 2952 */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 1b41c8eda12d..95e6e61c3de0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,157 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
221 mutex_unlock(&rt2x00dev->csr_mutex); 221 mutex_unlock(&rt2x00dev->csr_mutex);
222} 222}
223 223
224static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = {
225 [EEPROM_CHIP_ID] = 0x0000,
226 [EEPROM_VERSION] = 0x0001,
227 [EEPROM_MAC_ADDR_0] = 0x0002,
228 [EEPROM_MAC_ADDR_1] = 0x0003,
229 [EEPROM_MAC_ADDR_2] = 0x0004,
230 [EEPROM_NIC_CONF0] = 0x001a,
231 [EEPROM_NIC_CONF1] = 0x001b,
232 [EEPROM_FREQ] = 0x001d,
233 [EEPROM_LED_AG_CONF] = 0x001e,
234 [EEPROM_LED_ACT_CONF] = 0x001f,
235 [EEPROM_LED_POLARITY] = 0x0020,
236 [EEPROM_NIC_CONF2] = 0x0021,
237 [EEPROM_LNA] = 0x0022,
238 [EEPROM_RSSI_BG] = 0x0023,
239 [EEPROM_RSSI_BG2] = 0x0024,
240 [EEPROM_TXMIXER_GAIN_BG] = 0x0024, /* overlaps with RSSI_BG2 */
241 [EEPROM_RSSI_A] = 0x0025,
242 [EEPROM_RSSI_A2] = 0x0026,
243 [EEPROM_TXMIXER_GAIN_A] = 0x0026, /* overlaps with RSSI_A2 */
244 [EEPROM_EIRP_MAX_TX_POWER] = 0x0027,
245 [EEPROM_TXPOWER_DELTA] = 0x0028,
246 [EEPROM_TXPOWER_BG1] = 0x0029,
247 [EEPROM_TXPOWER_BG2] = 0x0030,
248 [EEPROM_TSSI_BOUND_BG1] = 0x0037,
249 [EEPROM_TSSI_BOUND_BG2] = 0x0038,
250 [EEPROM_TSSI_BOUND_BG3] = 0x0039,
251 [EEPROM_TSSI_BOUND_BG4] = 0x003a,
252 [EEPROM_TSSI_BOUND_BG5] = 0x003b,
253 [EEPROM_TXPOWER_A1] = 0x003c,
254 [EEPROM_TXPOWER_A2] = 0x0053,
255 [EEPROM_TSSI_BOUND_A1] = 0x006a,
256 [EEPROM_TSSI_BOUND_A2] = 0x006b,
257 [EEPROM_TSSI_BOUND_A3] = 0x006c,
258 [EEPROM_TSSI_BOUND_A4] = 0x006d,
259 [EEPROM_TSSI_BOUND_A5] = 0x006e,
260 [EEPROM_TXPOWER_BYRATE] = 0x006f,
261 [EEPROM_BBP_START] = 0x0078,
262};
263
264static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
265 [EEPROM_CHIP_ID] = 0x0000,
266 [EEPROM_VERSION] = 0x0001,
267 [EEPROM_MAC_ADDR_0] = 0x0002,
268 [EEPROM_MAC_ADDR_1] = 0x0003,
269 [EEPROM_MAC_ADDR_2] = 0x0004,
270 [EEPROM_NIC_CONF0] = 0x001a,
271 [EEPROM_NIC_CONF1] = 0x001b,
272 [EEPROM_NIC_CONF2] = 0x001c,
273 [EEPROM_EIRP_MAX_TX_POWER] = 0x0020,
274 [EEPROM_FREQ] = 0x0022,
275 [EEPROM_LED_AG_CONF] = 0x0023,
276 [EEPROM_LED_ACT_CONF] = 0x0024,
277 [EEPROM_LED_POLARITY] = 0x0025,
278 [EEPROM_LNA] = 0x0026,
279 [EEPROM_EXT_LNA2] = 0x0027,
280 [EEPROM_RSSI_BG] = 0x0028,
281 [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
282 [EEPROM_RSSI_BG2] = 0x0029,
283 [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
284 [EEPROM_RSSI_A] = 0x002a,
285 [EEPROM_RSSI_A2] = 0x002b,
286 [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
287 [EEPROM_TXPOWER_BG1] = 0x0030,
288 [EEPROM_TXPOWER_BG2] = 0x0037,
289 [EEPROM_EXT_TXPOWER_BG3] = 0x003e,
290 [EEPROM_TSSI_BOUND_BG1] = 0x0045,
291 [EEPROM_TSSI_BOUND_BG2] = 0x0046,
292 [EEPROM_TSSI_BOUND_BG3] = 0x0047,
293 [EEPROM_TSSI_BOUND_BG4] = 0x0048,
294 [EEPROM_TSSI_BOUND_BG5] = 0x0049,
295 [EEPROM_TXPOWER_A1] = 0x004b,
296 [EEPROM_TXPOWER_A2] = 0x0065,
297 [EEPROM_EXT_TXPOWER_A3] = 0x007f,
298 [EEPROM_TSSI_BOUND_A1] = 0x009a,
299 [EEPROM_TSSI_BOUND_A2] = 0x009b,
300 [EEPROM_TSSI_BOUND_A3] = 0x009c,
301 [EEPROM_TSSI_BOUND_A4] = 0x009d,
302 [EEPROM_TSSI_BOUND_A5] = 0x009e,
303 [EEPROM_TXPOWER_BYRATE] = 0x00a0,
304};
305
306static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev,
307 const enum rt2800_eeprom_word word)
308{
309 const unsigned int *map;
310 unsigned int index;
311
312 if (WARN_ONCE(word >= EEPROM_WORD_COUNT,
313 "%s: invalid EEPROM word %d\n",
314 wiphy_name(rt2x00dev->hw->wiphy), word))
315 return 0;
316
317 if (rt2x00_rt(rt2x00dev, RT3593))
318 map = rt2800_eeprom_map_ext;
319 else
320 map = rt2800_eeprom_map;
321
322 index = map[word];
323
324 /* Index 0 is valid only for EEPROM_CHIP_ID.
325 * Otherwise it means that the offset of the
326 * given word is not initialized in the map,
327 * or that the field is not usable on the
328 * actual chipset.
329 */
330 WARN_ONCE(word != EEPROM_CHIP_ID && index == 0,
331 "%s: invalid access of EEPROM word %d\n",
332 wiphy_name(rt2x00dev->hw->wiphy), word);
333
334 return index;
335}
336
337static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev,
338 const enum rt2800_eeprom_word word)
339{
340 unsigned int index;
341
342 index = rt2800_eeprom_word_index(rt2x00dev, word);
343 return rt2x00_eeprom_addr(rt2x00dev, index);
344}
345
346static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev,
347 const enum rt2800_eeprom_word word, u16 *data)
348{
349 unsigned int index;
350
351 index = rt2800_eeprom_word_index(rt2x00dev, word);
352 rt2x00_eeprom_read(rt2x00dev, index, data);
353}
354
355static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev,
356 const enum rt2800_eeprom_word word, u16 data)
357{
358 unsigned int index;
359
360 index = rt2800_eeprom_word_index(rt2x00dev, word);
361 rt2x00_eeprom_write(rt2x00dev, index, data);
362}
363
364static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev,
365 const enum rt2800_eeprom_word array,
366 unsigned int offset,
367 u16 *data)
368{
369 unsigned int index;
370
371 index = rt2800_eeprom_word_index(rt2x00dev, array);
372 rt2x00_eeprom_read(rt2x00dev, index + offset, data);
373}
374
224static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) 375static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
225{ 376{
226 u32 reg; 377 u32 reg;
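
The block above is the other half of the rt2800.h change that turned fixed EEPROM word offsets into enum rt2800_eeprom_word: each logical word is translated through a per-chip table (the standard map, or the extended map on RT3593) before any EEPROM access, and an offset of 0 for anything other than the chip ID flags a word the chipset does not provide. A reduced standalone model of the lookup, using a few offsets copied from the two maps:

#include <stdio.h>

/* Logical words, mirroring a slice of enum rt2800_eeprom_word. */
enum eeprom_word { W_CHIP_ID, W_VERSION, W_FREQ, W_TXPOWER_BYRATE, W_COUNT };

/* Offsets copied from rt2800_eeprom_map / rt2800_eeprom_map_ext. */
static const unsigned int map_std[W_COUNT] = {
	[W_CHIP_ID]        = 0x0000,
	[W_VERSION]        = 0x0001,
	[W_FREQ]           = 0x001d,
	[W_TXPOWER_BYRATE] = 0x006f,
};
static const unsigned int map_ext[W_COUNT] = {
	[W_CHIP_ID]        = 0x0000,
	[W_VERSION]        = 0x0001,
	[W_FREQ]           = 0x0022,
	[W_TXPOWER_BYRATE] = 0x00a0,
};

static unsigned int word_index(int is_rt3593, enum eeprom_word word)
{
	const unsigned int *map = is_rt3593 ? map_ext : map_std;

	/* Offset 0 is only legitimate for the chip ID; any other
	 * word mapped to 0 is unusable on this chipset. */
	if (word != W_CHIP_ID && map[word] == 0)
		fprintf(stderr, "invalid EEPROM word %d\n", word);
	return map[word];
}

int main(void)
{
	printf("FREQ: std 0x%04x, ext 0x%04x\n",
	       word_index(0, W_FREQ), word_index(1, W_FREQ));
	return 0;
}
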
@@ -370,6 +521,29 @@ void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
370} 521}
371EXPORT_SYMBOL_GPL(rt2800_disable_wpdma); 522EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
372 523
524void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
525 unsigned short *txwi_size,
526 unsigned short *rxwi_size)
527{
528 switch (rt2x00dev->chip.rt) {
529 case RT3593:
530 *txwi_size = TXWI_DESC_SIZE_4WORDS;
531 *rxwi_size = RXWI_DESC_SIZE_5WORDS;
532 break;
533
534 case RT5592:
535 *txwi_size = TXWI_DESC_SIZE_5WORDS;
536 *rxwi_size = RXWI_DESC_SIZE_6WORDS;
537 break;
538
539 default:
540 *txwi_size = TXWI_DESC_SIZE_4WORDS;
541 *rxwi_size = RXWI_DESC_SIZE_4WORDS;
542 break;
543 }
544}
545EXPORT_SYMBOL_GPL(rt2800_get_txwi_rxwi_size);
546
373static bool rt2800_check_firmware_crc(const u8 *data, const size_t len) 547static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
374{ 548{
375 u16 fw_crc; 549 u16 fw_crc;
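
rt2800_get_txwi_rxwi_size() above lets the bus glue query per-chip descriptor sizes instead of hardcoding them: RT3593 pairs the 4-word TXWI with the new 5-word RXWI, RT5592 uses 5 and 6 words, and everything else keeps the legacy 4/4 layout. A tiny standalone equivalent; the numeric chip constants are assumed to match the rt2x00 chip.rt values:

#include <stdio.h>

#define WORDS(n)	((n) * 4u)	/* __le32 words -> bytes */

struct wi_size {
	unsigned short txwi, rxwi;
};

static struct wi_size get_txwi_rxwi_size(unsigned int rt)
{
	switch (rt) {
	case 0x3593:
		return (struct wi_size){ WORDS(4), WORDS(5) };
	case 0x5592:
		return (struct wi_size){ WORDS(5), WORDS(6) };
	default:
		return (struct wi_size){ WORDS(4), WORDS(4) };
	}
}

int main(void)
{
	struct wi_size s = get_txwi_rxwi_size(0x3593);

	printf("RT3593: txwi=%u rxwi=%u bytes\n", s.txwi, s.rxwi);
	return 0;
}
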
@@ -609,16 +783,16 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
609 u8 offset2; 783 u8 offset2;
610 784
611 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 785 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
612 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); 786 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
613 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); 787 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
614 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); 788 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
615 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); 789 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
616 offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2); 790 offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2);
617 } else { 791 } else {
618 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom); 792 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
619 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0); 793 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0);
620 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1); 794 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1);
621 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); 795 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
622 offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2); 796 offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2);
623 } 797 }
624 798
@@ -766,6 +940,18 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
766} 940}
767EXPORT_SYMBOL_GPL(rt2800_txdone_entry); 941EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
768 942
943static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev,
944 unsigned int index)
945{
946 return HW_BEACON_BASE(index);
947}
948
949static inline u8 rt2800_get_beacon_offset(struct rt2x00_dev *rt2x00dev,
950 unsigned int index)
951{
952 return BEACON_BASE_TO_OFFSET(rt2800_hw_beacon_base(rt2x00dev, index));
953}
954
769void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) 955void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
770{ 956{
771 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 957 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -818,7 +1004,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
818 return; 1004 return;
819 } 1005 }
820 1006
821 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1007 beacon_base = rt2800_hw_beacon_base(rt2x00dev, entry->entry_idx);
1008
822 rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, 1009 rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
823 entry->skb->len + padding_len); 1010 entry->skb->len + padding_len);
824 1011
@@ -837,10 +1024,13 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
837EXPORT_SYMBOL_GPL(rt2800_write_beacon); 1024EXPORT_SYMBOL_GPL(rt2800_write_beacon);
838 1025
839static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev, 1026static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
840 unsigned int beacon_base) 1027 unsigned int index)
841{ 1028{
842 int i; 1029 int i;
843 const int txwi_desc_size = rt2x00dev->bcn->winfo_size; 1030 const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
1031 unsigned int beacon_base;
1032
1033 beacon_base = rt2800_hw_beacon_base(rt2x00dev, index);
844 1034
845 /* 1035 /*
846 * For the Beacon base registers we only need to clear 1036 * For the Beacon base registers we only need to clear
@@ -867,8 +1057,7 @@ void rt2800_clear_beacon(struct queue_entry *entry)
867 /* 1057 /*
868 * Clear beacon. 1058 * Clear beacon.
869 */ 1059 */
870 rt2800_clear_beacon_register(rt2x00dev, 1060 rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
871 HW_BEACON_OFFSET(entry->entry_idx));
872 1061
873 /* 1062 /*
 874 * Enable beaconing again. 1063 * Enable beaconing again.
@@ -890,6 +1079,9 @@ const struct rt2x00debug rt2800_rt2x00debug = {
890 .word_count = CSR_REG_SIZE / sizeof(u32), 1079 .word_count = CSR_REG_SIZE / sizeof(u32),
891 }, 1080 },
892 .eeprom = { 1081 .eeprom = {
1082 /* NOTE: The local EEPROM access functions can't
1083 * be used here, use the generic versions instead.
1084 */
893 .read = rt2x00_eeprom_read, 1085 .read = rt2x00_eeprom_read,
894 .write = rt2x00_eeprom_write, 1086 .write = rt2x00_eeprom_write,
895 .word_base = EEPROM_BASE, 1087 .word_base = EEPROM_BASE,
@@ -1547,7 +1739,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
1547 led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3; 1739 led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
1548 if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) || 1740 if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
1549 led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) { 1741 led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
1550 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); 1742 rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
1551 led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE); 1743 led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
1552 if (led_ctrl == 0 || led_ctrl > 0x40) { 1744 if (led_ctrl == 0 || led_ctrl > 0x40) {
1553 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode); 1745 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
@@ -1609,7 +1801,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1609 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); 1801 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
1610 break; 1802 break;
1611 case 3: 1803 case 3:
1612 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); 1804 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
1613 break; 1805 break;
1614 } 1806 }
1615 1807
@@ -1622,7 +1814,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1622 rt2x00_rt(rt2x00dev, RT3090) || 1814 rt2x00_rt(rt2x00dev, RT3090) ||
1623 rt2x00_rt(rt2x00dev, RT3352) || 1815 rt2x00_rt(rt2x00dev, RT3352) ||
1624 rt2x00_rt(rt2x00dev, RT3390)) { 1816 rt2x00_rt(rt2x00dev, RT3390)) {
1625 rt2x00_eeprom_read(rt2x00dev, 1817 rt2800_eeprom_read(rt2x00dev,
1626 EEPROM_NIC_CONF1, &eeprom); 1818 EEPROM_NIC_CONF1, &eeprom);
1627 if (rt2x00_get_field16(eeprom, 1819 if (rt2x00_get_field16(eeprom,
1628 EEPROM_NIC_CONF1_ANT_DIVERSITY)) 1820 EEPROM_NIC_CONF1_ANT_DIVERSITY))
@@ -1649,6 +1841,13 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1649 1841
1650 rt2800_bbp_write(rt2x00dev, 3, r3); 1842 rt2800_bbp_write(rt2x00dev, 3, r3);
1651 rt2800_bbp_write(rt2x00dev, 1, r1); 1843 rt2800_bbp_write(rt2x00dev, 1, r1);
1844
1845 if (rt2x00_rt(rt2x00dev, RT3593)) {
1846 if (ant->rx_chain_num == 1)
1847 rt2800_bbp_write(rt2x00dev, 86, 0x00);
1848 else
1849 rt2800_bbp_write(rt2x00dev, 86, 0x46);
1850 }
1652} 1851}
1653EXPORT_SYMBOL_GPL(rt2800_config_ant); 1852EXPORT_SYMBOL_GPL(rt2800_config_ant);
1654 1853
@@ -1659,22 +1858,73 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
1659 short lna_gain; 1858 short lna_gain;
1660 1859
1661 if (libconf->rf.channel <= 14) { 1860 if (libconf->rf.channel <= 14) {
1662 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); 1861 rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
1663 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG); 1862 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
1664 } else if (libconf->rf.channel <= 64) { 1863 } else if (libconf->rf.channel <= 64) {
1665 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); 1864 rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
1666 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0); 1865 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
1667 } else if (libconf->rf.channel <= 128) { 1866 } else if (libconf->rf.channel <= 128) {
1668 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); 1867 if (rt2x00_rt(rt2x00dev, RT3593)) {
1669 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1); 1868 rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
1869 lna_gain = rt2x00_get_field16(eeprom,
1870 EEPROM_EXT_LNA2_A1);
1871 } else {
1872 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
1873 lna_gain = rt2x00_get_field16(eeprom,
1874 EEPROM_RSSI_BG2_LNA_A1);
1875 }
1670 } else { 1876 } else {
1671 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); 1877 if (rt2x00_rt(rt2x00dev, RT3593)) {
1672 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2); 1878 rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
1879 lna_gain = rt2x00_get_field16(eeprom,
1880 EEPROM_EXT_LNA2_A2);
1881 } else {
1882 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
1883 lna_gain = rt2x00_get_field16(eeprom,
1884 EEPROM_RSSI_A2_LNA_A2);
1885 }
1673 } 1886 }
1674 1887
1675 rt2x00dev->lna_gain = lna_gain; 1888 rt2x00dev->lna_gain = lna_gain;
1676} 1889}
1677 1890
1891#define FREQ_OFFSET_BOUND 0x5f
1892
1893static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
1894{
1895 u8 freq_offset, prev_freq_offset;
1896 u8 rfcsr, prev_rfcsr;
1897
1898 freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE);
1899 freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND);
1900
1901 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1902 prev_rfcsr = rfcsr;
1903
1904 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset);
1905 if (rfcsr == prev_rfcsr)
1906 return;
1907
1908 if (rt2x00_is_usb(rt2x00dev)) {
1909 rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff,
1910 freq_offset, prev_rfcsr);
1911 return;
1912 }
1913
1914 prev_freq_offset = rt2x00_get_field8(prev_rfcsr, RFCSR17_CODE);
1915 while (prev_freq_offset != freq_offset) {
1916 if (prev_freq_offset < freq_offset)
1917 prev_freq_offset++;
1918 else
1919 prev_freq_offset--;
1920
1921 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, prev_freq_offset);
1922 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
1923
1924 usleep_range(1000, 1500);
1925 }
1926}
1927
1678static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev, 1928static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
1679 struct ieee80211_conf *conf, 1929 struct ieee80211_conf *conf,
1680 struct rf_channel *rf, 1930 struct rf_channel *rf,
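
rt2800_adjust_freq_offset() above (replacing the one-shot version removed in the next hunk) clamps the requested RFCSR17 frequency-offset code to FREQ_OFFSET_BOUND and then walks the register toward it one step per roughly 1 ms, delegating to the MCU via MCU_FREQ_OFFSET on USB devices; presumably the gradual walk avoids disturbing the oscillator with a single large jump. The stepping loop, modeled standalone with the register write and sleep reduced to a printf:

#include <stdio.h>

static unsigned char rfcsr17;	/* stands in for the RF register */

static void adjust_freq_offset(unsigned char target)
{
	while (rfcsr17 != target) {
		rfcsr17 += (rfcsr17 < target) ? 1 : -1;
		/* driver: rt2800_rfcsr_write() + usleep_range(1000, 1500) */
		printf("RFCSR17 <- 0x%02x\n", rfcsr17);
	}
}

int main(void)
{
	rfcsr17 = 0x10;
	adjust_freq_offset(0x14);	/* steps through 0x11..0x14 */
	return 0;
}
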
@@ -1993,22 +2243,306 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
1993 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 2243 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
1994} 2244}
1995 2245
1996#define POWER_BOUND 0x27 2246static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
1997#define POWER_BOUND_5G 0x2b 2247 struct ieee80211_conf *conf,
1998#define FREQ_OFFSET_BOUND 0x5f 2248 struct rf_channel *rf,
1999 2249 struct channel_info *info)
2000static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
2001{ 2250{
2251 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
2252 u8 txrx_agc_fc;
2253 u8 txrx_h20m;
2002 u8 rfcsr; 2254 u8 rfcsr;
2255 u8 bbp;
2256 const bool txbf_enabled = false; /* TODO */
2003 2257
2004 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2258 /* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */
2005 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND) 2259 rt2800_bbp_read(rt2x00dev, 109, &bbp);
2006 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND); 2260 rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0);
2261 rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0);
2262 rt2800_bbp_write(rt2x00dev, 109, bbp);
2263
2264 rt2800_bbp_read(rt2x00dev, 110, &bbp);
2265 rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0);
2266 rt2800_bbp_write(rt2x00dev, 110, bbp);
2267
2268 if (rf->channel <= 14) {
2269 /* Restore BBP 25 & 26 for 2.4 GHz */
2270 rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
2271 rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
2272 } else {
 2273                 /* Hardcode BBP 25 & 26 for 5 GHz */
2274
2275 /* Enable IQ Phase correction */
2276 rt2800_bbp_write(rt2x00dev, 25, 0x09);
2277 /* Setup IQ Phase correction value */
2278 rt2800_bbp_write(rt2x00dev, 26, 0xff);
2279 }
2280
2281 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
2282 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf);
2283
2284 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
2285 rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3));
2286 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
2287
2288 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
2289 rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1);
2290 if (rf->channel <= 14)
2291 rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1);
2007 else 2292 else
2008 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset); 2293 rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2);
2009 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 2294 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
2295
2296 rt2800_rfcsr_read(rt2x00dev, 53, &rfcsr);
2297 if (rf->channel <= 14) {
2298 rfcsr = 0;
2299 rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
2300 info->default_power1 & 0x1f);
2301 } else {
2302 if (rt2x00_is_usb(rt2x00dev))
2303 rfcsr = 0x40;
2304
2305 rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
2306 ((info->default_power1 & 0x18) << 1) |
2307 (info->default_power1 & 7));
2308 }
2309 rt2800_rfcsr_write(rt2x00dev, 53, rfcsr);
2310
2311 rt2800_rfcsr_read(rt2x00dev, 55, &rfcsr);
2312 if (rf->channel <= 14) {
2313 rfcsr = 0;
2314 rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
2315 info->default_power2 & 0x1f);
2316 } else {
2317 if (rt2x00_is_usb(rt2x00dev))
2318 rfcsr = 0x40;
2319
2320 rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
2321 ((info->default_power2 & 0x18) << 1) |
2322 (info->default_power2 & 7));
2323 }
2324 rt2800_rfcsr_write(rt2x00dev, 55, rfcsr);
2325
2326 rt2800_rfcsr_read(rt2x00dev, 54, &rfcsr);
2327 if (rf->channel <= 14) {
2328 rfcsr = 0;
2329 rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
2330 info->default_power3 & 0x1f);
2331 } else {
2332 if (rt2x00_is_usb(rt2x00dev))
2333 rfcsr = 0x40;
2334
2335 rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
2336 ((info->default_power3 & 0x18) << 1) |
2337 (info->default_power3 & 7));
2338 }
2339 rt2800_rfcsr_write(rt2x00dev, 54, rfcsr);
2340
2341 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2342 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
2343 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
2344 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
2345 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
2346 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
2347 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
2348 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
2349 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
2350
2351 switch (rt2x00dev->default_ant.tx_chain_num) {
2352 case 3:
2353 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
2354 /* fallthrough */
2355 case 2:
2356 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
2357 /* fallthrough */
2358 case 1:
2359 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
2360 break;
2361 }
2362
2363 switch (rt2x00dev->default_ant.rx_chain_num) {
2364 case 3:
2365 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
2366 /* fallthrough */
2367 case 2:
2368 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
2369 /* fallthrough */
2370 case 1:
2371 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
2372 break;
2373 }
2374 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2375
2376 rt2800_adjust_freq_offset(rt2x00dev);
2377
2378 if (conf_is_ht40(conf)) {
2379 txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40,
2380 RFCSR24_TX_AGC_FC);
2381 txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw40,
2382 RFCSR24_TX_H20M);
2383 } else {
2384 txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw20,
2385 RFCSR24_TX_AGC_FC);
2386 txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw20,
2387 RFCSR24_TX_H20M);
2388 }
2389
 2390         /* NOTE: the reference driver does not write the new value
2391 * back to RFCSR 32
2392 */
2393 rt2800_rfcsr_read(rt2x00dev, 32, &rfcsr);
2394 rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc);
2395
2396 if (rf->channel <= 14)
2397 rfcsr = 0xa0;
2398 else
2399 rfcsr = 0x80;
2400 rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
2401
2402 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2403 rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m);
2404 rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m);
2405 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2406
2407 /* Band selection */
2408 rt2800_rfcsr_read(rt2x00dev, 36, &rfcsr);
2409 if (rf->channel <= 14)
2410 rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
2411 else
2412 rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
2413 rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);
2414
2415 rt2800_rfcsr_read(rt2x00dev, 34, &rfcsr);
2416 if (rf->channel <= 14)
2417 rfcsr = 0x3c;
2418 else
2419 rfcsr = 0x20;
2420 rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
2421
2422 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
2423 if (rf->channel <= 14)
2424 rfcsr = 0x1a;
2425 else
2426 rfcsr = 0x12;
2427 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
2428
2429 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
2430 if (rf->channel >= 1 && rf->channel <= 14)
2431 rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
2432 else if (rf->channel >= 36 && rf->channel <= 64)
2433 rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
2434 else if (rf->channel >= 100 && rf->channel <= 128)
2435 rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
2436 else
2437 rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
2438 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
2439
2440 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2441 rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
2442 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2443
2444 rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
2445
2446 if (rf->channel <= 14) {
2447 rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
2448 rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
2449 } else {
2450 rt2800_rfcsr_write(rt2x00dev, 10, 0xd8);
2451 rt2800_rfcsr_write(rt2x00dev, 13, 0x23);
2452 }
2453
2454 rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
2455 rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1);
2456 rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
2457
2458 rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
2459 if (rf->channel <= 14) {
2460 rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5);
2461 rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3);
2462 } else {
2463 rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 4);
2464 rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 2);
2465 }
2466 rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
2467
2468 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
2469 if (rf->channel <= 14)
2470 rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3);
2471 else
2472 rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 2);
2473
2474 if (txbf_enabled)
2475 rt2x00_set_field8(&rfcsr, RFCSR49_TX_DIV, 1);
2476
2477 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
2478
2479 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
2480 rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0);
2481 rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
2482
2483 rt2800_rfcsr_read(rt2x00dev, 57, &rfcsr);
2484 if (rf->channel <= 14)
2485 rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b);
2486 else
2487 rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x0f);
2488 rt2800_rfcsr_write(rt2x00dev, 57, rfcsr);
2489
2490 if (rf->channel <= 14) {
2491 rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
2492 rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
2493 } else {
2494 rt2800_rfcsr_write(rt2x00dev, 44, 0x9b);
2495 rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
2496 }
2497
2498 /* Initiate VCO calibration */
2499 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2500 if (rf->channel <= 14) {
2501 rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
2502 } else {
2503 rt2x00_set_field8(&rfcsr, RFCSR3_BIT1, 1);
2504 rt2x00_set_field8(&rfcsr, RFCSR3_BIT2, 1);
2505 rt2x00_set_field8(&rfcsr, RFCSR3_BIT3, 1);
2506 rt2x00_set_field8(&rfcsr, RFCSR3_BIT4, 1);
2507 rt2x00_set_field8(&rfcsr, RFCSR3_BIT5, 1);
2508 rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
2509 }
2510 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2511
2512 if (rf->channel >= 1 && rf->channel <= 14) {
2513 rfcsr = 0x23;
2514 if (txbf_enabled)
2515 rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
2516 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
2517
2518 rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
2519 } else if (rf->channel >= 36 && rf->channel <= 64) {
2520 rfcsr = 0x36;
2521 if (txbf_enabled)
2522 rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
 2523                 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
2524
2525 rt2800_rfcsr_write(rt2x00dev, 45, 0xeb);
2526 } else if (rf->channel >= 100 && rf->channel <= 128) {
2527 rfcsr = 0x32;
2528 if (txbf_enabled)
2529 rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
2530 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
2531
2532 rt2800_rfcsr_write(rt2x00dev, 45, 0xb3);
2533 } else {
2534 rfcsr = 0x30;
2535 if (txbf_enabled)
2536 rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
2537 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
2538
2539 rt2800_rfcsr_write(rt2x00dev, 45, 0x9b);
2540 }
2010} 2541}
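
The 5 GHz TX power writes to RFCSR 53/54/55 above repack the 5-bit EEPROM power value so that bits [4:3] land one position higher, and USB parts start from a 0x40 base. A minimal sketch of just that repacking (hypothetical helper, ignoring the preserved register bits):

	static u8 rf3053_5g_txpower(u8 p, bool is_usb)
	{
		u8 base = is_usb ? 0x40 : 0x00;

		/* e.g. p = 0x1d: base | (0x18 << 1) | 0x05 = base | 0x35 */
		return base | ((p & 0x18) << 1) | (p & 0x07);
	}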
2011 2542
2543#define POWER_BOUND 0x27
2544#define POWER_BOUND_5G 0x2b
2545
2012static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, 2546static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
2013 struct ieee80211_conf *conf, 2547 struct ieee80211_conf *conf,
2014 struct rf_channel *rf, 2548 struct rf_channel *rf,
@@ -2563,6 +3097,23 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
2563 rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0); 3097 rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
2564} 3098}
2565 3099
3100static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
3101 unsigned int channel,
3102 char txpower)
3103{
3104 if (rt2x00_rt(rt2x00dev, RT3593))
3105 txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
3106
3107 if (channel <= 14)
3108 return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
3109
3110 if (rt2x00_rt(rt2x00dev, RT3593))
3111 return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
3112 MAX_A_TXPOWER_3593);
3113 else
3114 return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
3115}
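
clamp_t(char, x, lo, hi) pins x into [lo, hi] with all operands cast to char, so out-of-range EEPROM power bytes never reach the RF programming code. Its open-coded equivalent, for reference:

	static char clamp_char(char val, char lo, char hi)
	{
		if (val < lo)
			return lo;
		if (val > hi)
			return hi;
		return val;
	}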
3116
2566static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 3117static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2567 struct ieee80211_conf *conf, 3118 struct ieee80211_conf *conf,
2568 struct rf_channel *rf, 3119 struct rf_channel *rf,
@@ -2572,13 +3123,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2572 unsigned int tx_pin; 3123 unsigned int tx_pin;
2573 u8 bbp, rfcsr; 3124 u8 bbp, rfcsr;
2574 3125
2575 if (rf->channel <= 14) { 3126 info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
2576 info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1); 3127 info->default_power1);
2577 info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2); 3128 info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
2578 } else { 3129 info->default_power2);
2579 info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1); 3130 if (rt2x00dev->default_ant.tx_chain_num > 2)
2580 info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2); 3131 info->default_power3 =
2581 } 3132 rt2800_txpower_to_dev(rt2x00dev, rf->channel,
3133 info->default_power3);
2582 3134
2583 switch (rt2x00dev->chip.rf) { 3135 switch (rt2x00dev->chip.rf) {
2584 case RF2020: 3136 case RF2020:
@@ -2591,6 +3143,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2591 case RF3052: 3143 case RF3052:
2592 rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info); 3144 rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
2593 break; 3145 break;
3146 case RF3053:
3147 rt2800_config_channel_rf3053(rt2x00dev, conf, rf, info);
3148 break;
2594 case RF3290: 3149 case RF3290:
2595 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info); 3150 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
2596 break; 3151 break;
@@ -2636,6 +3191,23 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2636 rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); 3191 rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
2637 rt2800_bbp_write(rt2x00dev, 27, 0x20); 3192 rt2800_bbp_write(rt2x00dev, 27, 0x20);
2638 rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); 3193 rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
3194 } else if (rt2x00_rt(rt2x00dev, RT3593)) {
3195 if (rf->channel > 14) {
 3196                         /* Disable CCK packet detection on 5 GHz */
3197 rt2800_bbp_write(rt2x00dev, 70, 0x00);
3198 } else {
3199 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
3200 }
3201
3202 if (conf_is_ht40(conf))
3203 rt2800_bbp_write(rt2x00dev, 105, 0x04);
3204 else
3205 rt2800_bbp_write(rt2x00dev, 105, 0x34);
3206
3207 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
3208 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
3209 rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
3210 rt2800_bbp_write(rt2x00dev, 77, 0x98);
2639 } else { 3211 } else {
2640 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); 3212 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
2641 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); 3213 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
@@ -2651,16 +3223,27 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2651 rt2800_bbp_write(rt2x00dev, 82, 0x62); 3223 rt2800_bbp_write(rt2x00dev, 82, 0x62);
2652 rt2800_bbp_write(rt2x00dev, 75, 0x46); 3224 rt2800_bbp_write(rt2x00dev, 75, 0x46);
2653 } else { 3225 } else {
2654 rt2800_bbp_write(rt2x00dev, 82, 0x84); 3226 if (rt2x00_rt(rt2x00dev, RT3593))
3227 rt2800_bbp_write(rt2x00dev, 82, 0x62);
3228 else
3229 rt2800_bbp_write(rt2x00dev, 82, 0x84);
2655 rt2800_bbp_write(rt2x00dev, 75, 0x50); 3230 rt2800_bbp_write(rt2x00dev, 75, 0x50);
2656 } 3231 }
3232 if (rt2x00_rt(rt2x00dev, RT3593))
3233 rt2800_bbp_write(rt2x00dev, 83, 0x8a);
2657 } 3234 }
3235
2658 } else { 3236 } else {
2659 if (rt2x00_rt(rt2x00dev, RT3572)) 3237 if (rt2x00_rt(rt2x00dev, RT3572))
2660 rt2800_bbp_write(rt2x00dev, 82, 0x94); 3238 rt2800_bbp_write(rt2x00dev, 82, 0x94);
3239 else if (rt2x00_rt(rt2x00dev, RT3593))
3240 rt2800_bbp_write(rt2x00dev, 82, 0x82);
2661 else 3241 else
2662 rt2800_bbp_write(rt2x00dev, 82, 0xf2); 3242 rt2800_bbp_write(rt2x00dev, 82, 0xf2);
2663 3243
3244 if (rt2x00_rt(rt2x00dev, RT3593))
3245 rt2800_bbp_write(rt2x00dev, 83, 0x9a);
3246
2664 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) 3247 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
2665 rt2800_bbp_write(rt2x00dev, 75, 0x46); 3248 rt2800_bbp_write(rt2x00dev, 75, 0x46);
2666 else 3249 else
@@ -2731,6 +3314,41 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2731 if (rt2x00_rt(rt2x00dev, RT3572)) 3314 if (rt2x00_rt(rt2x00dev, RT3572))
2732 rt2800_rfcsr_write(rt2x00dev, 8, 0x80); 3315 rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
2733 3316
3317 if (rt2x00_rt(rt2x00dev, RT3593)) {
3318 if (rt2x00_is_usb(rt2x00dev)) {
3319 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
3320
3321 /* Band selection. GPIO #8 controls all paths */
3322 rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
3323 if (rf->channel <= 14)
3324 rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
3325 else
3326 rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
3327
3328 rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
3329 rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
3330
3331 /* LNA PE control.
3332 * GPIO #4 controls PE0 and PE1,
3333 * GPIO #7 controls PE2
3334 */
3335 rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
3336 rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
3337
3338 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
3339 }
3340
3341 /* AGC init */
3342 if (rf->channel <= 14)
3343 reg = 0x1c + 2 * rt2x00dev->lna_gain;
3344 else
3345 reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
3346
3347 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
3348
3349 usleep_range(1000, 1500);
3350 }
3351
2734 if (rt2x00_rt(rt2x00dev, RT5592)) { 3352 if (rt2x00_rt(rt2x00dev, RT5592)) {
2735 rt2800_bbp_write(rt2x00dev, 195, 141); 3353 rt2800_bbp_write(rt2x00dev, 195, 141);
2736 rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a); 3354 rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
@@ -2790,6 +3408,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
2790 int i; 3408 int i;
2791 3409
2792 /* 3410 /*
3411 * First check if temperature compensation is supported.
3412 */
3413 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
3414 if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
3415 return 0;
3416
3417 /*
2793 * Read TSSI boundaries for temperature compensation from 3418 * Read TSSI boundaries for temperature compensation from
2794 * the EEPROM. 3419 * the EEPROM.
2795 * 3420 *
@@ -2798,62 +3423,62 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
2798 * Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00 3423 * Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
2799 */ 3424 */
2800 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 3425 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
2801 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom); 3426 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
2802 tssi_bounds[0] = rt2x00_get_field16(eeprom, 3427 tssi_bounds[0] = rt2x00_get_field16(eeprom,
2803 EEPROM_TSSI_BOUND_BG1_MINUS4); 3428 EEPROM_TSSI_BOUND_BG1_MINUS4);
2804 tssi_bounds[1] = rt2x00_get_field16(eeprom, 3429 tssi_bounds[1] = rt2x00_get_field16(eeprom,
2805 EEPROM_TSSI_BOUND_BG1_MINUS3); 3430 EEPROM_TSSI_BOUND_BG1_MINUS3);
2806 3431
2807 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom); 3432 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
2808 tssi_bounds[2] = rt2x00_get_field16(eeprom, 3433 tssi_bounds[2] = rt2x00_get_field16(eeprom,
2809 EEPROM_TSSI_BOUND_BG2_MINUS2); 3434 EEPROM_TSSI_BOUND_BG2_MINUS2);
2810 tssi_bounds[3] = rt2x00_get_field16(eeprom, 3435 tssi_bounds[3] = rt2x00_get_field16(eeprom,
2811 EEPROM_TSSI_BOUND_BG2_MINUS1); 3436 EEPROM_TSSI_BOUND_BG2_MINUS1);
2812 3437
2813 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom); 3438 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
2814 tssi_bounds[4] = rt2x00_get_field16(eeprom, 3439 tssi_bounds[4] = rt2x00_get_field16(eeprom,
2815 EEPROM_TSSI_BOUND_BG3_REF); 3440 EEPROM_TSSI_BOUND_BG3_REF);
2816 tssi_bounds[5] = rt2x00_get_field16(eeprom, 3441 tssi_bounds[5] = rt2x00_get_field16(eeprom,
2817 EEPROM_TSSI_BOUND_BG3_PLUS1); 3442 EEPROM_TSSI_BOUND_BG3_PLUS1);
2818 3443
2819 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom); 3444 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
2820 tssi_bounds[6] = rt2x00_get_field16(eeprom, 3445 tssi_bounds[6] = rt2x00_get_field16(eeprom,
2821 EEPROM_TSSI_BOUND_BG4_PLUS2); 3446 EEPROM_TSSI_BOUND_BG4_PLUS2);
2822 tssi_bounds[7] = rt2x00_get_field16(eeprom, 3447 tssi_bounds[7] = rt2x00_get_field16(eeprom,
2823 EEPROM_TSSI_BOUND_BG4_PLUS3); 3448 EEPROM_TSSI_BOUND_BG4_PLUS3);
2824 3449
2825 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom); 3450 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
2826 tssi_bounds[8] = rt2x00_get_field16(eeprom, 3451 tssi_bounds[8] = rt2x00_get_field16(eeprom,
2827 EEPROM_TSSI_BOUND_BG5_PLUS4); 3452 EEPROM_TSSI_BOUND_BG5_PLUS4);
2828 3453
2829 step = rt2x00_get_field16(eeprom, 3454 step = rt2x00_get_field16(eeprom,
2830 EEPROM_TSSI_BOUND_BG5_AGC_STEP); 3455 EEPROM_TSSI_BOUND_BG5_AGC_STEP);
2831 } else { 3456 } else {
2832 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom); 3457 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
2833 tssi_bounds[0] = rt2x00_get_field16(eeprom, 3458 tssi_bounds[0] = rt2x00_get_field16(eeprom,
2834 EEPROM_TSSI_BOUND_A1_MINUS4); 3459 EEPROM_TSSI_BOUND_A1_MINUS4);
2835 tssi_bounds[1] = rt2x00_get_field16(eeprom, 3460 tssi_bounds[1] = rt2x00_get_field16(eeprom,
2836 EEPROM_TSSI_BOUND_A1_MINUS3); 3461 EEPROM_TSSI_BOUND_A1_MINUS3);
2837 3462
2838 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom); 3463 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
2839 tssi_bounds[2] = rt2x00_get_field16(eeprom, 3464 tssi_bounds[2] = rt2x00_get_field16(eeprom,
2840 EEPROM_TSSI_BOUND_A2_MINUS2); 3465 EEPROM_TSSI_BOUND_A2_MINUS2);
2841 tssi_bounds[3] = rt2x00_get_field16(eeprom, 3466 tssi_bounds[3] = rt2x00_get_field16(eeprom,
2842 EEPROM_TSSI_BOUND_A2_MINUS1); 3467 EEPROM_TSSI_BOUND_A2_MINUS1);
2843 3468
2844 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom); 3469 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
2845 tssi_bounds[4] = rt2x00_get_field16(eeprom, 3470 tssi_bounds[4] = rt2x00_get_field16(eeprom,
2846 EEPROM_TSSI_BOUND_A3_REF); 3471 EEPROM_TSSI_BOUND_A3_REF);
2847 tssi_bounds[5] = rt2x00_get_field16(eeprom, 3472 tssi_bounds[5] = rt2x00_get_field16(eeprom,
2848 EEPROM_TSSI_BOUND_A3_PLUS1); 3473 EEPROM_TSSI_BOUND_A3_PLUS1);
2849 3474
2850 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom); 3475 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
2851 tssi_bounds[6] = rt2x00_get_field16(eeprom, 3476 tssi_bounds[6] = rt2x00_get_field16(eeprom,
2852 EEPROM_TSSI_BOUND_A4_PLUS2); 3477 EEPROM_TSSI_BOUND_A4_PLUS2);
2853 tssi_bounds[7] = rt2x00_get_field16(eeprom, 3478 tssi_bounds[7] = rt2x00_get_field16(eeprom,
2854 EEPROM_TSSI_BOUND_A4_PLUS3); 3479 EEPROM_TSSI_BOUND_A4_PLUS3);
2855 3480
2856 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom); 3481 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
2857 tssi_bounds[8] = rt2x00_get_field16(eeprom, 3482 tssi_bounds[8] = rt2x00_get_field16(eeprom,
2858 EEPROM_TSSI_BOUND_A5_PLUS4); 3483 EEPROM_TSSI_BOUND_A5_PLUS4);
2859 3484
@@ -2899,7 +3524,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
2899 u8 comp_type; 3524 u8 comp_type;
2900 int comp_value = 0; 3525 int comp_value = 0;
2901 3526
2902 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom); 3527 rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
2903 3528
2904 /* 3529 /*
2905 * HT40 compensation not required. 3530 * HT40 compensation not required.
@@ -2966,6 +3591,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
2966 u8 eirp_txpower_criterion; 3591 u8 eirp_txpower_criterion;
2967 u8 reg_limit; 3592 u8 reg_limit;
2968 3593
3594 if (rt2x00_rt(rt2x00dev, RT3593))
3595 return min_t(u8, txpower, 0xc);
3596
2969 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) { 3597 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
2970 /* 3598 /*
 2971          * Check if eirp txpower exceeds txpower_limit.                        3599
@@ -2974,12 +3602,12 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
 2974          * .11b data rates need an additional 4 dBm                            3602
2975 * when calculating eirp txpower. 3603 * when calculating eirp txpower.
2976 */ 3604 */
2977 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1, 3605 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
2978 &eeprom); 3606 1, &eeprom);
2979 criterion = rt2x00_get_field16(eeprom, 3607 criterion = rt2x00_get_field16(eeprom,
2980 EEPROM_TXPOWER_BYRATE_RATE0); 3608 EEPROM_TXPOWER_BYRATE_RATE0);
2981 3609
2982 rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, 3610 rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
2983 &eeprom); 3611 &eeprom);
2984 3612
2985 if (band == IEEE80211_BAND_2GHZ) 3613 if (band == IEEE80211_BAND_2GHZ)
@@ -3001,6 +3629,412 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
3001 return min_t(u8, txpower, 0xc); 3629 return min_t(u8, txpower, 0xc);
3002} 3630}
3003 3631
3632
3633enum {
3634 TX_PWR_CFG_0_IDX,
3635 TX_PWR_CFG_1_IDX,
3636 TX_PWR_CFG_2_IDX,
3637 TX_PWR_CFG_3_IDX,
3638 TX_PWR_CFG_4_IDX,
3639 TX_PWR_CFG_5_IDX,
3640 TX_PWR_CFG_6_IDX,
3641 TX_PWR_CFG_7_IDX,
3642 TX_PWR_CFG_8_IDX,
3643 TX_PWR_CFG_9_IDX,
3644 TX_PWR_CFG_0_EXT_IDX,
3645 TX_PWR_CFG_1_EXT_IDX,
3646 TX_PWR_CFG_2_EXT_IDX,
3647 TX_PWR_CFG_3_EXT_IDX,
3648 TX_PWR_CFG_4_EXT_IDX,
3649 TX_PWR_CFG_IDX_COUNT,
3650};
3651
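
EEPROM_TXPOWER_BYRATE is read as 16-bit words, each packing four 4-bit rate fields (EEPROM_TXPOWER_BYRATE_RATE0..RATE3); the function below selects the per-band, per-bandwidth table purely by word offset. A sketch of that selection (hypothetical helper):

	static unsigned int byrate_table_offset(bool is_5ghz, bool is_ht40)
	{
		/* e.g. 5 GHz HT40 starts at word 24, 2.4 GHz HT20 at word 0 */
		return (is_5ghz ? 16 : 0) + (is_ht40 ? 8 : 0);
	}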
3652static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
3653 struct ieee80211_channel *chan,
3654 int power_level)
3655{
3656 u8 txpower;
3657 u16 eeprom;
3658 u32 regs[TX_PWR_CFG_IDX_COUNT];
3659 unsigned int offset;
3660 enum ieee80211_band band = chan->band;
3661 int delta;
3662 int i;
3663
 3664         memset(regs, 0, sizeof(regs));
3665
3666 /* TODO: adapt TX power reduction from the rt28xx code */
3667
3668 /* calculate temperature compensation delta */
3669 delta = rt2800_get_gain_calibration_delta(rt2x00dev);
3670
3671 if (band == IEEE80211_BAND_5GHZ)
3672 offset = 16;
3673 else
3674 offset = 0;
3675
3676 if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
3677 offset += 8;
3678
3679 /* read the next four txpower values */
3680 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3681 offset, &eeprom);
3682
 3683         /* CCK 1 Mbps, 2 Mbps */
3684 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3685 txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
3686 txpower, delta);
3687 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3688 TX_PWR_CFG_0_CCK1_CH0, txpower);
3689 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3690 TX_PWR_CFG_0_CCK1_CH1, txpower);
3691 rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
3692 TX_PWR_CFG_0_EXT_CCK1_CH2, txpower);
3693
 3694         /* CCK 5.5 Mbps, 11 Mbps */
3695 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
3696 txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
3697 txpower, delta);
3698 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3699 TX_PWR_CFG_0_CCK5_CH0, txpower);
3700 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3701 TX_PWR_CFG_0_CCK5_CH1, txpower);
3702 rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
3703 TX_PWR_CFG_0_EXT_CCK5_CH2, txpower);
3704
 3705         /* OFDM 6 Mbps, 9 Mbps */
3706 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
3707 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3708 txpower, delta);
3709 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3710 TX_PWR_CFG_0_OFDM6_CH0, txpower);
3711 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3712 TX_PWR_CFG_0_OFDM6_CH1, txpower);
3713 rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
3714 TX_PWR_CFG_0_EXT_OFDM6_CH2, txpower);
3715
 3716         /* OFDM 12 Mbps, 18 Mbps */
3717 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
3718 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3719 txpower, delta);
3720 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3721 TX_PWR_CFG_0_OFDM12_CH0, txpower);
3722 rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
3723 TX_PWR_CFG_0_OFDM12_CH1, txpower);
3724 rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
3725 TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower);
3726
3727 /* read the next four txpower values */
3728 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3729 offset + 1, &eeprom);
3730
 3731         /* OFDM 24 Mbps, 36 Mbps */
3732 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3733 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3734 txpower, delta);
3735 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3736 TX_PWR_CFG_1_OFDM24_CH0, txpower);
3737 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3738 TX_PWR_CFG_1_OFDM24_CH1, txpower);
3739 rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
3740 TX_PWR_CFG_1_EXT_OFDM24_CH2, txpower);
3741
 3742         /* OFDM 48 Mbps */
3743 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
3744 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3745 txpower, delta);
3746 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3747 TX_PWR_CFG_1_OFDM48_CH0, txpower);
3748 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3749 TX_PWR_CFG_1_OFDM48_CH1, txpower);
3750 rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
3751 TX_PWR_CFG_1_EXT_OFDM48_CH2, txpower);
3752
 3753         /* OFDM 54 Mbps */
3754 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
3755 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3756 txpower, delta);
3757 rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
3758 TX_PWR_CFG_7_OFDM54_CH0, txpower);
3759 rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
3760 TX_PWR_CFG_7_OFDM54_CH1, txpower);
3761 rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
3762 TX_PWR_CFG_7_OFDM54_CH2, txpower);
3763
3764 /* read the next four txpower values */
3765 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3766 offset + 2, &eeprom);
3767
3768 /* MCS 0,1 */
3769 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3770 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3771 txpower, delta);
3772 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3773 TX_PWR_CFG_1_MCS0_CH0, txpower);
3774 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3775 TX_PWR_CFG_1_MCS0_CH1, txpower);
3776 rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
3777 TX_PWR_CFG_1_EXT_MCS0_CH2, txpower);
3778
3779 /* MCS 2,3 */
3780 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
3781 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3782 txpower, delta);
3783 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3784 TX_PWR_CFG_1_MCS2_CH0, txpower);
3785 rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
3786 TX_PWR_CFG_1_MCS2_CH1, txpower);
3787 rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
3788 TX_PWR_CFG_1_EXT_MCS2_CH2, txpower);
3789
3790 /* MCS 4,5 */
3791 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
3792 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3793 txpower, delta);
3794 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3795 TX_PWR_CFG_2_MCS4_CH0, txpower);
3796 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3797 TX_PWR_CFG_2_MCS4_CH1, txpower);
3798 rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
3799 TX_PWR_CFG_2_EXT_MCS4_CH2, txpower);
3800
3801 /* MCS 6 */
3802 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
3803 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3804 txpower, delta);
3805 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3806 TX_PWR_CFG_2_MCS6_CH0, txpower);
3807 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3808 TX_PWR_CFG_2_MCS6_CH1, txpower);
3809 rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
3810 TX_PWR_CFG_2_EXT_MCS6_CH2, txpower);
3811
3812 /* read the next four txpower values */
3813 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3814 offset + 3, &eeprom);
3815
3816 /* MCS 7 */
3817 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3818 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3819 txpower, delta);
3820 rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
3821 TX_PWR_CFG_7_MCS7_CH0, txpower);
3822 rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
3823 TX_PWR_CFG_7_MCS7_CH1, txpower);
3824 rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
3825 TX_PWR_CFG_7_MCS7_CH2, txpower);
3826
3827 /* MCS 8,9 */
3828 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
3829 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3830 txpower, delta);
3831 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3832 TX_PWR_CFG_2_MCS8_CH0, txpower);
3833 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3834 TX_PWR_CFG_2_MCS8_CH1, txpower);
3835 rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
3836 TX_PWR_CFG_2_EXT_MCS8_CH2, txpower);
3837
3838 /* MCS 10,11 */
3839 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
3840 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3841 txpower, delta);
3842 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3843 TX_PWR_CFG_2_MCS10_CH0, txpower);
3844 rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
3845 TX_PWR_CFG_2_MCS10_CH1, txpower);
3846 rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
3847 TX_PWR_CFG_2_EXT_MCS10_CH2, txpower);
3848
3849 /* MCS 12,13 */
3850 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
3851 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3852 txpower, delta);
3853 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3854 TX_PWR_CFG_3_MCS12_CH0, txpower);
3855 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3856 TX_PWR_CFG_3_MCS12_CH1, txpower);
3857 rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
3858 TX_PWR_CFG_3_EXT_MCS12_CH2, txpower);
3859
3860 /* read the next four txpower values */
3861 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3862 offset + 4, &eeprom);
3863
3864 /* MCS 14 */
3865 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3866 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3867 txpower, delta);
3868 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3869 TX_PWR_CFG_3_MCS14_CH0, txpower);
3870 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3871 TX_PWR_CFG_3_MCS14_CH1, txpower);
3872 rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
3873 TX_PWR_CFG_3_EXT_MCS14_CH2, txpower);
3874
3875 /* MCS 15 */
3876 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
3877 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3878 txpower, delta);
3879 rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
3880 TX_PWR_CFG_8_MCS15_CH0, txpower);
3881 rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
3882 TX_PWR_CFG_8_MCS15_CH1, txpower);
3883 rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
3884 TX_PWR_CFG_8_MCS15_CH2, txpower);
3885
3886 /* MCS 16,17 */
3887 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
3888 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3889 txpower, delta);
3890 rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
3891 TX_PWR_CFG_5_MCS16_CH0, txpower);
3892 rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
3893 TX_PWR_CFG_5_MCS16_CH1, txpower);
3894 rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
3895 TX_PWR_CFG_5_MCS16_CH2, txpower);
3896
3897 /* MCS 18,19 */
3898 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
3899 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3900 txpower, delta);
3901 rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
3902 TX_PWR_CFG_5_MCS18_CH0, txpower);
3903 rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
3904 TX_PWR_CFG_5_MCS18_CH1, txpower);
3905 rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
3906 TX_PWR_CFG_5_MCS18_CH2, txpower);
3907
3908 /* read the next four txpower values */
3909 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3910 offset + 5, &eeprom);
3911
3912 /* MCS 20,21 */
3913 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3914 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3915 txpower, delta);
3916 rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
3917 TX_PWR_CFG_6_MCS20_CH0, txpower);
3918 rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
3919 TX_PWR_CFG_6_MCS20_CH1, txpower);
3920 rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
3921 TX_PWR_CFG_6_MCS20_CH2, txpower);
3922
3923 /* MCS 22 */
3924 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
3925 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3926 txpower, delta);
3927 rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
3928 TX_PWR_CFG_6_MCS22_CH0, txpower);
3929 rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
3930 TX_PWR_CFG_6_MCS22_CH1, txpower);
3931 rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
3932 TX_PWR_CFG_6_MCS22_CH2, txpower);
3933
3934 /* MCS 23 */
3935 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
3936 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3937 txpower, delta);
3938 rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
3939 TX_PWR_CFG_8_MCS23_CH0, txpower);
3940 rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
3941 TX_PWR_CFG_8_MCS23_CH1, txpower);
3942 rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
3943 TX_PWR_CFG_8_MCS23_CH2, txpower);
3944
3945 /* read the next four txpower values */
3946 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3947 offset + 6, &eeprom);
3948
3949 /* STBC, MCS 0,1 */
3950 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3951 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3952 txpower, delta);
3953 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3954 TX_PWR_CFG_3_STBC0_CH0, txpower);
3955 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3956 TX_PWR_CFG_3_STBC0_CH1, txpower);
3957 rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
3958 TX_PWR_CFG_3_EXT_STBC0_CH2, txpower);
3959
3960 /* STBC, MCS 2,3 */
3961 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
3962 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3963 txpower, delta);
3964 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3965 TX_PWR_CFG_3_STBC2_CH0, txpower);
3966 rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
3967 TX_PWR_CFG_3_STBC2_CH1, txpower);
3968 rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
3969 TX_PWR_CFG_3_EXT_STBC2_CH2, txpower);
3970
3971 /* STBC, MCS 4,5 */
3972 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
3973 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3974 txpower, delta);
3975 rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE0, txpower);
3976 rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE1, txpower);
3977 rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE0,
3978 txpower);
3979
3980 /* STBC, MCS 6 */
3981 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
3982 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3983 txpower, delta);
3984 rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE2, txpower);
3985 rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE3, txpower);
3986 rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE2,
3987 txpower);
3988
3989 /* read the next four txpower values */
3990 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3991 offset + 7, &eeprom);
3992
3993 /* STBC, MCS 7 */
3994 txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
3995 txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
3996 txpower, delta);
3997 rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
3998 TX_PWR_CFG_9_STBC7_CH0, txpower);
3999 rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
4000 TX_PWR_CFG_9_STBC7_CH1, txpower);
4001 rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
4002 TX_PWR_CFG_9_STBC7_CH2, txpower);
4003
4004 rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, regs[TX_PWR_CFG_0_IDX]);
4005 rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, regs[TX_PWR_CFG_1_IDX]);
4006 rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, regs[TX_PWR_CFG_2_IDX]);
4007 rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, regs[TX_PWR_CFG_3_IDX]);
4008 rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, regs[TX_PWR_CFG_4_IDX]);
4009 rt2800_register_write(rt2x00dev, TX_PWR_CFG_5, regs[TX_PWR_CFG_5_IDX]);
4010 rt2800_register_write(rt2x00dev, TX_PWR_CFG_6, regs[TX_PWR_CFG_6_IDX]);
4011 rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, regs[TX_PWR_CFG_7_IDX]);
4012 rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, regs[TX_PWR_CFG_8_IDX]);
4013 rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, regs[TX_PWR_CFG_9_IDX]);
4014
4015 rt2800_register_write(rt2x00dev, TX_PWR_CFG_0_EXT,
4016 regs[TX_PWR_CFG_0_EXT_IDX]);
4017 rt2800_register_write(rt2x00dev, TX_PWR_CFG_1_EXT,
4018 regs[TX_PWR_CFG_1_EXT_IDX]);
4019 rt2800_register_write(rt2x00dev, TX_PWR_CFG_2_EXT,
4020 regs[TX_PWR_CFG_2_EXT_IDX]);
4021 rt2800_register_write(rt2x00dev, TX_PWR_CFG_3_EXT,
4022 regs[TX_PWR_CFG_3_EXT_IDX]);
4023 rt2800_register_write(rt2x00dev, TX_PWR_CFG_4_EXT,
4024 regs[TX_PWR_CFG_4_EXT_IDX]);
4025
4026 for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
4027 rt2x00_dbg(rt2x00dev,
4028 "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
4029 (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
4030 (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
4031 '4' : '2',
4032 (i > TX_PWR_CFG_9_IDX) ?
4033 (i - TX_PWR_CFG_9_IDX - 1) : i,
4034 (i > TX_PWR_CFG_9_IDX) ? "_EXT" : "",
4035 (unsigned long) regs[i]);
4036}
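
In the debug loop above, an index past TX_PWR_CFG_9_IDX folds back onto its base register number: e.g. i = TX_PWR_CFG_2_EXT_IDX (12) prints as "TX_PWR_CFG_2_EXT", since 12 - TX_PWR_CFG_9_IDX - 1 = 2.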
4037
3004/* 4038/*
3005 * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and 4039 * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
 3006 * BBP R1 register. TX_PWR_CFG_X allows configuring per-rate TX power values,   4040
@@ -3010,9 +4044,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
3010 * EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to 4044 * EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to
3011 * current conditions (i.e. band, bandwidth, temperature, user settings). 4045 * current conditions (i.e. band, bandwidth, temperature, user settings).
3012 */ 4046 */
3013static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, 4047static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
3014 struct ieee80211_channel *chan, 4048 struct ieee80211_channel *chan,
3015 int power_level) 4049 int power_level)
3016{ 4050{
3017 u8 txpower, r1; 4051 u8 txpower, r1;
3018 u16 eeprom; 4052 u16 eeprom;
@@ -3080,8 +4114,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
3080 rt2800_register_read(rt2x00dev, offset, &reg); 4114 rt2800_register_read(rt2x00dev, offset, &reg);
3081 4115
3082 /* read the next four txpower values */ 4116 /* read the next four txpower values */
3083 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, 4117 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3084 &eeprom); 4118 i, &eeprom);
3085 4119
3086 is_rate_b = i ? 0 : 1; 4120 is_rate_b = i ? 0 : 1;
3087 /* 4121 /*
@@ -3129,8 +4163,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
3129 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower); 4163 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
3130 4164
3131 /* read the next four txpower values */ 4165 /* read the next four txpower values */
3132 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, 4166 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
3133 &eeprom); 4167 i + 1, &eeprom);
3134 4168
3135 is_rate_b = 0; 4169 is_rate_b = 0;
3136 /* 4170 /*
@@ -3184,6 +4218,16 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
3184 } 4218 }
3185} 4219}
3186 4220
4221static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
4222 struct ieee80211_channel *chan,
4223 int power_level)
4224{
4225 if (rt2x00_rt(rt2x00dev, RT3593))
4226 rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
4227 else
4228 rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level);
4229}
4230
3187void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev) 4231void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
3188{ 4232{
3189 rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan, 4233 rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan,
@@ -3219,6 +4263,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
3219 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); 4263 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
3220 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 4264 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
3221 break; 4265 break;
4266 case RF3053:
3222 case RF3290: 4267 case RF3290:
3223 case RF5360: 4268 case RF5360:
3224 case RF5370: 4269 case RF5370:
@@ -3442,17 +4487,25 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3442 return ret; 4487 return ret;
3443 4488
3444 rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg); 4489 rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
3445 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */ 4490 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0,
3446 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */ 4491 rt2800_get_beacon_offset(rt2x00dev, 0));
3447 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */ 4492 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1,
3448 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */ 4493 rt2800_get_beacon_offset(rt2x00dev, 1));
4494 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2,
4495 rt2800_get_beacon_offset(rt2x00dev, 2));
4496 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3,
4497 rt2800_get_beacon_offset(rt2x00dev, 3));
3449 rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg); 4498 rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg);
3450 4499
3451 rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg); 4500 rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg);
3452 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */ 4501 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4,
3453 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */ 4502 rt2800_get_beacon_offset(rt2x00dev, 4));
3454 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */ 4503 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5,
3455 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */ 4504 rt2800_get_beacon_offset(rt2x00dev, 5));
4505 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6,
4506 rt2800_get_beacon_offset(rt2x00dev, 6));
4507 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7,
4508 rt2800_get_beacon_offset(rt2x00dev, 7));
3456 rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg); 4509 rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg);
3457 4510
3458 rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f); 4511 rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
@@ -3528,7 +4581,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3528 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 4581 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
3529 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 4582 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
3530 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 4583 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
3531 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 4584 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
4585 &eeprom);
3532 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST)) 4586 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
3533 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 4587 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
3534 0x0000002c); 4588 0x0000002c);
@@ -3559,6 +4613,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3559 } else if (rt2x00_rt(rt2x00dev, RT3572)) { 4613 } else if (rt2x00_rt(rt2x00dev, RT3572)) {
3560 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 4614 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
3561 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 4615 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
4616 } else if (rt2x00_rt(rt2x00dev, RT3593)) {
4617 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
4618 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
4619 if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) {
4620 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
4621 &eeprom);
4622 if (rt2x00_get_field16(eeprom,
4623 EEPROM_NIC_CONF1_DAC_TEST))
4624 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
4625 0x0000001f);
4626 else
4627 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
4628 0x0000000f);
4629 } else {
4630 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
4631 0x00000000);
4632 }
3562 } else if (rt2x00_rt(rt2x00dev, RT5390) || 4633 } else if (rt2x00_rt(rt2x00dev, RT5390) ||
3563 rt2x00_rt(rt2x00dev, RT5392) || 4634 rt2x00_rt(rt2x00dev, RT5392) ||
3564 rt2x00_rt(rt2x00dev, RT5592)) { 4635 rt2x00_rt(rt2x00dev, RT5592)) {
@@ -3786,14 +4857,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3786 /* 4857 /*
3787 * Clear all beacons 4858 * Clear all beacons
3788 */ 4859 */
3789 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0); 4860 for (i = 0; i < 8; i++)
3790 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1); 4861 rt2800_clear_beacon_register(rt2x00dev, i);
3791 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
3792 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
3793 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
3794 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
3795 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
3796 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
3797 4862
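Clearing beacons by index pairs with the rt2800_get_beacon_offset() lookups earlier in this diff: the beacon memory layout now lives behind one helper instead of eight HW_BEACON_BASEn constants, presumably so RT3593 can use a different layout without touching this loop.
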
3798 if (rt2x00_is_usb(rt2x00dev)) { 4863 if (rt2x00_is_usb(rt2x00dev)) {
3799 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg); 4864 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
@@ -3989,7 +5054,7 @@ static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
3989 u8 value; 5054 u8 value;
3990 5055
3991 rt2800_bbp_read(rt2x00dev, 138, &value); 5056 rt2800_bbp_read(rt2x00dev, 138, &value);
3992 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 5057 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
3993 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) 5058 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
3994 value |= 0x20; 5059 value |= 0x20;
3995 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) 5060 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
@@ -4332,6 +5397,22 @@ static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
4332 rt2800_disable_unused_dac_adc(rt2x00dev); 5397 rt2800_disable_unused_dac_adc(rt2x00dev);
4333} 5398}
4334 5399
5400static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev)
5401{
5402 rt2800_init_bbp_early(rt2x00dev);
5403
5404 rt2800_bbp_write(rt2x00dev, 79, 0x13);
5405 rt2800_bbp_write(rt2x00dev, 80, 0x05);
5406 rt2800_bbp_write(rt2x00dev, 81, 0x33);
5407 rt2800_bbp_write(rt2x00dev, 137, 0x0f);
5408
5409 rt2800_bbp_write(rt2x00dev, 84, 0x19);
5410
5411 /* Enable DC filter */
5412 if (rt2x00_rt_rev_gte(rt2x00dev, RT3593, REV_RT3593E))
5413 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
5414}
5415
4335static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) 5416static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
4336{ 5417{
4337 int ant, div_mode; 5418 int ant, div_mode;
@@ -4402,7 +5483,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
4402 5483
4403 rt2800_disable_unused_dac_adc(rt2x00dev); 5484 rt2800_disable_unused_dac_adc(rt2x00dev);
4404 5485
4405 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 5486 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
4406 div_mode = rt2x00_get_field16(eeprom, 5487 div_mode = rt2x00_get_field16(eeprom,
4407 EEPROM_NIC_CONF1_ANT_DIVERSITY); 5488 EEPROM_NIC_CONF1_ANT_DIVERSITY);
4408 ant = (div_mode == 3) ? 1 : 0; 5489 ant = (div_mode == 3) ? 1 : 0;
@@ -4488,7 +5569,7 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
4488 5569
4489 rt2800_bbp4_mac_if_ctrl(rt2x00dev); 5570 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4490 5571
4491 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 5572 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
4492 div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); 5573 div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
4493 ant = (div_mode == 3) ? 1 : 0; 5574 ant = (div_mode == 3) ? 1 : 0;
4494 rt2800_bbp_read(rt2x00dev, 152, &value); 5575 rt2800_bbp_read(rt2x00dev, 152, &value);
@@ -4547,6 +5628,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
4547 case RT3572: 5628 case RT3572:
4548 rt2800_init_bbp_3572(rt2x00dev); 5629 rt2800_init_bbp_3572(rt2x00dev);
4549 break; 5630 break;
5631 case RT3593:
5632 rt2800_init_bbp_3593(rt2x00dev);
5633 return;
4550 case RT5390: 5634 case RT5390:
4551 case RT5392: 5635 case RT5392:
4552 rt2800_init_bbp_53xx(rt2x00dev); 5636 rt2800_init_bbp_53xx(rt2x00dev);
@@ -4557,7 +5641,8 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
4557 } 5641 }
4558 5642
4559 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 5643 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
4560 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 5644 rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i,
5645 &eeprom);
4561 5646
4562 if (eeprom != 0xffff && eeprom != 0x0000) { 5647 if (eeprom != 0xffff && eeprom != 0x0000) {
4563 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); 5648 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -4728,7 +5813,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
4728 if (rt2x00_rt(rt2x00dev, RT3090)) { 5813 if (rt2x00_rt(rt2x00dev, RT3090)) {
4729 /* Turn off unused DAC1 and ADC1 to reduce power consumption */ 5814 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
4730 rt2800_bbp_read(rt2x00dev, 138, &bbp); 5815 rt2800_bbp_read(rt2x00dev, 138, &bbp);
4731 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 5816 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
4732 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) 5817 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
4733 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); 5818 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
4734 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) 5819 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4771,6 +5856,42 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
4771 } 5856 }
4772} 5857}
4773 5858
5859static void rt2800_normal_mode_setup_3593(struct rt2x00_dev *rt2x00dev)
5860{
5861 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
5862 u8 rfcsr;
5863 u8 tx_gain;
5864
5865 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
5866 rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0);
5867 rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
5868
5869 rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
5870 tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g,
5871 RFCSR17_TXMIXER_GAIN);
5872 rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain);
5873 rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
5874
5875 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
5876 rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
5877 rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
5878
5879 rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
5880 rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
5881 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
5882
5883 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
5884 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
5885 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
5886 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
5887
5888 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
5889 rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
5890 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
5891
5892 /* TODO: enable stream mode */
5893}
5894
4774static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev) 5895static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
4775{ 5896{
4776 u8 reg; 5897 u8 reg;
@@ -4778,7 +5899,7 @@ static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
4778 5899
4779 /* Turn off unused DAC1 and ADC1 to reduce power consumption */ 5900 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
4780 rt2800_bbp_read(rt2x00dev, 138, &reg); 5901 rt2800_bbp_read(rt2x00dev, 138, &reg);
4781 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 5902 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
4782 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) 5903 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
4783 rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0); 5904 rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
4784 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) 5905 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4884,7 +6005,8 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
4884 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); 6005 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4885 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 6006 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4886 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) { 6007 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
4887 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 6008 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
6009 &eeprom);
4888 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST)) 6010 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
4889 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3); 6011 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
4890 else 6012 else
@@ -5152,6 +6274,136 @@ static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
5152 rt2800_normal_mode_setup_3xxx(rt2x00dev); 6274 rt2800_normal_mode_setup_3xxx(rt2x00dev);
5153} 6275}
5154 6276
6277static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev)
6278{
6279 u8 bbp;
6280 bool txbf_enabled = false; /* FIXME */
6281
6282 rt2800_bbp_read(rt2x00dev, 105, &bbp);
6283 if (rt2x00dev->default_ant.rx_chain_num == 1)
6284 rt2x00_set_field8(&bbp, BBP105_MLD, 0);
6285 else
6286 rt2x00_set_field8(&bbp, BBP105_MLD, 1);
6287 rt2800_bbp_write(rt2x00dev, 105, bbp);
6288
6289 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
6290
6291 rt2800_bbp_write(rt2x00dev, 92, 0x02);
6292 rt2800_bbp_write(rt2x00dev, 82, 0x82);
6293 rt2800_bbp_write(rt2x00dev, 106, 0x05);
6294 rt2800_bbp_write(rt2x00dev, 104, 0x92);
6295 rt2800_bbp_write(rt2x00dev, 88, 0x90);
6296 rt2800_bbp_write(rt2x00dev, 148, 0xc8);
6297 rt2800_bbp_write(rt2x00dev, 47, 0x48);
6298 rt2800_bbp_write(rt2x00dev, 120, 0x50);
6299
6300 if (txbf_enabled)
6301 rt2800_bbp_write(rt2x00dev, 163, 0xbd);
6302 else
6303 rt2800_bbp_write(rt2x00dev, 163, 0x9d);
6304
6305 /* SNR mapping */
6306 rt2800_bbp_write(rt2x00dev, 142, 6);
6307 rt2800_bbp_write(rt2x00dev, 143, 160);
6308 rt2800_bbp_write(rt2x00dev, 142, 7);
6309 rt2800_bbp_write(rt2x00dev, 143, 161);
6310 rt2800_bbp_write(rt2x00dev, 142, 8);
6311 rt2800_bbp_write(rt2x00dev, 143, 162);
6312
6313 /* ADC/DAC control */
6314 rt2800_bbp_write(rt2x00dev, 31, 0x08);
6315
6316 /* RX AGC energy lower bound in log2 */
6317 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
6318
 6319	/* FIXME: BBP 105 overwrite? */
6320 rt2800_bbp_write(rt2x00dev, 105, 0x04);
6321
6322}
6323
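The paired writes to BBP 142 and BBP 143 in rt3593_post_bbp_init() appear to program an index/data register pair: 142 selects the SNR mapping slot and 143 supplies its value. A table-driven sketch of the same three writes; the table and loop are editorial illustration only, the values are the ones used above:

    static const u8 rt3593_snr_map[][2] = {
            { 6, 160 }, { 7, 161 }, { 8, 162 },     /* index, value */
    };

    static void rt3593_write_snr_map(struct rt2x00_dev *rt2x00dev)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(rt3593_snr_map); i++) {
                    rt2800_bbp_write(rt2x00dev, 142, rt3593_snr_map[i][0]);
                    rt2800_bbp_write(rt2x00dev, 143, rt3593_snr_map[i][1]);
            }
    }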
6324static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
6325{
6326 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
6327 u32 reg;
6328 u8 rfcsr;
6329
6330 /* Disable GPIO #4 and #7 function for LAN PE control */
6331 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
6332 rt2x00_set_field32(&reg, GPIO_SWITCH_4, 0);
6333 rt2x00_set_field32(&reg, GPIO_SWITCH_7, 0);
6334 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
6335
6336 /* Initialize default register values */
6337 rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
6338 rt2800_rfcsr_write(rt2x00dev, 3, 0x80);
6339 rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
6340 rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
6341 rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
6342 rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
6343 rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
6344 rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
6345 rt2800_rfcsr_write(rt2x00dev, 12, 0x4e);
6346 rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
6347 rt2800_rfcsr_write(rt2x00dev, 18, 0x40);
6348 rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
6349 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
6350 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
6351 rt2800_rfcsr_write(rt2x00dev, 32, 0x78);
6352 rt2800_rfcsr_write(rt2x00dev, 33, 0x3b);
6353 rt2800_rfcsr_write(rt2x00dev, 34, 0x3c);
6354 rt2800_rfcsr_write(rt2x00dev, 35, 0xe0);
6355 rt2800_rfcsr_write(rt2x00dev, 38, 0x86);
6356 rt2800_rfcsr_write(rt2x00dev, 39, 0x23);
6357 rt2800_rfcsr_write(rt2x00dev, 44, 0xd3);
6358 rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
6359 rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
6360 rt2800_rfcsr_write(rt2x00dev, 49, 0x8e);
6361 rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
6362 rt2800_rfcsr_write(rt2x00dev, 51, 0x75);
6363 rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
6364 rt2800_rfcsr_write(rt2x00dev, 53, 0x18);
6365 rt2800_rfcsr_write(rt2x00dev, 54, 0x18);
6366 rt2800_rfcsr_write(rt2x00dev, 55, 0x18);
6367 rt2800_rfcsr_write(rt2x00dev, 56, 0xdb);
6368 rt2800_rfcsr_write(rt2x00dev, 57, 0x6e);
6369
6370 /* Initiate calibration */
 6371	/* TODO: use rt2800_rf_init_calibration? */
6372 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
6373 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
6374 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
6375
6376 rt2800_adjust_freq_offset(rt2x00dev);
6377
6378 rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
6379 rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
6380 rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
6381
6382 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
6383 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
6384 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
6385 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
6386 usleep_range(1000, 1500);
6387 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
6388 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
6389 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
6390
6391 /* Set initial values for RX filter calibration */
6392 drv_data->calibration_bw20 = 0x1f;
6393 drv_data->calibration_bw40 = 0x2f;
6394
6395 /* Save BBP 25 & 26 values for later use in channel switching */
6396 rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
6397 rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
6398
6399 rt2800_led_open_drain_enable(rt2x00dev);
6400 rt2800_normal_mode_setup_3593(rt2x00dev);
6401
6402 rt3593_post_bbp_init(rt2x00dev);
6403
6404 /* TODO: enable stream mode support */
6405}
6406
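rt2800_init_rfcsr_3593() opens with a long run of fixed register/value writes. Purely as an editorial illustration (struct rf_reg_pair and the loop below are not part of the driver), the same sequence can be expressed as a table plus a loop, which is how similar init blocks are often structured:

    struct rf_reg_pair {
            u8 reg;
            u8 value;
    };

    static const struct rf_reg_pair rt3593_rf_init[] = {
            {  1, 0x03 }, {  3, 0x80 }, {  5, 0x00 }, {  6, 0x40 },
            {  8, 0xf1 }, {  9, 0x02 }, { 10, 0xd3 }, { 11, 0x40 },
            { 12, 0x4e }, { 13, 0x12 }, { 18, 0x40 }, { 22, 0x20 },
            { 30, 0x10 }, { 31, 0x80 }, { 32, 0x78 }, { 33, 0x3b },
            { 34, 0x3c }, { 35, 0xe0 }, { 38, 0x86 }, { 39, 0x23 },
            { 44, 0xd3 }, { 45, 0xbb }, { 46, 0x60 }, { 49, 0x8e },
            { 50, 0x86 }, { 51, 0x75 }, { 52, 0x45 }, { 53, 0x18 },
            { 54, 0x18 }, { 55, 0x18 }, { 56, 0xdb }, { 57, 0x6e },
    };

    static void rt3593_rf_init_registers(struct rt2x00_dev *rt2x00dev)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(rt3593_rf_init); i++)
                    rt2800_rfcsr_write(rt2x00dev, rt3593_rf_init[i].reg,
                                       rt3593_rf_init[i].value);
    }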
5155static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev) 6407static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
5156{ 6408{
5157 rt2800_rf_init_calibration(rt2x00dev, 2); 6409 rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -5380,6 +6632,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
5380 case RT3572: 6632 case RT3572:
5381 rt2800_init_rfcsr_3572(rt2x00dev); 6633 rt2800_init_rfcsr_3572(rt2x00dev);
5382 break; 6634 break;
6635 case RT3593:
6636 rt2800_init_rfcsr_3593(rt2x00dev);
6637 break;
5383 case RT5390: 6638 case RT5390:
5384 rt2800_init_rfcsr_5390(rt2x00dev); 6639 rt2800_init_rfcsr_5390(rt2x00dev);
5385 break; 6640 break;
@@ -5456,15 +6711,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
5456 /* 6711 /*
5457 * Initialize LED control 6712 * Initialize LED control
5458 */ 6713 */
5459 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word); 6714 rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
5460 rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff, 6715 rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
5461 word & 0xff, (word >> 8) & 0xff); 6716 word & 0xff, (word >> 8) & 0xff);
5462 6717
5463 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word); 6718 rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
5464 rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff, 6719 rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
5465 word & 0xff, (word >> 8) & 0xff); 6720 word & 0xff, (word >> 8) & 0xff);
5466 6721
5467 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word); 6722 rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
5468 rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff, 6723 rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
5469 word & 0xff, (word >> 8) & 0xff); 6724 word & 0xff, (word >> 8) & 0xff);
5470 6725
@@ -5560,6 +6815,34 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
5560} 6815}
5561EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); 6816EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
5562 6817
6818static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev)
6819{
6820 u16 word;
6821
6822 if (rt2x00_rt(rt2x00dev, RT3593))
6823 return 0;
6824
6825 rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
6826 if ((word & 0x00ff) != 0x00ff)
6827 return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
6828
6829 return 0;
6830}
6831
6832static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev)
6833{
6834 u16 word;
6835
6836 if (rt2x00_rt(rt2x00dev, RT3593))
6837 return 0;
6838
6839 rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
6840 if ((word & 0x00ff) != 0x00ff)
6841 return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
6842
6843 return 0;
6844}
6845
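Both helpers above rely on the EEPROM convention that unprogrammed locations read back as all ones, so a low byte of 0xff means "no gain value stored" and the code falls back to 0. A simplified stand-alone illustration of that check; the real code extracts the gain through a rt2x00_get_field16() descriptor rather than masking the byte directly:

    static u8 gain_or_default(u16 word)
    {
            if ((word & 0x00ff) != 0x00ff)
                    return word & 0x00ff;   /* programmed value */

            return 0;                       /* erased EEPROM: use default */
    }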
5563static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) 6846static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
5564{ 6847{
5565 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; 6848 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
@@ -5578,18 +6861,18 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
5578 /* 6861 /*
5579 * Start validation of the data that has been read. 6862 * Start validation of the data that has been read.
5580 */ 6863 */
5581 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 6864 mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
5582 if (!is_valid_ether_addr(mac)) { 6865 if (!is_valid_ether_addr(mac)) {
5583 eth_random_addr(mac); 6866 eth_random_addr(mac);
5584 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); 6867 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
5585 } 6868 }
5586 6869
5587 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word); 6870 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
5588 if (word == 0xffff) { 6871 if (word == 0xffff) {
5589 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2); 6872 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
5590 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1); 6873 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
5591 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820); 6874 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
5592 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); 6875 rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
5593 rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); 6876 rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
5594 } else if (rt2x00_rt(rt2x00dev, RT2860) || 6877 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
5595 rt2x00_rt(rt2x00dev, RT2872)) { 6878 rt2x00_rt(rt2x00dev, RT2872)) {
@@ -5598,10 +6881,10 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
5598 */ 6881 */
5599 if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2) 6882 if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2)
5600 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2); 6883 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
5601 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); 6884 rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
5602 } 6885 }
5603 6886
5604 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word); 6887 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
5605 if (word == 0xffff) { 6888 if (word == 0xffff) {
5606 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0); 6889 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
5607 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0); 6890 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
@@ -5618,24 +6901,24 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
5618 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0); 6901 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0);
5619 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0); 6902 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
5620 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0); 6903 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
5621 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word); 6904 rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
5622 rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); 6905 rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
5623 } 6906 }
5624 6907
5625 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); 6908 rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
5626 if ((word & 0x00ff) == 0x00ff) { 6909 if ((word & 0x00ff) == 0x00ff) {
5627 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); 6910 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
5628 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); 6911 rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
5629 rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word); 6912 rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
5630 } 6913 }
5631 if ((word & 0xff00) == 0xff00) { 6914 if ((word & 0xff00) == 0xff00) {
5632 rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE, 6915 rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
5633 LED_MODE_TXRX_ACTIVITY); 6916 LED_MODE_TXRX_ACTIVITY);
5634 rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0); 6917 rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
5635 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); 6918 rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
5636 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555); 6919 rt2800_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
5637 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221); 6920 rt2800_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
5638 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8); 6921 rt2800_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
5639 rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word); 6922 rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word);
5640 } 6923 }
5641 6924
@@ -5644,56 +6927,61 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
5644 * lna0 as correct value. Note that EEPROM_LNA 6927 * lna0 as correct value. Note that EEPROM_LNA
5645 * is never validated. 6928 * is never validated.
5646 */ 6929 */
5647 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word); 6930 rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
5648 default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0); 6931 default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
5649 6932
5650 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word); 6933 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
5651 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10) 6934 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
5652 rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0); 6935 rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
5653 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10) 6936 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
5654 rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0); 6937 rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
5655 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word); 6938 rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
5656 6939
5657 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word); 6940 drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev);
5658 if ((word & 0x00ff) != 0x00ff) {
5659 drv_data->txmixer_gain_24g =
5660 rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
5661 } else {
5662 drv_data->txmixer_gain_24g = 0;
5663 }
5664 6941
5665 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word); 6942 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
5666 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10) 6943 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
5667 rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0); 6944 rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
5668 if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 || 6945 if (!rt2x00_rt(rt2x00dev, RT3593)) {
5669 rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff) 6946 if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
5670 rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1, 6947 rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
5671 default_lna_gain); 6948 rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
5672 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word); 6949 default_lna_gain);
5673
5674 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
5675 if ((word & 0x00ff) != 0x00ff) {
5676 drv_data->txmixer_gain_5g =
5677 rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
5678 } else {
5679 drv_data->txmixer_gain_5g = 0;
5680 } 6950 }
6951 rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
5681 6952
5682 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word); 6953 drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev);
6954
6955 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
5683 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10) 6956 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
5684 rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0); 6957 rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
5685 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10) 6958 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
5686 rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0); 6959 rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
5687 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word); 6960 rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
5688 6961
5689 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word); 6962 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
5690 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10) 6963 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
5691 rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0); 6964 rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
5692 if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 || 6965 if (!rt2x00_rt(rt2x00dev, RT3593)) {
5693 rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff) 6966 if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
5694 rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2, 6967 rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
5695 default_lna_gain); 6968 rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
5696 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); 6969 default_lna_gain);
6970 }
6971 rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
6972
6973 if (rt2x00_rt(rt2x00dev, RT3593)) {
6974 rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &word);
6975 if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
6976 rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
6977 rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1,
6978 default_lna_gain);
6979 if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0x00 ||
6980 rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0xff)
 6981			rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A2,
6982 default_lna_gain);
6983 rt2800_eeprom_write(rt2x00dev, EEPROM_EXT_LNA2, word);
6984 }
5697 6985
5698 return 0; 6986 return 0;
5699} 6987}
@@ -5707,7 +6995,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
5707 /* 6995 /*
5708 * Read EEPROM word for configuration. 6996 * Read EEPROM word for configuration.
5709 */ 6997 */
5710 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 6998 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
5711 6999
5712 /* 7000 /*
5713 * Identify RF chipset by EEPROM value 7001 * Identify RF chipset by EEPROM value
@@ -5717,7 +7005,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
5717 if (rt2x00_rt(rt2x00dev, RT3290) || 7005 if (rt2x00_rt(rt2x00dev, RT3290) ||
5718 rt2x00_rt(rt2x00dev, RT5390) || 7006 rt2x00_rt(rt2x00dev, RT5390) ||
5719 rt2x00_rt(rt2x00dev, RT5392)) 7007 rt2x00_rt(rt2x00dev, RT5392))
5720 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf); 7008 rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
5721 else 7009 else
5722 rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); 7010 rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
5723 7011
@@ -5731,6 +7019,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
5731 case RF3021: 7019 case RF3021:
5732 case RF3022: 7020 case RF3022:
5733 case RF3052: 7021 case RF3052:
7022 case RF3053:
5734 case RF3290: 7023 case RF3290:
5735 case RF3320: 7024 case RF3320:
5736 case RF3322: 7025 case RF3322:
@@ -5757,7 +7046,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
5757 rt2x00dev->default_ant.rx_chain_num = 7046 rt2x00dev->default_ant.rx_chain_num =
5758 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH); 7047 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
5759 7048
5760 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 7049 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
5761 7050
5762 if (rt2x00_rt(rt2x00dev, RT3070) || 7051 if (rt2x00_rt(rt2x00dev, RT3070) ||
5763 rt2x00_rt(rt2x00dev, RT3090) || 7052 rt2x00_rt(rt2x00dev, RT3090) ||
@@ -5810,7 +7099,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
5810 /* 7099 /*
5811 * Read frequency offset and RF programming sequence. 7100 * Read frequency offset and RF programming sequence.
5812 */ 7101 */
5813 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); 7102 rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
5814 rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); 7103 rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
5815 7104
5816 /* 7105 /*
@@ -5827,7 +7116,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
5827 /* 7116 /*
5828 * Check if the EIRP tx power limit feature is supported. 7117 * Check if the EIRP tx power limit feature is supported.
5829 */ 7118 */
5830 rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom); 7119 rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
5831 7120
5832 if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) < 7121 if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
5833 EIRP_MAX_TX_POWER_LIMIT) 7122 EIRP_MAX_TX_POWER_LIMIT)
@@ -6109,12 +7398,79 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
6109 {196, 83, 0, 12, 1}, 7398 {196, 83, 0, 12, 1},
6110}; 7399};
6111 7400
7401static const struct rf_channel rf_vals_3053[] = {
7402 /* Channel, N, R, K */
7403 {1, 241, 2, 2},
7404 {2, 241, 2, 7},
7405 {3, 242, 2, 2},
7406 {4, 242, 2, 7},
7407 {5, 243, 2, 2},
7408 {6, 243, 2, 7},
7409 {7, 244, 2, 2},
7410 {8, 244, 2, 7},
7411 {9, 245, 2, 2},
7412 {10, 245, 2, 7},
7413 {11, 246, 2, 2},
7414 {12, 246, 2, 7},
7415 {13, 247, 2, 2},
7416 {14, 248, 2, 4},
7417
7418 {36, 0x56, 0, 4},
7419 {38, 0x56, 0, 6},
7420 {40, 0x56, 0, 8},
7421 {44, 0x57, 0, 0},
7422 {46, 0x57, 0, 2},
7423 {48, 0x57, 0, 4},
7424 {52, 0x57, 0, 8},
7425 {54, 0x57, 0, 10},
7426 {56, 0x58, 0, 0},
7427 {60, 0x58, 0, 4},
7428 {62, 0x58, 0, 6},
7429 {64, 0x58, 0, 8},
7430
7431 {100, 0x5B, 0, 8},
7432 {102, 0x5B, 0, 10},
7433 {104, 0x5C, 0, 0},
7434 {108, 0x5C, 0, 4},
7435 {110, 0x5C, 0, 6},
7436 {112, 0x5C, 0, 8},
7437
7438 /* NOTE: Channel 114 has been removed intentionally.
7439 * The EEPROM contains no TX power values for it,
7440 * and the vendor driver disables it as well.
7441 */
7442
7443 {116, 0x5D, 0, 0},
7444 {118, 0x5D, 0, 2},
7445 {120, 0x5D, 0, 4},
7446 {124, 0x5D, 0, 8},
7447 {126, 0x5D, 0, 10},
7448 {128, 0x5E, 0, 0},
7449 {132, 0x5E, 0, 4},
7450 {134, 0x5E, 0, 6},
7451 {136, 0x5E, 0, 8},
7452 {140, 0x5F, 0, 0},
7453
7454 {149, 0x5F, 0, 9},
7455 {151, 0x5F, 0, 11},
7456 {153, 0x60, 0, 1},
7457 {157, 0x60, 0, 5},
7458 {159, 0x60, 0, 7},
7459 {161, 0x60, 0, 9},
7460 {165, 0x61, 0, 1},
7461 {167, 0x61, 0, 3},
7462 {169, 0x61, 0, 5},
7463 {171, 0x61, 0, 7},
7464 {173, 0x61, 0, 9},
7465};
7466
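Each rf_vals_3053 entry carries the channel number plus the RF programming parameters named in the table header; the channel-switch code later picks the entry matching the configured channel. A hypothetical linear lookup, only to illustrate how the table is consumed (the driver actually walks spec->channels by index):

    static const struct rf_channel *rf3053_find_channel(unsigned int channel)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(rf_vals_3053); i++) {
                    if (rf_vals_3053[i].channel == channel)
                            return &rf_vals_3053[i];
            }

            /* e.g. channel 114, which is intentionally absent */
            return NULL;
    }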
6112static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 7467static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6113{ 7468{
6114 struct hw_mode_spec *spec = &rt2x00dev->spec; 7469 struct hw_mode_spec *spec = &rt2x00dev->spec;
6115 struct channel_info *info; 7470 struct channel_info *info;
6116 char *default_power1; 7471 char *default_power1;
6117 char *default_power2; 7472 char *default_power2;
7473 char *default_power3;
6118 unsigned int i; 7474 unsigned int i;
6119 u16 eeprom; 7475 u16 eeprom;
6120 u32 reg; 7476 u32 reg;
@@ -6149,7 +7505,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6149 7505
6150 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 7506 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
6151 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 7507 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
6152 rt2x00_eeprom_addr(rt2x00dev, 7508 rt2800_eeprom_addr(rt2x00dev,
6153 EEPROM_MAC_ADDR_0)); 7509 EEPROM_MAC_ADDR_0));
6154 7510
6155 /* 7511 /*
@@ -6165,7 +7521,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6165 rt2x00dev->hw->max_report_rates = 7; 7521 rt2x00dev->hw->max_report_rates = 7;
6166 rt2x00dev->hw->max_rate_tries = 1; 7522 rt2x00dev->hw->max_rate_tries = 1;
6167 7523
6168 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 7524 rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
6169 7525
6170 /* 7526 /*
6171 * Initialize hw_mode information. 7527 * Initialize hw_mode information.
@@ -6200,6 +7556,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6200 spec->supported_bands |= SUPPORT_BAND_5GHZ; 7556 spec->supported_bands |= SUPPORT_BAND_5GHZ;
6201 spec->num_channels = ARRAY_SIZE(rf_vals_3x); 7557 spec->num_channels = ARRAY_SIZE(rf_vals_3x);
6202 spec->channels = rf_vals_3x; 7558 spec->channels = rf_vals_3x;
7559 } else if (rt2x00_rf(rt2x00dev, RF3053)) {
7560 spec->supported_bands |= SUPPORT_BAND_5GHZ;
7561 spec->num_channels = ARRAY_SIZE(rf_vals_3053);
7562 spec->channels = rf_vals_3053;
6203 } else if (rt2x00_rf(rt2x00dev, RF5592)) { 7563 } else if (rt2x00_rf(rt2x00dev, RF5592)) {
6204 spec->supported_bands |= SUPPORT_BAND_5GHZ; 7564 spec->supported_bands |= SUPPORT_BAND_5GHZ;
6205 7565
@@ -6265,21 +7625,40 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6265 7625
6266 spec->channels_info = info; 7626 spec->channels_info = info;
6267 7627
6268 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); 7628 default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
6269 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); 7629 default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
7630
7631 if (rt2x00dev->default_ant.tx_chain_num > 2)
7632 default_power3 = rt2800_eeprom_addr(rt2x00dev,
7633 EEPROM_EXT_TXPOWER_BG3);
7634 else
7635 default_power3 = NULL;
6270 7636
6271 for (i = 0; i < 14; i++) { 7637 for (i = 0; i < 14; i++) {
6272 info[i].default_power1 = default_power1[i]; 7638 info[i].default_power1 = default_power1[i];
6273 info[i].default_power2 = default_power2[i]; 7639 info[i].default_power2 = default_power2[i];
7640 if (default_power3)
7641 info[i].default_power3 = default_power3[i];
6274 } 7642 }
6275 7643
6276 if (spec->num_channels > 14) { 7644 if (spec->num_channels > 14) {
6277 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); 7645 default_power1 = rt2800_eeprom_addr(rt2x00dev,
6278 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); 7646 EEPROM_TXPOWER_A1);
7647 default_power2 = rt2800_eeprom_addr(rt2x00dev,
7648 EEPROM_TXPOWER_A2);
7649
7650 if (rt2x00dev->default_ant.tx_chain_num > 2)
7651 default_power3 =
7652 rt2800_eeprom_addr(rt2x00dev,
7653 EEPROM_EXT_TXPOWER_A3);
7654 else
7655 default_power3 = NULL;
6279 7656
6280 for (i = 14; i < spec->num_channels; i++) { 7657 for (i = 14; i < spec->num_channels; i++) {
6281 info[i].default_power1 = default_power1[i - 14]; 7658 info[i].default_power1 = default_power1[i - 14];
6282 info[i].default_power2 = default_power2[i - 14]; 7659 info[i].default_power2 = default_power2[i - 14];
7660 if (default_power3)
7661 info[i].default_power3 = default_power3[i - 14];
6283 } 7662 }
6284 } 7663 }
6285 7664
@@ -6290,6 +7669,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6290 case RF3022: 7669 case RF3022:
6291 case RF3320: 7670 case RF3320:
6292 case RF3052: 7671 case RF3052:
7672 case RF3053:
6293 case RF3290: 7673 case RF3290:
6294 case RF5360: 7674 case RF5360:
6295 case RF5370: 7675 case RF5370:
@@ -6328,6 +7708,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
6328 case RT3352: 7708 case RT3352:
6329 case RT3390: 7709 case RT3390:
6330 case RT3572: 7710 case RT3572:
7711 case RT3593:
6331 case RT5390: 7712 case RT5390:
6332 case RT5392: 7713 case RT5392:
6333 case RT5592: 7714 case RT5592:
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 6ec739466db4..a94ba447e63c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -226,4 +226,8 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
226 struct survey_info *survey); 226 struct survey_info *survey);
227void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev); 227void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
228 228
229void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
230 unsigned short *txwi_size,
231 unsigned short *rxwi_size);
232
229#endif /* RT2800LIB_H */ 233#endif /* RT2800LIB_H */
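The header only gains the declaration. Based on the RT5592 special case removed from rt2800usb_queue_init() further below, a plausible sketch of the helper's body looks like this; the RT3593 descriptor sizes are an assumption inferred from this commit's purpose, not visible in the hunks:

    void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
                                   unsigned short *txwi_size,
                                   unsigned short *rxwi_size)
    {
            if (rt2x00_rt(rt2x00dev, RT3593)) {
                    *txwi_size = TXWI_DESC_SIZE_4WORDS;     /* assumed */
                    *rxwi_size = RXWI_DESC_SIZE_5WORDS;     /* assumed */
            } else if (rt2x00_rt(rt2x00dev, RT5592)) {
                    *txwi_size = TXWI_DESC_SIZE_5WORDS;
                    *rxwi_size = RXWI_DESC_SIZE_6WORDS;
            } else {
                    *txwi_size = TXWI_DESC_SIZE_4WORDS;
                    *rxwi_size = RXWI_DESC_SIZE_4WORDS;
            }
    }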
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 00055627eb8d..f8f2abbfbb65 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -507,9 +507,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
507 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 507 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
508 508
509 if (rt2x00_is_pcie(rt2x00dev) && 509 if (rt2x00_is_pcie(rt2x00dev) &&
510 (rt2x00_rt(rt2x00dev, RT3572) || 510 (rt2x00_rt(rt2x00dev, RT3090) ||
511 rt2x00_rt(rt2x00dev, RT3390) ||
512 rt2x00_rt(rt2x00dev, RT3572) ||
513 rt2x00_rt(rt2x00dev, RT3593) ||
511 rt2x00_rt(rt2x00dev, RT5390) || 514 rt2x00_rt(rt2x00dev, RT5390) ||
512 rt2x00_rt(rt2x00dev, RT5392))) { 515 rt2x00_rt(rt2x00dev, RT5392) ||
516 rt2x00_rt(rt2x00dev, RT5592))) {
513 rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg); 517 rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
514 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); 518 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
515 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); 519 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -1189,12 +1193,17 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1189 1193
1190static void rt2800pci_queue_init(struct data_queue *queue) 1194static void rt2800pci_queue_init(struct data_queue *queue)
1191{ 1195{
1196 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1197 unsigned short txwi_size, rxwi_size;
1198
1199 rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
1200
1192 switch (queue->qid) { 1201 switch (queue->qid) {
1193 case QID_RX: 1202 case QID_RX:
1194 queue->limit = 128; 1203 queue->limit = 128;
1195 queue->data_size = AGGREGATION_SIZE; 1204 queue->data_size = AGGREGATION_SIZE;
1196 queue->desc_size = RXD_DESC_SIZE; 1205 queue->desc_size = RXD_DESC_SIZE;
1197 queue->winfo_size = RXWI_DESC_SIZE_4WORDS; 1206 queue->winfo_size = rxwi_size;
1198 queue->priv_size = sizeof(struct queue_entry_priv_mmio); 1207 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1199 break; 1208 break;
1200 1209
@@ -1205,7 +1214,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
1205 queue->limit = 64; 1214 queue->limit = 64;
1206 queue->data_size = AGGREGATION_SIZE; 1215 queue->data_size = AGGREGATION_SIZE;
1207 queue->desc_size = TXD_DESC_SIZE; 1216 queue->desc_size = TXD_DESC_SIZE;
1208 queue->winfo_size = TXWI_DESC_SIZE_4WORDS; 1217 queue->winfo_size = txwi_size;
1209 queue->priv_size = sizeof(struct queue_entry_priv_mmio); 1218 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1210 break; 1219 break;
1211 1220
@@ -1213,7 +1222,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
1213 queue->limit = 8; 1222 queue->limit = 8;
1214 queue->data_size = 0; /* No DMA required for beacons */ 1223 queue->data_size = 0; /* No DMA required for beacons */
1215 queue->desc_size = TXD_DESC_SIZE; 1224 queue->desc_size = TXD_DESC_SIZE;
1216 queue->winfo_size = TXWI_DESC_SIZE_4WORDS; 1225 queue->winfo_size = txwi_size;
1217 queue->priv_size = sizeof(struct queue_entry_priv_mmio); 1226 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
1218 break; 1227 break;
1219 1228
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 840833b26bfa..96961b9a395c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -854,13 +854,7 @@ static void rt2800usb_queue_init(struct data_queue *queue)
854 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; 854 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
855 unsigned short txwi_size, rxwi_size; 855 unsigned short txwi_size, rxwi_size;
856 856
857 if (rt2x00_rt(rt2x00dev, RT5592)) { 857 rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
858 txwi_size = TXWI_DESC_SIZE_5WORDS;
859 rxwi_size = RXWI_DESC_SIZE_6WORDS;
860 } else {
861 txwi_size = TXWI_DESC_SIZE_4WORDS;
862 rxwi_size = RXWI_DESC_SIZE_4WORDS;
863 }
864 858
865 switch (queue->qid) { 859 switch (queue->qid) {
866 case QID_RX: 860 case QID_RX:
@@ -977,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
977 { USB_DEVICE(0x0411, 0x016f) }, 971 { USB_DEVICE(0x0411, 0x016f) },
978 { USB_DEVICE(0x0411, 0x01a2) }, 972 { USB_DEVICE(0x0411, 0x01a2) },
979 { USB_DEVICE(0x0411, 0x01ee) }, 973 { USB_DEVICE(0x0411, 0x01ee) },
974 { USB_DEVICE(0x0411, 0x01a8) },
980 /* Corega */ 975 /* Corega */
981 { USB_DEVICE(0x07aa, 0x002f) }, 976 { USB_DEVICE(0x07aa, 0x002f) },
982 { USB_DEVICE(0x07aa, 0x003c) }, 977 { USB_DEVICE(0x07aa, 0x003c) },
@@ -1194,6 +1189,40 @@ static struct usb_device_id rt2800usb_device_table[] = {
1194 /* Zinwell */ 1189 /* Zinwell */
1195 { USB_DEVICE(0x5a57, 0x0284) }, 1190 { USB_DEVICE(0x5a57, 0x0284) },
1196#endif 1191#endif
1192#ifdef CONFIG_RT2800USB_RT3573
1193 /* AirLive */
1194 { USB_DEVICE(0x1b75, 0x7733) },
1195 /* ASUS */
1196 { USB_DEVICE(0x0b05, 0x17bc) },
1197 { USB_DEVICE(0x0b05, 0x17ad) },
1198 /* Belkin */
1199 { USB_DEVICE(0x050d, 0x1103) },
1200 /* Cameo */
1201 { USB_DEVICE(0x148f, 0xf301) },
1202 /* Edimax */
1203 { USB_DEVICE(0x7392, 0x7733) },
1204 /* Hawking */
1205 { USB_DEVICE(0x0e66, 0x0020) },
1206 { USB_DEVICE(0x0e66, 0x0021) },
1207 /* I-O DATA */
1208 { USB_DEVICE(0x04bb, 0x094e) },
1209 /* Linksys */
1210 { USB_DEVICE(0x13b1, 0x003b) },
1211 /* Logitec */
1212 { USB_DEVICE(0x0789, 0x016b) },
1213 /* NETGEAR */
1214 { USB_DEVICE(0x0846, 0x9012) },
1215 { USB_DEVICE(0x0846, 0x9019) },
1216 /* Planex */
1217 { USB_DEVICE(0x2019, 0xed19) },
1218 /* Ralink */
1219 { USB_DEVICE(0x148f, 0x3573) },
1220 /* Sitecom */
1221 { USB_DEVICE(0x0df6, 0x0067) },
1222 { USB_DEVICE(0x0df6, 0x006a) },
1223 /* ZyXEL */
1224 { USB_DEVICE(0x0586, 0x3421) },
1225#endif
1197#ifdef CONFIG_RT2800USB_RT53XX 1226#ifdef CONFIG_RT2800USB_RT53XX
1198 /* Arcadyan */ 1227 /* Arcadyan */
1199 { USB_DEVICE(0x043e, 0x7a12) }, 1228 { USB_DEVICE(0x043e, 0x7a12) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ee3fc570b11d..fe4c572db52c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -211,6 +211,7 @@ struct channel_info {
211 short max_power; 211 short max_power;
212 short default_power1; 212 short default_power1;
213 short default_power2; 213 short default_power2;
214 short default_power3;
214}; 215};
215 216
216/* 217/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index b16521e6bf4a..712eea9d398f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -566,10 +566,10 @@ static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
566 566
567#undef TID_CHECK 567#undef TID_CHECK
568 568
569 if (compare_ether_addr(ba->ra, entry->ta)) 569 if (!ether_addr_equal(ba->ra, entry->ta))
570 continue; 570 continue;
571 571
572 if (compare_ether_addr(ba->ta, entry->ra)) 572 if (!ether_addr_equal(ba->ta, entry->ra))
573 continue; 573 continue;
574 574
575 /* Mark BAR since we received the corresponding BA */ 575 /* Mark BAR since we received the corresponding BA */
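The conversions in this hunk flip the test because the two helpers have opposite senses: compare_ether_addr() was memcmp-like and returned 0 on a match, while ether_addr_equal() returns true on a match. A minimal sketch of the new idiom (the function below is illustrative only):

    #include <linux/etherdevice.h>

    /* true when the addresses differ, i.e. the "skip" condition
     * used in the loop above */
    static bool addrs_differ(const u8 *a, const u8 *b)
    {
            return !ether_addr_equal(a, b); /* was: compare_ether_addr(a, b) != 0 */
    }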
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index aa95c6cf3545..6c8a33b6ee22 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -936,7 +936,7 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
936 spin_unlock_irqrestore(&queue->index_lock, irqflags); 936 spin_unlock_irqrestore(&queue->index_lock, irqflags);
937} 937}
938 938
939void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) 939static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
940{ 940{
941 switch (queue->qid) { 941 switch (queue->qid) {
942 case QID_AC_VO: 942 case QID_AC_VO:
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 91a04e2b8ece..fc207b268e4f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -3,10 +3,10 @@
3 * Linux device driver for RTL8180 / RTL8185 3 * Linux device driver for RTL8180 / RTL8185
4 * 4 *
5 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 5 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
6 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 6 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
7 * 7 *
8 * Based on the r8180 driver, which is: 8 * Based on the r8180 driver, which is:
9 * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al. 9 * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
10 * 10 *
11 * Thanks to Realtek for their support! 11 * Thanks to Realtek for their support!
12 * 12 *
@@ -32,7 +32,7 @@
32#include "grf5101.h" 32#include "grf5101.h"
33 33
34MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 34MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
35MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>"); 35MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
36MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver"); 36MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index 077ff92cc139..dc845693f321 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -2,7 +2,7 @@
2/* 2/*
3 * Radio tuning for GCT GRF5101 on RTL8180 3 * Radio tuning for GCT GRF5101 on RTL8180
4 * 4 *
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 5 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
6 * 6 *
7 * Code from the BSD driver and the rtl8181 project have been 7 * Code from the BSD driver and the rtl8181 project have been
8 * very useful to understand certain things 8 * very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
index 76647111bcff..4d80a2785123 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
@@ -4,7 +4,7 @@
4/* 4/*
5 * Radio tuning for GCT GRF5101 on RTL8180 5 * Radio tuning for GCT GRF5101 on RTL8180
6 * 6 *
7 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 7 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
8 * 8 *
9 * Code from the BSD driver and the rtl8181 project have been 9 * Code from the BSD driver and the rtl8181 project have been
10 * very useful to understand certain things 10 * very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index 4715000c94dd..a63c443c3c6f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Radio tuning for Maxim max2820 on RTL8180 2 * Radio tuning for Maxim max2820 on RTL8180
3 * 3 *
4 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 4 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
5 * 5 *
6 * Code from the BSD driver and the rtl8181 project have been 6 * Code from the BSD driver and the rtl8181 project have been
7 * very useful to understand certain things 7 * very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
index 61cf6d1e7d57..8e982b72b690 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
@@ -4,7 +4,7 @@
4/* 4/*
5 * Radio tuning for Maxim max2820 on RTL8180 5 * Radio tuning for Maxim max2820 on RTL8180
6 * 6 *
7 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 7 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
8 * 8 *
9 * Code from the BSD driver and the rtl8181 project have been 9 * Code from the BSD driver and the rtl8181 project have been
10 * very useful to understand certain things 10 * very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index cc2a5412c1f0..ee638d0749d6 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -3,10 +3,10 @@
3 * Radio tuning for RTL8225 on RTL8180 3 * Radio tuning for RTL8225 on RTL8180
4 * 4 *
5 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 5 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
6 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 6 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
7 * 7 *
8 * Based on the r8180 driver, which is: 8 * Based on the r8180 driver, which is:
9 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al. 9 * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
10 * 10 *
11 * Thanks to Realtek for their support! 11 * Thanks to Realtek for their support!
12 * 12 *
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index b3ec40f6bd23..7614d9ccc729 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -2,7 +2,7 @@
2/* 2/*
3 * Radio tuning for Philips SA2400 on RTL8180 3 * Radio tuning for Philips SA2400 on RTL8180
4 * 4 *
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 5 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
6 * 6 *
7 * Code from the BSD driver and the rtl8181 project have been 7 * Code from the BSD driver and the rtl8181 project have been
8 * very useful to understand certain things 8 * very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
index a4aaa0d413f1..fb0093f35148 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
@@ -4,7 +4,7 @@
4/* 4/*
5 * Radio tuning for Philips SA2400 on RTL8180 5 * Radio tuning for Philips SA2400 on RTL8180
6 * 6 *
7 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 7 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
8 * 8 *
9 * Code from the BSD driver and the rtl8181 project have been 9 * Code from the BSD driver and the rtl8181 project have been
10 * very useful to understand certain things 10 * very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index f49220e234b0..841fb9dfc9da 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -2,10 +2,10 @@
2 * Linux device driver for RTL8187 2 * Linux device driver for RTL8187
3 * 3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 5 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
6 * 6 *
7 * Based on the r8187 driver, which is: 7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al. 8 * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
9 * 9 *
10 * The driver was extended to the RTL8187B in 2008 by: 10 * The driver was extended to the RTL8187B in 2008 by:
11 * Herton Ronaldo Krzesinski <herton@mandriva.com.br> 11 * Herton Ronaldo Krzesinski <herton@mandriva.com.br>
@@ -37,7 +37,7 @@
37#include "rfkill.h" 37#include "rfkill.h"
38 38
39MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 39MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
40MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>"); 40MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
41MODULE_AUTHOR("Herton Ronaldo Krzesinski <herton@mandriva.com.br>"); 41MODULE_AUTHOR("Herton Ronaldo Krzesinski <herton@mandriva.com.br>");
42MODULE_AUTHOR("Hin-Tak Leung <htl10@users.sourceforge.net>"); 42MODULE_AUTHOR("Hin-Tak Leung <htl10@users.sourceforge.net>");
43MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); 43MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index e19a20a8e955..56aee067f324 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -2,10 +2,10 @@
2 * Definitions for RTL8187 hardware 2 * Definitions for RTL8187 hardware
3 * 3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 5 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
6 * 6 *
7 * Based on the r8187 driver, which is: 7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al. 8 * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index f0bf35fedbaf..a26193a04447 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -2,10 +2,10 @@
2 * Radio tuning for RTL8225 on RTL8187 2 * Radio tuning for RTL8225 on RTL8187
3 * 3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 5 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
6 * 6 *
7 * Based on the r8187 driver, which is: 7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al. 8 * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
9 * 9 *
10 * Magic delays, register offsets, and phy value tables below are 10 * Magic delays, register offsets, and phy value tables below are
11 * taken from the original r8187 driver sources. Thanks to Realtek 11 * taken from the original r8187 driver sources. Thanks to Realtek
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
index 20c5b6ead0f6..141afb09a5b4 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
@@ -2,10 +2,10 @@
2 * Radio tuning definitions for RTL8225 on RTL8187 2 * Radio tuning definitions for RTL8225 on RTL8187
3 * 3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 5 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
6 * 6 *
7 * Based on the r8187 driver, which is: 7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al. 8 * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 1615f63b02f6..ce23dfd42381 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -2,10 +2,10 @@
2 * Definitions for RTL818x hardware 2 * Definitions for RTL818x hardware
3 * 3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net> 4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> 5 * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
6 * 6 *
7 * Based on the r8187 driver, which is: 7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al. 8 * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 7651f5acc14b..8bb4a9a01a18 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1304,7 +1304,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
1304 return; 1304 return;
1305 1305
1306 /* and only beacons from the associated BSSID, please */ 1306 /* and only beacons from the associated BSSID, please */
1307 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) 1307 if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
1308 return; 1308 return;
1309 1309
1310 rtlpriv->link_info.bcn_rx_inperiod++; 1310 rtlpriv->link_info.bcn_rx_inperiod++;
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 298b615964e8..0d81f766fd0f 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -688,7 +688,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
688 find_p2p_ie = true; 688 find_p2p_ie = true;
689 /*to find noa ie*/ 689 /*to find noa ie*/
690 while (ie + 1 < end) { 690 while (ie + 1 < end) {
691 noa_len = READEF2BYTE(&ie[1]); 691 noa_len = READEF2BYTE((__le16 *)&ie[1]);
692 if (ie + 3 + ie[1] > end) 692 if (ie + 3 + ie[1] > end)
693 return; 693 return;
694 694
@@ -717,13 +717,13 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
717 READEF1BYTE(ie+index); 717 READEF1BYTE(ie+index);
718 index += 1; 718 index += 1;
719 p2pinfo->noa_duration[i] = 719 p2pinfo->noa_duration[i] =
720 READEF4BYTE(ie+index); 720 READEF4BYTE((__le32 *)(ie+index));
721 index += 4; 721 index += 4;
722 p2pinfo->noa_interval[i] = 722 p2pinfo->noa_interval[i] =
723 READEF4BYTE(ie+index); 723 READEF4BYTE((__le32 *)(ie+index));
724 index += 4; 724 index += 4;
725 p2pinfo->noa_start_time[i] = 725 p2pinfo->noa_start_time[i] =
726 READEF4BYTE(ie+index); 726 READEF4BYTE((__le32 *)(ie+index));
727 index += 4; 727 index += 4;
728 } 728 }
729 729
@@ -780,7 +780,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
780 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n"); 780 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
781 /*to find noa ie*/ 781 /*to find noa ie*/
782 while (ie + 1 < end) { 782 while (ie + 1 < end) {
783 noa_len = READEF2BYTE(&ie[1]); 783 noa_len = READEF2BYTE((__le16 *)&ie[1]);
784 if (ie + 3 + ie[1] > end) 784 if (ie + 3 + ie[1] > end)
785 return; 785 return;
786 786
@@ -809,13 +809,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
809 READEF1BYTE(ie+index); 809 READEF1BYTE(ie+index);
810 index += 1; 810 index += 1;
811 p2pinfo->noa_duration[i] = 811 p2pinfo->noa_duration[i] =
812 READEF4BYTE(ie+index); 812 READEF4BYTE((__le32 *)(ie+index));
813 index += 4; 813 index += 4;
814 p2pinfo->noa_interval[i] = 814 p2pinfo->noa_interval[i] =
815 READEF4BYTE(ie+index); 815 READEF4BYTE((__le32 *)(ie+index));
816 index += 4; 816 index += 4;
817 p2pinfo->noa_start_time[i] = 817 p2pinfo->noa_start_time[i] =
818 READEF4BYTE(ie+index); 818 READEF4BYTE((__le32 *)(ie+index));
819 index += 4; 819 index += 4;
820 } 820 }
821 821
@@ -923,7 +923,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
923 return; 923 return;
924 924
925 /* and only beacons from the associated BSSID, please */ 925 /* and only beacons from the associated BSSID, please */
926 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) 926 if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
927 return; 927 return;
928 928
929 /* check if this really is a beacon */ 929 /* check if this really is a beacon */
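One detail worth spelling out in the READEF4BYTE() conversions above: a C cast binds tighter than '+', so (__le32 *)ie + index would advance the pointer by index * sizeof(__le32) bytes, whereas the parenthesized (__le32 *)(ie + index) advances by index bytes, which is what the byte-offset variable index intends. A small stand-alone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t ie[16];
            unsigned int index = 4;  /* byte offset, as in ps.c */

            /* cast first, then add: steps in units of sizeof(uint32_t) */
            const void *scaled = (uint32_t *)ie + index;    /* ie + 16 */
            /* add first, then cast: steps in bytes, as intended */
            const void *byte   = (uint32_t *)(ie + index);  /* ie + 4  */

            printf("scaled=%p byte=%p\n", scaled, byte);
            return 0;
    }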
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index f9f059dadb73..a98acefb8c06 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -218,6 +218,7 @@ static void rtl_tx_status(void *ppriv,
218 218
219static void rtl_rate_init(void *ppriv, 219static void rtl_rate_init(void *ppriv,
220 struct ieee80211_supported_band *sband, 220 struct ieee80211_supported_band *sband,
221 struct cfg80211_chan_def *chandef,
221 struct ieee80211_sta *sta, void *priv_sta) 222 struct ieee80211_sta *sta, void *priv_sta)
222{ 223{
223} 224}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index a8871d66d56a..68685a898257 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -305,13 +305,14 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
305 psaddr = ieee80211_get_SA(hdr); 305 psaddr = ieee80211_get_SA(hdr);
306 memcpy(pstatus->psaddr, psaddr, ETH_ALEN); 306 memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
307 307
308 addr = (!compare_ether_addr(mac->bssid, (ufc & IEEE80211_FCTL_TODS) ? 308 addr = ether_addr_equal(mac->bssid,
309 hdr->addr1 : (ufc & IEEE80211_FCTL_FROMDS) ? 309 (ufc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
310 hdr->addr2 : hdr->addr3)); 310 (ufc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
311 hdr->addr3);
311 match_bssid = ((IEEE80211_FTYPE_CTL != type) && (!pstatus->hwerror) && 312 match_bssid = ((IEEE80211_FTYPE_CTL != type) && (!pstatus->hwerror) &&
312 (!pstatus->crc) && (!pstatus->icv)) && addr; 313 (!pstatus->crc) && (!pstatus->icv)) && addr;
313 314
314 addr = (!compare_ether_addr(praddr, rtlefuse->dev_addr)); 315 addr = ether_addr_equal(praddr, rtlefuse->dev_addr);
315 packet_toself = match_bssid && addr; 316 packet_toself = match_bssid && addr;
316 317
317 if (ieee80211_is_beacon(fc)) 318 if (ieee80211_is_beacon(fc))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 8e3ec1e25644..0f7812e0c8aa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -109,5 +109,8 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
109void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, 109void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
110 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); 110 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
111bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw); 111bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
112void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
113 struct ieee80211_sta *sta,
114 u8 rssi_level);
112 115
113#endif 116#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index 262e1e4c6e5b..a1310abd0d54 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,8 +49,5 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, 49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
50 enum radio_path rfpath, u32 regaddr, u32 bitmask); 50 enum radio_path rfpath, u32 regaddr, u32 bitmask);
51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
52void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
53 struct ieee80211_sta *sta,
54 u8 rssi_level);
55 52
56#endif 53#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index c72758d8f4ed..bcd82a1020a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -255,16 +255,16 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
 	type = WLAN_FC_GET_TYPE(fc);
 	praddr = hdr->addr1;
 
-	packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
-	     (!compare_ether_addr(mac->bssid,
-				  (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ?
-				  hdr->addr1 : (le16_to_cpu(fc) &
-				  IEEE80211_FCTL_FROMDS) ?
-				  hdr->addr2 : hdr->addr3)) && (!pstatus->hwerror) &&
-	     (!pstatus->crc) && (!pstatus->icv));
+	packet_matchbssid =
+	    ((IEEE80211_FTYPE_CTL != type) &&
+	     ether_addr_equal(mac->bssid,
+			      (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+			      (le16_to_cpu(fc) & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+			      hdr->addr3) &&
+	     (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv));
 
-	packet_toself = packet_matchbssid &&
-	    (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+	packet_toself = (packet_matchbssid &&
+			 ether_addr_equal(praddr, rtlefuse->dev_addr));
 
 	if (ieee80211_is_beacon(fc))
 		packet_beacon = true;
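The rtlwifi hunks above are one mechanical conversion: !compare_ether_addr(a, b) becomes ether_addr_equal(a, b). compare_ether_addr() followed memcmp() convention and returned 0 on a match, so every caller had to negate it, while ether_addr_equal() is a direct boolean predicate. A minimal sketch of why the two forms accept exactly the same frames; the helper name frame_is_for_us is illustrative, not from the patch:

#include <linux/etherdevice.h>

/* Illustrative only -- not part of this series. */
static bool frame_is_for_us(const u8 *da, const u8 *dev_addr)
{
	/* old spelling: return !compare_ether_addr(da, dev_addr); */
	return ether_addr_equal(da, dev_addr);
}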
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b8db55c868c7..38995f90040d 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1315,7 +1315,7 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
 
 #ifdef CONFIG_PM
 static int
-wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
+wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
 {
 	int num_fields = 0, in_field = 0, fields_size = 0;
 	int i, pattern_len = 0;
@@ -1458,9 +1458,9 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
  * Allocates an RX filter returned through f
  * which needs to be freed using rx_filter_free()
  */
-static int wl1271_convert_wowlan_pattern_to_rx_filter(
-	struct cfg80211_wowlan_trig_pkt_pattern *p,
-	struct wl12xx_rx_filter **f)
+static int
+wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
+					   struct wl12xx_rx_filter **f)
 {
 	int i, j, ret = 0;
 	struct wl12xx_rx_filter *filter;
@@ -1562,7 +1562,7 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
 
 	/* Translate WoWLAN patterns into filters */
 	for (i = 0; i < wow->n_patterns; i++) {
-		struct cfg80211_wowlan_trig_pkt_pattern *p;
+		struct cfg80211_pkt_pattern *p;
 		struct wl12xx_rx_filter *filter = NULL;
 
 		p = &wow->patterns[i];
@@ -5623,7 +5623,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
 	wl->hw->wiphy->max_remain_on_channel_duration = 5000;
 
 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
-				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 
 	/* make sure all our channels fit in the scanned_ch bitmask */
 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
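The wlcore hunks above track a cfg80211 rename: the WoWLAN trigger pattern struct is now plain cfg80211_pkt_pattern. A short sketch of a consumer, assuming the conventional cfg80211 wowlan fields (n_patterns, patterns[], pattern_len); only the struct tag changed here, and count_wowlan_pattern_bytes is a hypothetical helper, not driver code:

#include <net/cfg80211.h>

static int count_wowlan_pattern_bytes(const struct cfg80211_wowlan *wow)
{
	int i, total = 0;

	for (i = 0; i < wow->n_patterns; i++) {
		const struct cfg80211_pkt_pattern *p = &wow->patterns[i];

		total += p->pattern_len;	/* bytes the firmware must match */
	}

	return total;
}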
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index f3442762d884..527590f2adfb 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -356,7 +356,8 @@ out:
 	return ret;
 }
 
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  void *data, int len)
 {
 	struct wl1271 *wl = hw->priv;
 	struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
diff --git a/drivers/net/wireless/ti/wlcore/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h
index 8071654259ea..61d8434d859a 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.h
+++ b/drivers/net/wireless/ti/wlcore/testmode.h
@@ -26,6 +26,7 @@
 
 #include <net/mac80211.h>
 
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len);
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  void *data, int len);
 
 #endif /* __WL1271_TESTMODE_H__ */
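The extra ieee80211_vif argument mirrors a mac80211-wide change to the testmode_cmd callback prototype. A sketch of how the driver side is typically wired, assuming the usual mac80211 op table; wl1271_ops_fragment is illustrative, and the real table in wlcore carries many more ops:

#include <net/mac80211.h>

#ifdef CONFIG_NL80211_TESTMODE
static const struct ieee80211_ops wl1271_ops_fragment = {
	/* ... */
	.testmode_cmd = wl1271_tm_cmd,	/* now also receives the vif */
};
#endif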
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index b8ba1f925e75..d39c4178c33a 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -75,8 +75,10 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
 	len = fw_entry->size;
 
 	buf = kmalloc(1024, GFP_ATOMIC);
-	if (!buf)
+	if (!buf) {
+		err = -ENOMEM;
 		goto exit;
+	}
 
 	while (len > 0) {
 		int translen = (len > 1024) ? 1024 : len;
@@ -1764,8 +1766,10 @@ static int zd1201_probe(struct usb_interface *interface,
 	zd->endp_out2 = 2;
 	zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!zd->rx_urb || !zd->tx_urb)
+	if (!zd->rx_urb || !zd->tx_urb) {
+		err = -ENOMEM;
 		goto err_zd;
+	}
 
 	mdelay(100);
 	err = zd1201_drvr_start(zd);
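Both zd1201 hunks enforce the same goto-unwind rule: set the error code before jumping to the exit label, otherwise the function falls through with a stale (often zero) err and reports success for a failed allocation. A self-contained sketch of the idiom; example_setup is illustrative, not driver code:

#include <linux/slab.h>

static int example_setup(void **out)
{
	void *buf;
	int err;

	buf = kmalloc(1024, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;	/* set the code *before* jumping */
		goto exit;
	}

	*out = buf;
	err = 0;
exit:
	return err;
}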
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a4d77ee9c5b..a1977430ddfb 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -45,31 +45,109 @@
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
 
-struct xen_netbk;
+typedef unsigned int pending_ring_idx_t;
+#define INVALID_PENDING_RING_IDX (~0U)
+
+/* For the head field in pending_tx_info: it is used to indicate
+ * whether this tx info is the head of one or more coalesced requests.
+ *
+ * When head != INVALID_PENDING_RING_IDX, it means the start of a new
+ * tx requests queue and the end of previous queue.
+ *
+ * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
+ *
+ * ...|0 I I I|5 I|9 I I I|...
+ * -->|<-INUSE----------------
+ *
+ * After consuming the first slot(s) we have:
+ *
+ * ...|V V V V|5 I|9 I I I|...
+ * -----FREE->|<-INUSE--------
+ *
+ * where V stands for "valid pending ring index". Any number other
+ * than INVALID_PENDING_RING_IDX is OK. These entries are considered
+ * free and can contain any number other than
+ * INVALID_PENDING_RING_IDX. In practice we use 0.
+ *
+ * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
+ * above example) number is the index into pending_tx_info and
+ * mmap_pages arrays.
+ */
+struct pending_tx_info {
+	struct xen_netif_tx_request req; /* coalesced tx request */
+	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
+				  * if it is head of one or more tx
+				  * reqs
+				  */
+};
+
+#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+
+struct xenvif_rx_meta {
+	int id;
+	int size;
+	int gso_size;
+};
+
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
+#define MAX_BUFFER_OFFSET PAGE_SIZE
+
+#define MAX_PENDING_REQS 256
 
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
 	unsigned int handle;
 
-	/* Reference to netback processing backend. */
-	struct xen_netbk *netbk;
+	/* Use NAPI for guest TX */
+	struct napi_struct napi;
+	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+	unsigned int tx_irq;
+	/* Only used when feature-split-event-channels = 1 */
+	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+	struct xen_netif_tx_back_ring tx;
+	struct sk_buff_head tx_queue;
+	struct page *mmap_pages[MAX_PENDING_REQS];
+	pending_ring_idx_t pending_prod;
+	pending_ring_idx_t pending_cons;
+	u16 pending_ring[MAX_PENDING_REQS];
+	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+
+	/* Coalescing tx requests before copying makes number of grant
+	 * copy ops greater or equal to number of slots required. In
+	 * worst case a tx request consumes 2 gnttab_copy.
+	 */
+	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
 
-	u8 fe_dev_addr[6];
 
+	/* Use kthread for guest RX */
+	struct task_struct *task;
+	wait_queue_head_t wq;
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
-	unsigned int tx_irq;
 	unsigned int rx_irq;
 	/* Only used when feature-split-event-channels = 1 */
-	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+	struct xen_netif_rx_back_ring rx;
+	struct sk_buff_head rx_queue;
 
-	/* List of frontends to notify after a batch of frames sent. */
-	struct list_head notify_list;
+	/* Allow xenvif_start_xmit() to peek ahead in the rx request
+	 * ring. This is a prediction of what rx_req_cons will be
+	 * once all queued skbs are put on the ring.
+	 */
+	RING_IDX rx_req_cons_peek;
+
+	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
+	 * head/fragment page uses 2 copy operations because it
+	 * straddles two buffers in the frontend.
+	 */
+	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
+	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
 
-	/* The shared rings and indexes. */
-	struct xen_netif_tx_back_ring tx;
-	struct xen_netif_rx_back_ring rx;
+
+	u8 fe_dev_addr[6];
 
 	/* Frontend feature information. */
 	u8 can_sg:1;
@@ -80,13 +158,6 @@ struct xenvif {
 	/* Internal feature information. */
 	u8 can_queue:1;	    /* can queue packets for receiver? */
 
-	/*
-	 * Allow xenvif_start_xmit() to peek ahead in the rx request
-	 * ring. This is a prediction of what rx_req_cons will be
-	 * once all queued skbs are put on the ring.
-	 */
-	RING_IDX rx_req_cons_peek;
-
 	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
 	unsigned long credit_bytes;
 	unsigned long credit_usec;
@@ -97,11 +168,7 @@ struct xenvif {
 	unsigned long rx_gso_checksum_fixup;
 
 	/* Miscellaneous private stuff. */
-	struct list_head schedule_list;
-	atomic_t refcnt;
 	struct net_device *dev;
-
-	wait_queue_head_t waiting_to_free;
 };
 
 static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
@@ -109,9 +176,6 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
 	return to_xenbus_device(vif->dev->dev.parent);
 }
 
-#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-
 struct xenvif *xenvif_alloc(struct device *parent,
 			    domid_t domid,
 			    unsigned int handle);
@@ -121,39 +185,26 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		   unsigned int rx_evtchn);
 void xenvif_disconnect(struct xenvif *vif);
 
-void xenvif_get(struct xenvif *vif);
-void xenvif_put(struct xenvif *vif);
-
 int xenvif_xenbus_init(void);
 void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xen_netbk_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif *vif);
 
-int xen_netbk_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif *vif);
 
 /* (Un)Map communication rings. */
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
-				 grant_ref_t tx_ring_ref,
-				 grant_ref_t rx_ring_ref);
-
-/* (De)Register a xenvif with the netback backend. */
-void xen_netbk_add_xenvif(struct xenvif *vif);
-void xen_netbk_remove_xenvif(struct xenvif *vif);
-
-/* (De)Schedule backend processing for a xenvif */
-void xen_netbk_schedule_xenvif(struct xenvif *vif);
-void xen_netbk_deschedule_xenvif(struct xenvif *vif);
+void xenvif_unmap_frontend_rings(struct xenvif *vif);
+int xenvif_map_frontend_rings(struct xenvif *vif,
+			      grant_ref_t tx_ring_ref,
+			      grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xen_netbk_check_rx_xenvif(struct xenvif *vif);
-/* Receive an SKB from the frontend */
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_check_rx_xenvif(struct xenvif *vif);
 
 /* Queue an SKB for transmission to the frontend */
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
@@ -161,7 +212,12 @@ void xenvif_notify_tx_completion(struct xenvif *vif);
 void xenvif_carrier_off(struct xenvif *vif);
 
 /* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+int xenvif_tx_action(struct xenvif *vif, int budget);
+void xenvif_rx_action(struct xenvif *vif);
+
+int xenvif_kthread(void *data);
 
 extern bool separate_tx_rx_irq;
 
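The pending ring moved into struct xenvif above is a power-of-two circular buffer: pending_prod starts at MAX_PENDING_REQS (see xenvif_alloc() in the next file) and pending_cons at 0, so prod - cons counts free entries. A standalone sketch of that accounting, mirroring pending_index() and nr_pending_reqs() from netback.c; the sketch_ prefix marks it as illustrative:

#define SKETCH_MAX_PENDING_REQS 256

typedef unsigned int sketch_ring_idx_t;

/* wrap an ever-increasing counter onto the ring */
static inline sketch_ring_idx_t sketch_pending_index(unsigned int i)
{
	return i & (SKETCH_MAX_PENDING_REQS - 1);
}

/* entries currently in use: ring size minus the free count (prod - cons) */
static inline sketch_ring_idx_t
sketch_nr_pending_reqs(sketch_ring_idx_t prod, sketch_ring_idx_t cons)
{
	return SKETCH_MAX_PENDING_REQS - prod + cons;
}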
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db0389d..625c6f49cfba 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -30,6 +30,7 @@
 
 #include "common.h"
 
+#include <linux/kthread.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
@@ -38,17 +39,7 @@
 #include <asm/xen/hypercall.h>
 
 #define XENVIF_QUEUE_LENGTH 32
-
-void xenvif_get(struct xenvif *vif)
-{
-	atomic_inc(&vif->refcnt);
-}
-
-void xenvif_put(struct xenvif *vif)
-{
-	if (atomic_dec_and_test(&vif->refcnt))
-		wake_up(&vif->waiting_to_free);
-}
+#define XENVIF_NAPI_WEIGHT 64
 
 int xenvif_schedulable(struct xenvif *vif)
 {
@@ -57,28 +48,62 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static int xenvif_rx_schedulable(struct xenvif *vif)
 {
-	return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (vif->netbk == NULL)
-		return IRQ_HANDLED;
-
-	xen_netbk_schedule_xenvif(vif);
+	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
+		napi_schedule(&vif->napi);
 
 	return IRQ_HANDLED;
 }
 
+static int xenvif_poll(struct napi_struct *napi, int budget)
+{
+	struct xenvif *vif = container_of(napi, struct xenvif, napi);
+	int work_done;
+
+	work_done = xenvif_tx_action(vif, budget);
+
+	if (work_done < budget) {
+		int more_to_do = 0;
+		unsigned long flags;
+
+		/* It is necessary to disable IRQ before calling
+		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
+		 * lose event from the frontend.
+		 *
+		 * Consider:
+		 *   RING_HAS_UNCONSUMED_REQUESTS
+		 *   <frontend generates event to trigger napi_schedule>
+		 *   __napi_complete
+		 *
+		 * This handler is still in scheduled state so the
+		 * event has no effect at all. After __napi_complete
+		 * this handler is descheduled and cannot get
+		 * scheduled again. We lose event in this case and the ring
+		 * will be completely stalled.
+		 */
+
+		local_irq_save(flags);
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+		if (!more_to_do)
+			__napi_complete(napi);
+
+		local_irq_restore(flags);
+	}
+
+	return work_done;
+}
+
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (vif->netbk == NULL)
-		return IRQ_HANDLED;
-
 	if (xenvif_rx_schedulable(vif))
 		netif_wake_queue(vif->dev);
 
@@ -99,7 +124,8 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	BUG_ON(skb->dev != dev);
 
-	if (vif->netbk == NULL)
+	/* Drop the packet if vif is not ready */
+	if (vif->task == NULL)
 		goto drop;
 
 	/* Drop the packet if the target domain has no receive buffers. */
@@ -107,13 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 
 	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
-	xenvif_get(vif);
+	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
 
-	if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+	if (vif->can_queue && xenvif_must_stop_queue(vif))
 		netif_stop_queue(dev);
 
-	xen_netbk_queue_tx_skb(vif, skb);
+	xenvif_queue_tx_skb(vif, skb);
 
 	return NETDEV_TX_OK;
 
@@ -123,11 +148,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-	netif_rx_ni(skb);
-}
-
 void xenvif_notify_tx_completion(struct xenvif *vif)
 {
 	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
@@ -142,21 +162,20 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 
 static void xenvif_up(struct xenvif *vif)
 {
-	xen_netbk_add_xenvif(vif);
+	napi_enable(&vif->napi);
 	enable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		enable_irq(vif->rx_irq);
-	xen_netbk_check_rx_xenvif(vif);
+	xenvif_check_rx_xenvif(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
+	napi_disable(&vif->napi);
 	disable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		disable_irq(vif->rx_irq);
 	del_timer_sync(&vif->credit_timeout);
-	xen_netbk_deschedule_xenvif(vif);
-	xen_netbk_remove_xenvif(vif);
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -272,11 +291,12 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	struct net_device *dev;
 	struct xenvif *vif;
 	char name[IFNAMSIZ] = {};
+	int i;
 
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
 	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
 	if (dev == NULL) {
-		pr_warn("Could not allocate netdev\n");
+		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -285,14 +305,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif = netdev_priv(dev);
 	vif->domid = domid;
 	vif->handle = handle;
-	vif->netbk = NULL;
 	vif->can_sg = 1;
 	vif->csum = 1;
-	atomic_set(&vif->refcnt, 1);
-	init_waitqueue_head(&vif->waiting_to_free);
 	vif->dev = dev;
-	INIT_LIST_HEAD(&vif->schedule_list);
-	INIT_LIST_HEAD(&vif->notify_list);
 
 	vif->credit_bytes = vif->remaining_credit = ~0UL;
 	vif->credit_usec = 0UL;
@@ -307,6 +322,16 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
 	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
+	skb_queue_head_init(&vif->rx_queue);
+	skb_queue_head_init(&vif->tx_queue);
+
+	vif->pending_cons = 0;
+	vif->pending_prod = MAX_PENDING_REQS;
+	for (i = 0; i < MAX_PENDING_REQS; i++)
+		vif->pending_ring[i] = i;
+	for (i = 0; i < MAX_PENDING_REQS; i++)
+		vif->mmap_pages[i] = NULL;
+
 	/*
 	 * Initialise a dummy MAC address. We choose the numerically
 	 * largest non-broadcast address to prevent the address getting
@@ -316,6 +341,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	memset(dev->dev_addr, 0xFF, ETH_ALEN);
 	dev->dev_addr[0] &= ~0x01;
 
+	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
+
 	netif_carrier_off(dev);
 
 	err = register_netdev(dev);
@@ -341,7 +368,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 
 	__module_get(THIS_MODULE);
 
-	err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
 	if (err < 0)
 		goto err;
 
@@ -377,7 +404,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	xenvif_get(vif);
+	init_waitqueue_head(&vif->wq);
+	vif->task = kthread_create(xenvif_kthread,
+				   (void *)vif, vif->dev->name);
+	if (IS_ERR(vif->task)) {
+		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		err = PTR_ERR(vif->task);
+		goto err_rx_unbind;
+	}
 
 	rtnl_lock();
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
@@ -388,12 +422,18 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		xenvif_up(vif);
 	rtnl_unlock();
 
+	wake_up_process(vif->task);
+
 	return 0;
+
+err_rx_unbind:
+	unbind_from_irqhandler(vif->rx_irq, vif);
+	vif->rx_irq = 0;
 err_tx_unbind:
 	unbind_from_irqhandler(vif->tx_irq, vif);
 	vif->tx_irq = 0;
 err_unmap:
-	xen_netbk_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(vif);
 err:
 	module_put(THIS_MODULE);
 	return err;
@@ -408,7 +448,6 @@ void xenvif_carrier_off(struct xenvif *vif)
 	if (netif_running(dev))
 		xenvif_down(vif);
 	rtnl_unlock();
-	xenvif_put(vif);
 }
 
 void xenvif_disconnect(struct xenvif *vif)
@@ -422,9 +461,6 @@ void xenvif_disconnect(struct xenvif *vif)
 	if (netif_carrier_ok(vif->dev))
 		xenvif_carrier_off(vif);
 
-	atomic_dec(&vif->refcnt);
-	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
-
 	if (vif->tx_irq) {
 		if (vif->tx_irq == vif->rx_irq)
 			unbind_from_irqhandler(vif->tx_irq, vif);
@@ -438,9 +474,14 @@ void xenvif_disconnect(struct xenvif *vif)
 		need_module_put = 1;
 	}
 
+	if (vif->task)
+		kthread_stop(vif->task);
+
+	netif_napi_del(&vif->napi);
+
 	unregister_netdev(vif->dev);
 
-	xen_netbk_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(vif);
 
 	free_netdev(vif->dev);
 
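xenvif_connect() above deliberately splits kthread_create() from wake_up_process() so the per-vif RX thread cannot run before the rings are mapped, and xenvif_disconnect() pairs it with kthread_stop(). A minimal sketch of that lifecycle under the same assumptions; example_kthread and example_pending are illustrative stand-ins for the driver's real work condition:

#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_pending;

static int example_kthread(void *data)
{
	while (!kthread_should_stop()) {
		/* sleep until there is work or we are asked to stop */
		wait_event_interruptible(example_wq,
					 example_pending ||
					 kthread_should_stop());
		example_pending = 0;
		/* drain queued work here */
	}
	return 0;
}

static struct task_struct *example_start(void)
{
	struct task_struct *task;

	task = kthread_create(example_kthread, NULL, "example");
	if (!IS_ERR(task))
		wake_up_process(task);	/* only after setup is complete */
	return task;
}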
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64828de25d9a..956130c70036 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -70,131 +70,26 @@ module_param(fatal_skb_slots, uint, 0444);
  */
 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
-typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
-
-struct pending_tx_info {
-	struct xen_netif_tx_request req; /* coalesced tx request */
-	struct xenvif *vif;
-	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
-				  * if it is head of one or more tx
-				  * reqs
-				  */
-};
-
-struct netbk_rx_meta {
-	int id;
-	int size;
-	int gso_size;
-};
-
-#define MAX_PENDING_REQS 256
-
-/* Discriminate from any valid pending_idx value. */
-#define INVALID_PENDING_IDX 0xFFFF
-
-#define MAX_BUFFER_OFFSET PAGE_SIZE
-
-/* extra field used in struct page */
-union page_ext {
-	struct {
-#if BITS_PER_LONG < 64
-#define IDX_WIDTH 8
-#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
-		unsigned int group:GROUP_WIDTH;
-		unsigned int idx:IDX_WIDTH;
-#else
-		unsigned int group, idx;
-#endif
-	} e;
-	void *mapping;
-};
-
-struct xen_netbk {
-	wait_queue_head_t wq;
-	struct task_struct *task;
-
-	struct sk_buff_head rx_queue;
-	struct sk_buff_head tx_queue;
-
-	struct timer_list net_timer;
-
-	struct page *mmap_pages[MAX_PENDING_REQS];
-
-	pending_ring_idx_t pending_prod;
-	pending_ring_idx_t pending_cons;
-	struct list_head net_schedule_list;
-
-	/* Protect the net_schedule_list in netif. */
-	spinlock_t net_schedule_list_lock;
-
-	atomic_t netfront_count;
-
-	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-	/* Coalescing tx requests before copying makes number of grant
-	 * copy ops greater or equal to number of slots required. In
-	 * worst case a tx request consumes 2 gnttab_copy.
-	 */
-	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
-	u16 pending_ring[MAX_PENDING_REQS];
-
-	/*
-	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-	 * head/fragment page uses 2 copy operations because it
-	 * straddles two buffers in the frontend.
-	 */
-	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
-};
-
-static struct xen_netbk *xen_netbk;
-static int xen_netbk_group_nr;
-
 /*
  * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
  * one or more merged tx requests, otherwise it is the continuation of
  * previous tx request.
  */
-static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
-{
-	return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
-void xen_netbk_add_xenvif(struct xenvif *vif)
+static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
 {
-	int i;
-	int min_netfront_count;
-	int min_group = 0;
-	struct xen_netbk *netbk;
-
-	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
-	for (i = 0; i < xen_netbk_group_nr; i++) {
-		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
-		if (netfront_count < min_netfront_count) {
-			min_group = i;
-			min_netfront_count = netfront_count;
-		}
-	}
-
-	netbk = &xen_netbk[min_group];
-
-	vif->netbk = netbk;
-	atomic_inc(&netbk->netfront_count);
+	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
 }
 
-void xen_netbk_remove_xenvif(struct xenvif *vif)
-{
-	struct xen_netbk *netbk = vif->netbk;
-	vif->netbk = NULL;
-	atomic_dec(&netbk->netfront_count);
-}
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+			       u8 status);
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
-				  u8 status);
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8 st);
+
+static inline int tx_work_todo(struct xenvif *vif);
+static inline int rx_work_todo(struct xenvif *vif);
+
 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 						      u16 id,
 						      s8 st,
@@ -202,55 +97,16 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 						      u16 size,
 						      u16 flags);
 
-static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
+static inline unsigned long idx_to_pfn(struct xenvif *vif,
 				       u16 idx)
 {
-	return page_to_pfn(netbk->mmap_pages[idx]);
+	return page_to_pfn(vif->mmap_pages[idx]);
 }
 
-static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
+static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 					 u16 idx)
 {
-	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
-}
-
-/* extra field used in struct page */
-static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
-				unsigned int idx)
-{
-	unsigned int group = netbk - xen_netbk;
-	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
-
-	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
-	pg->mapping = ext.mapping;
-}
-
-static int get_page_ext(struct page *pg,
-			unsigned int *pgroup, unsigned int *pidx)
-{
-	union page_ext ext = { .mapping = pg->mapping };
-	struct xen_netbk *netbk;
-	unsigned int group, idx;
-
-	group = ext.e.group - 1;
-
-	if (group < 0 || group >= xen_netbk_group_nr)
-		return 0;
-
-	netbk = &xen_netbk[group];
-
-	idx = ext.e.idx;
-
-	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
-		return 0;
-
-	if (netbk->mmap_pages[idx] != pg)
-		return 0;
-
-	*pgroup = group;
-	*pidx = idx;
-
-	return 1;
+	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
 /*
@@ -278,15 +134,10 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 {
 	return MAX_PENDING_REQS -
-		netbk->pending_prod + netbk->pending_cons;
-}
-
-static void xen_netbk_kick_thread(struct xen_netbk *netbk)
-{
-	wake_up(&netbk->wq);
+		vif->pending_prod + vif->pending_cons;
 }
 
 static int max_required_rx_slots(struct xenvif *vif)
@@ -300,7 +151,7 @@ static int max_required_rx_slots(struct xenvif *vif)
 	return max;
 }
 
-int xen_netbk_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif *vif)
 {
 	RING_IDX peek = vif->rx_req_cons_peek;
 	RING_IDX needed = max_required_rx_slots(vif);
@@ -309,16 +160,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif)
 	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
 }
 
-int xen_netbk_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif *vif)
 {
-	if (!xen_netbk_rx_ring_full(vif))
+	if (!xenvif_rx_ring_full(vif))
 		return 0;
 
 	vif->rx.sring->req_event = vif->rx_req_cons_peek +
 				   max_required_rx_slots(vif);
 	mb(); /* request notification /then/ check the queue */
 
-	return xen_netbk_rx_ring_full(vif);
+	return xenvif_rx_ring_full(vif);
 }
 
 /*
@@ -364,9 +215,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
- * netbk_gop_frag_copy.
+ * xenvif_gop_frag_copy.
  */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
 	unsigned int count;
 	int i, copy_off;
@@ -418,15 +269,15 @@ struct netrx_pending_operations {
 	unsigned copy_prod, copy_cons;
 	unsigned meta_prod, meta_cons;
 	struct gnttab_copy *copy;
-	struct netbk_rx_meta *meta;
+	struct xenvif_rx_meta *meta;
 	int copy_off;
 	grant_ref_t copy_gref;
 };
 
-static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
-						struct netrx_pending_operations *npo)
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+						 struct netrx_pending_operations *npo)
 {
-	struct netbk_rx_meta *meta;
+	struct xenvif_rx_meta *meta;
 	struct xen_netif_rx_request *req;
 
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
@@ -446,19 +297,13 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
  */
-static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
-				struct netrx_pending_operations *npo,
-				struct page *page, unsigned long size,
-				unsigned long offset, int *head)
+static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+				 struct netrx_pending_operations *npo,
+				 struct page *page, unsigned long size,
+				 unsigned long offset, int *head)
 {
 	struct gnttab_copy *copy_gop;
-	struct netbk_rx_meta *meta;
-	/*
-	 * These variables are used iff get_page_ext returns true,
-	 * in which case they are guaranteed to be initialized.
-	 */
-	unsigned int uninitialized_var(group), uninitialized_var(idx);
-	int foreign = get_page_ext(page, &group, &idx);
+	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
 
 	/* Data must not cross a page boundary. */
@@ -494,26 +339,15 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 
 		copy_gop = npo->copy + npo->copy_prod++;
 		copy_gop->flags = GNTCOPY_dest_gref;
-		if (foreign) {
-			struct xen_netbk *netbk = &xen_netbk[group];
-			struct pending_tx_info *src_pend;
-
-			src_pend = &netbk->pending_tx_info[idx];
+		copy_gop->len = bytes;
 
-			copy_gop->source.domid = src_pend->vif->domid;
-			copy_gop->source.u.ref = src_pend->req.gref;
-			copy_gop->flags |= GNTCOPY_source_gref;
-		} else {
-			void *vaddr = page_address(page);
-			copy_gop->source.domid = DOMID_SELF;
-			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
-		}
+		copy_gop->source.domid = DOMID_SELF;
+		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
 		copy_gop->source.offset = offset;
-		copy_gop->dest.domid = vif->domid;
 
+		copy_gop->dest.domid = vif->domid;
 		copy_gop->dest.offset = npo->copy_off;
 		copy_gop->dest.u.ref = npo->copy_gref;
-		copy_gop->len = bytes;
 
 		npo->copy_off += bytes;
 		meta->size += bytes;
@@ -549,14 +383,14 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  * frontend-side LRO).
  */
-static int netbk_gop_skb(struct sk_buff *skb,
-			 struct netrx_pending_operations *npo)
+static int xenvif_gop_skb(struct sk_buff *skb,
+			  struct netrx_pending_operations *npo)
 {
 	struct xenvif *vif = netdev_priv(skb->dev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int i;
 	struct xen_netif_rx_request *req;
-	struct netbk_rx_meta *meta;
+	struct xenvif_rx_meta *meta;
 	unsigned char *data;
 	int head = 1;
 	int old_meta_prod;
@@ -593,30 +427,30 @@ static int netbk_gop_skb(struct sk_buff *skb,
 		if (data + len > skb_tail_pointer(skb))
 			len = skb_tail_pointer(skb) - data;
 
-		netbk_gop_frag_copy(vif, skb, npo,
-				    virt_to_page(data), len, offset, &head);
+		xenvif_gop_frag_copy(vif, skb, npo,
+				     virt_to_page(data), len, offset, &head);
 		data += len;
 	}
 
 	for (i = 0; i < nr_frags; i++) {
-		netbk_gop_frag_copy(vif, skb, npo,
-				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
-				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
-				    skb_shinfo(skb)->frags[i].page_offset,
-				    &head);
+		xenvif_gop_frag_copy(vif, skb, npo,
+				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
+				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
+				     skb_shinfo(skb)->frags[i].page_offset,
+				     &head);
 	}
 
 	return npo->meta_prod - old_meta_prod;
 }
 
 /*
- * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
  * used to set up the operations on the top of
  * netrx_pending_operations, which have since been done. Check that
  * they didn't give any errors and advance over them.
  */
-static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
-			   struct netrx_pending_operations *npo)
+static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
+			    struct netrx_pending_operations *npo)
 {
 	struct gnttab_copy *copy_op;
 	int status = XEN_NETIF_RSP_OKAY;
@@ -635,9 +469,9 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
 	return status;
 }
 
-static void netbk_add_frag_responses(struct xenvif *vif, int status,
-				     struct netbk_rx_meta *meta,
-				     int nr_meta_slots)
+static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+				      struct xenvif_rx_meta *meta,
+				      int nr_meta_slots)
 {
 	int i;
 	unsigned long offset;
@@ -665,9 +499,13 @@ struct skb_cb_overlay {
 	int meta_slots_used;
 };
 
-static void xen_netbk_rx_action(struct xen_netbk *netbk)
+static void xenvif_kick_thread(struct xenvif *vif)
+{
+	wake_up(&vif->wq);
+}
+
+void xenvif_rx_action(struct xenvif *vif)
 {
-	struct xenvif *vif = NULL, *tmp;
 	s8 status;
 	u16 flags;
 	struct xen_netif_rx_response *resp;
@@ -679,22 +517,23 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 	int count;
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
+	int need_to_notify = 0;
 
 	struct netrx_pending_operations npo = {
-		.copy = netbk->grant_copy_op,
-		.meta = netbk->meta,
+		.copy = vif->grant_copy_op,
+		.meta = vif->meta,
 	};
 
 	skb_queue_head_init(&rxq);
 
 	count = 0;
 
-	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
 		vif = netdev_priv(skb->dev);
 		nr_frags = skb_shinfo(skb)->nr_frags;
 
 		sco = (struct skb_cb_overlay *)skb->cb;
-		sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
 
 		count += nr_frags + 1;
 
@@ -706,27 +545,27 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 			break;
 	}
 
-	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
+	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
 	if (!npo.copy_prod)
 		return;
 
-	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
-	gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
+	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
 		sco = (struct skb_cb_overlay *)skb->cb;
 
 		vif = netdev_priv(skb->dev);
 
-		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
 			resp = RING_GET_RESPONSE(&vif->rx,
 						 vif->rx.rsp_prod_pvt++);
 
 			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
 
-			resp->offset = netbk->meta[npo.meta_cons].gso_size;
-			resp->id = netbk->meta[npo.meta_cons].id;
+			resp->offset = vif->meta[npo.meta_cons].gso_size;
+			resp->id = vif->meta[npo.meta_cons].id;
 			resp->status = sco->meta_slots_used;
 
 			npo.meta_cons++;
@@ -737,7 +576,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
 
-		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
 
 		if (sco->meta_slots_used == 1)
 			flags = 0;
@@ -751,12 +590,12 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 			flags |= XEN_NETRXF_data_validated;
 
 		offset = 0;
-		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
+		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
 					status, offset,
-					netbk->meta[npo.meta_cons].size,
+					vif->meta[npo.meta_cons].size,
 					flags);
 
-		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
 			struct xen_netif_extra_info *gso =
 				(struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&vif->rx,
@@ -764,7 +603,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
-			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
+			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
 			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
@@ -773,123 +612,44 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 			gso->flags = 0;
 		}
 
-		netbk_add_frag_responses(vif, status,
-					 netbk->meta + npo.meta_cons + 1,
-					 sco->meta_slots_used);
+		xenvif_add_frag_responses(vif, status,
+					  vif->meta + npo.meta_cons + 1,
+					  sco->meta_slots_used);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
+		if (ret)
+			need_to_notify = 1;
+
 		xenvif_notify_tx_completion(vif);
 
-		if (ret && list_empty(&vif->notify_list))
-			list_add_tail(&vif->notify_list, &notify);
-		else
-			xenvif_put(vif);
 		npo.meta_cons += sco->meta_slots_used;
 		dev_kfree_skb(skb);
 	}
 
-	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
-		notify_remote_via_irq(vif->rx_irq);
-		list_del_init(&vif->notify_list);
-		xenvif_put(vif);
-	}
+	if (need_to_notify)
+		notify_remote_via_irq(vif->rx_irq);
 
 	/* More work to do? */
-	if (!skb_queue_empty(&netbk->rx_queue) &&
-	    !timer_pending(&netbk->net_timer))
-		xen_netbk_kick_thread(netbk);
-}
-
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-	struct xen_netbk *netbk = vif->netbk;
-
-	skb_queue_tail(&netbk->rx_queue, skb);
-
-	xen_netbk_kick_thread(netbk);
-}
-
-static void xen_netbk_alarm(unsigned long data)
-{
-	struct xen_netbk *netbk = (struct xen_netbk *)data;
-	xen_netbk_kick_thread(netbk);
-}
-
-static int __on_net_schedule_list(struct xenvif *vif)
-{
-	return !list_empty(&vif->schedule_list);
-}
-
-/* Must be called with net_schedule_list_lock held */
-static void remove_from_net_schedule_list(struct xenvif *vif)
-{
-	if (likely(__on_net_schedule_list(vif))) {
-		list_del_init(&vif->schedule_list);
-		xenvif_put(vif);
-	}
-}
-
-static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
-{
-	struct xenvif *vif = NULL;
-
-	spin_lock_irq(&netbk->net_schedule_list_lock);
-	if (list_empty(&netbk->net_schedule_list))
-		goto out;
-
-	vif = list_first_entry(&netbk->net_schedule_list,
-			       struct xenvif, schedule_list);
-	if (!vif)
-		goto out;
-
-	xenvif_get(vif);
-
-	remove_from_net_schedule_list(vif);
-out:
-	spin_unlock_irq(&netbk->net_schedule_list_lock);
-	return vif;
+	if (!skb_queue_empty(&vif->rx_queue))
+		xenvif_kick_thread(vif);
 }
 
-void xen_netbk_schedule_xenvif(struct xenvif *vif)
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
 {
-	unsigned long flags;
-	struct xen_netbk *netbk = vif->netbk;
-
-	if (__on_net_schedule_list(vif))
-		goto kick;
-
-	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
-	if (!__on_net_schedule_list(vif) &&
-	    likely(xenvif_schedulable(vif))) {
-		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
-		xenvif_get(vif);
-	}
-	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
-
-kick:
-	smp_mb();
-	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
-	    !list_empty(&netbk->net_schedule_list))
-		xen_netbk_kick_thread(netbk);
-}
+	skb_queue_tail(&vif->rx_queue, skb);
 
-void xen_netbk_deschedule_xenvif(struct xenvif *vif)
-{
-	struct xen_netbk *netbk = vif->netbk;
-	spin_lock_irq(&netbk->net_schedule_list_lock);
-	remove_from_net_schedule_list(vif);
-	spin_unlock_irq(&netbk->net_schedule_list_lock);
+	xenvif_kick_thread(vif);
 }
 
-void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif *vif)
 {
 	int more_to_do;
 
 	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
 
 	if (more_to_do)
-		xen_netbk_schedule_xenvif(vif);
+		napi_schedule(&vif->napi);
 }
 
 static void tx_add_credit(struct xenvif *vif)
@@ -916,11 +676,11 @@ static void tx_credit_callback(unsigned long data)
 {
 	struct xenvif *vif = (struct xenvif *)data;
 	tx_add_credit(vif);
-	xen_netbk_check_rx_xenvif(vif);
+	xenvif_check_rx_xenvif(vif);
 }
 
-static void netbk_tx_err(struct xenvif *vif,
-			 struct xen_netif_tx_request *txp, RING_IDX end)
+static void xenvif_tx_err(struct xenvif *vif,
+			  struct xen_netif_tx_request *txp, RING_IDX end)
 {
 	RING_IDX cons = vif->tx.req_cons;
 
@@ -931,21 +691,18 @@ static void netbk_tx_err(struct xenvif *vif,
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
 	} while (1);
 	vif->tx.req_cons = cons;
-	xen_netbk_check_rx_xenvif(vif);
-	xenvif_put(vif);
 }
 
-static void netbk_fatal_tx_err(struct xenvif *vif)
+static void xenvif_fatal_tx_err(struct xenvif *vif)
 {
 	netdev_err(vif->dev, "fatal error; disabling device\n");
 	xenvif_carrier_off(vif);
-	xenvif_put(vif);
 }
 
-static int netbk_count_requests(struct xenvif *vif,
-				struct xen_netif_tx_request *first,
-				struct xen_netif_tx_request *txp,
-				int work_to_do)
+static int xenvif_count_requests(struct xenvif *vif,
+				 struct xen_netif_tx_request *first,
+				 struct xen_netif_tx_request *txp,
+				 int work_to_do)
 {
 	RING_IDX cons = vif->tx.req_cons;
 	int slots = 0;
@@ -962,7 +719,7 @@ static int netbk_count_requests(struct xenvif *vif,
 		netdev_err(vif->dev,
 			   "Asked for %d slots but exceeds this limit\n",
 			   work_to_do);
-		netbk_fatal_tx_err(vif);
+		xenvif_fatal_tx_err(vif);
 		return -ENODATA;
 	}
 
@@ -973,7 +730,7 @@ static int netbk_count_requests(struct xenvif *vif,
 			netdev_err(vif->dev,
 				   "Malicious frontend using %d slots, threshold %u\n",
 				   slots, fatal_skb_slots);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			return -E2BIG;
 		}
 
@@ -1021,7 +778,7 @@ static int netbk_count_requests(struct xenvif *vif,
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
 			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1033,30 +790,30 @@ static int netbk_count_requests(struct xenvif *vif,
 	} while (more_data);
 
 	if (drop_err) {
-		netbk_tx_err(vif, first, cons + slots);
+		xenvif_tx_err(vif, first, cons + slots);
 		return drop_err;
 	}
 
 	return slots;
 }
 
-static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
-					 u16 pending_idx)
+static struct page *xenvif_alloc_page(struct xenvif *vif,
+				      u16 pending_idx)
 {
 	struct page *page;
-	page = alloc_page(GFP_KERNEL|__GFP_COLD);
+
+	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
 	if (!page)
 		return NULL;
-	set_page_ext(page, netbk, pending_idx);
-	netbk->mmap_pages[pending_idx] = page;
+	vif->mmap_pages[pending_idx] = page;
+
 	return page;
 }
 
-static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
-						  struct xenvif *vif,
-						  struct sk_buff *skb,
-						  struct xen_netif_tx_request *txp,
-						  struct gnttab_copy *gop)
+static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
+					       struct sk_buff *skb,
+					       struct xen_netif_tx_request *txp,
+					       struct gnttab_copy *gop)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
@@ -1079,14 +836,14 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1079 836
1080 /* Coalesce tx requests; at this point the packet passed in 837 /* Coalesce tx requests; at this point the packet passed in
1081 * should be <= 64K. Any packets larger than 64K have been 838 * should be <= 64K. Any packets larger than 64K have been
1082 * handled in netbk_count_requests(). 839 * handled in xenvif_count_requests().
1083 */ 840 */
1084 for (shinfo->nr_frags = slot = start; slot < nr_slots; 841 for (shinfo->nr_frags = slot = start; slot < nr_slots;
1085 shinfo->nr_frags++) { 842 shinfo->nr_frags++) {
1086 struct pending_tx_info *pending_tx_info = 843 struct pending_tx_info *pending_tx_info =
1087 netbk->pending_tx_info; 844 vif->pending_tx_info;
1088 845
1089 page = alloc_page(GFP_KERNEL|__GFP_COLD); 846 page = alloc_page(GFP_ATOMIC|__GFP_COLD);
1090 if (!page) 847 if (!page)
1091 goto err; 848 goto err;
1092 849
@@ -1121,21 +878,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1121 gop->len = txp->size; 878 gop->len = txp->size;
1122 dst_offset += gop->len; 879 dst_offset += gop->len;
1123 880
1124 index = pending_index(netbk->pending_cons++); 881 index = pending_index(vif->pending_cons++);
1125 882
1126 pending_idx = netbk->pending_ring[index]; 883 pending_idx = vif->pending_ring[index];
1127 884
1128 memcpy(&pending_tx_info[pending_idx].req, txp, 885 memcpy(&pending_tx_info[pending_idx].req, txp,
1129 sizeof(*txp)); 886 sizeof(*txp));
1130 xenvif_get(vif);
1131
1132 pending_tx_info[pending_idx].vif = vif;
1133 887
1134 /* Poison these fields; the corresponding 888 /* Poison these fields; the corresponding
1135 * fields for the head tx req will be set 889 * fields for the head tx req will be set
1136 * to correct values after the loop. 890 * to correct values after the loop.
1137 */ 891 */
1138 netbk->mmap_pages[pending_idx] = (void *)(~0UL); 892 vif->mmap_pages[pending_idx] = (void *)(~0UL);
1139 pending_tx_info[pending_idx].head = 893 pending_tx_info[pending_idx].head =
1140 INVALID_PENDING_RING_IDX; 894 INVALID_PENDING_RING_IDX;
1141 895
@@ -1155,8 +909,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1155 first->req.offset = 0; 909 first->req.offset = 0;
1156 first->req.size = dst_offset; 910 first->req.size = dst_offset;
1157 first->head = start_idx; 911 first->head = start_idx;
1158 set_page_ext(page, netbk, head_idx); 912 vif->mmap_pages[head_idx] = page;
1159 netbk->mmap_pages[head_idx] = page;
1160 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); 913 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
1161 } 914 }
1162 915
@@ -1166,20 +919,20 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1166err: 919err:
1167 /* Unwind, freeing all pages and sending error responses. */ 920 /* Unwind, freeing all pages and sending error responses. */
1168 while (shinfo->nr_frags-- > start) { 921 while (shinfo->nr_frags-- > start) {
1169 xen_netbk_idx_release(netbk, 922 xenvif_idx_release(vif,
1170 frag_get_pending_idx(&frags[shinfo->nr_frags]), 923 frag_get_pending_idx(&frags[shinfo->nr_frags]),
1171 XEN_NETIF_RSP_ERROR); 924 XEN_NETIF_RSP_ERROR);
1172 } 925 }
1173 /* The head too, if necessary. */ 926 /* The head too, if necessary. */
1174 if (start) 927 if (start)
1175 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 928 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
1176 929
1177 return NULL; 930 return NULL;
1178} 931}
1179 932
1180static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, 933static int xenvif_tx_check_gop(struct xenvif *vif,
1181 struct sk_buff *skb, 934 struct sk_buff *skb,
1182 struct gnttab_copy **gopp) 935 struct gnttab_copy **gopp)
1183{ 936{
1184 struct gnttab_copy *gop = *gopp; 937 struct gnttab_copy *gop = *gopp;
1185 u16 pending_idx = *((u16 *)skb->data); 938 u16 pending_idx = *((u16 *)skb->data);
@@ -1192,7 +945,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1192 /* Check status of header. */ 945 /* Check status of header. */
1193 err = gop->status; 946 err = gop->status;
1194 if (unlikely(err)) 947 if (unlikely(err))
1195 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 948 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
1196 949
1197 /* Skip first skb fragment if it is on same page as header fragment. */ 950 /* Skip first skb fragment if it is on same page as header fragment. */
1198 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); 951 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
@@ -1202,7 +955,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1202 pending_ring_idx_t head; 955 pending_ring_idx_t head;
1203 956
1204 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 957 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1205 tx_info = &netbk->pending_tx_info[pending_idx]; 958 tx_info = &vif->pending_tx_info[pending_idx];
1206 head = tx_info->head; 959 head = tx_info->head;
1207 960
1208 /* Check error status: if okay then remember grant handle. */ 961 /* Check error status: if okay then remember grant handle. */
@@ -1210,18 +963,19 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1210 newerr = (++gop)->status; 963 newerr = (++gop)->status;
1211 if (newerr) 964 if (newerr)
1212 break; 965 break;
1213 peek = netbk->pending_ring[pending_index(++head)]; 966 peek = vif->pending_ring[pending_index(++head)];
1214 } while (!pending_tx_is_head(netbk, peek)); 967 } while (!pending_tx_is_head(vif, peek));
1215 968
1216 if (likely(!newerr)) { 969 if (likely(!newerr)) {
1217 /* Had a previous error? Invalidate this fragment. */ 970 /* Had a previous error? Invalidate this fragment. */
1218 if (unlikely(err)) 971 if (unlikely(err))
1219 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 972 xenvif_idx_release(vif, pending_idx,
973 XEN_NETIF_RSP_OKAY);
1220 continue; 974 continue;
1221 } 975 }
1222 976
1223 /* Error on this fragment: respond to client with an error. */ 977 /* Error on this fragment: respond to client with an error. */
1224 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 978 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
1225 979
1226 /* Not the first error? Preceding frags already invalidated. */ 980 /* Not the first error? Preceding frags already invalidated. */
1227 if (err) 981 if (err)
@@ -1229,10 +983,11 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1229 983
1230 /* First error: invalidate header and preceding fragments. */ 984 /* First error: invalidate header and preceding fragments. */
1231 pending_idx = *((u16 *)skb->data); 985 pending_idx = *((u16 *)skb->data);
1232 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 986 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
1233 for (j = start; j < i; j++) { 987 for (j = start; j < i; j++) {
1234 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 988 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1235 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 989 xenvif_idx_release(vif, pending_idx,
990 XEN_NETIF_RSP_OKAY);
1236 } 991 }
1237 992
1238 /* Remember the error: invalidate all subsequent fragments. */ 993 /* Remember the error: invalidate all subsequent fragments. */
@@ -1243,7 +998,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1243 return err; 998 return err;
1244} 999}
1245 1000
1246static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) 1001static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1247{ 1002{
1248 struct skb_shared_info *shinfo = skb_shinfo(skb); 1003 struct skb_shared_info *shinfo = skb_shinfo(skb);
1249 int nr_frags = shinfo->nr_frags; 1004 int nr_frags = shinfo->nr_frags;
@@ -1257,20 +1012,20 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1257 1012
1258 pending_idx = frag_get_pending_idx(frag); 1013 pending_idx = frag_get_pending_idx(frag);
1259 1014
1260 txp = &netbk->pending_tx_info[pending_idx].req; 1015 txp = &vif->pending_tx_info[pending_idx].req;
1261 page = virt_to_page(idx_to_kaddr(netbk, pending_idx)); 1016 page = virt_to_page(idx_to_kaddr(vif, pending_idx));
1262 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); 1017 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1263 skb->len += txp->size; 1018 skb->len += txp->size;
1264 skb->data_len += txp->size; 1019 skb->data_len += txp->size;
1265 skb->truesize += txp->size; 1020 skb->truesize += txp->size;
1266 1021
1267 /* Take an extra reference to offset xen_netbk_idx_release */ 1022 /* Take an extra reference to offset xenvif_idx_release */
1268 get_page(netbk->mmap_pages[pending_idx]); 1023 get_page(vif->mmap_pages[pending_idx]);
1269 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1024 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
1270 } 1025 }
1271} 1026}
1272 1027
1273static int xen_netbk_get_extras(struct xenvif *vif, 1028static int xenvif_get_extras(struct xenvif *vif,
1274 struct xen_netif_extra_info *extras, 1029 struct xen_netif_extra_info *extras,
1275 int work_to_do) 1030 int work_to_do)
1276{ 1031{
@@ -1280,7 +1035,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1280 do { 1035 do {
1281 if (unlikely(work_to_do-- <= 0)) { 1036 if (unlikely(work_to_do-- <= 0)) {
1282 netdev_err(vif->dev, "Missing extra info\n"); 1037 netdev_err(vif->dev, "Missing extra info\n");
1283 netbk_fatal_tx_err(vif); 1038 xenvif_fatal_tx_err(vif);
1284 return -EBADR; 1039 return -EBADR;
1285 } 1040 }
1286 1041
@@ -1291,7 +1046,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1291 vif->tx.req_cons = ++cons; 1046 vif->tx.req_cons = ++cons;
1292 netdev_err(vif->dev, 1047 netdev_err(vif->dev,
1293 "Invalid extra type: %d\n", extra.type); 1048 "Invalid extra type: %d\n", extra.type);
1294 netbk_fatal_tx_err(vif); 1049 xenvif_fatal_tx_err(vif);
1295 return -EINVAL; 1050 return -EINVAL;
1296 } 1051 }
1297 1052
@@ -1302,20 +1057,20 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1302 return work_to_do; 1057 return work_to_do;
1303} 1058}
1304 1059
1305static int netbk_set_skb_gso(struct xenvif *vif, 1060static int xenvif_set_skb_gso(struct xenvif *vif,
1306 struct sk_buff *skb, 1061 struct sk_buff *skb,
1307 struct xen_netif_extra_info *gso) 1062 struct xen_netif_extra_info *gso)
1308{ 1063{
1309 if (!gso->u.gso.size) { 1064 if (!gso->u.gso.size) {
1310 netdev_err(vif->dev, "GSO size must not be zero.\n"); 1065 netdev_err(vif->dev, "GSO size must not be zero.\n");
1311 netbk_fatal_tx_err(vif); 1066 xenvif_fatal_tx_err(vif);
1312 return -EINVAL; 1067 return -EINVAL;
1313 } 1068 }
1314 1069
1315 /* Currently only TCPv4 S.O. is supported. */ 1070 /* Currently only TCPv4 S.O. is supported. */
1316 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { 1071 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1317 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); 1072 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1318 netbk_fatal_tx_err(vif); 1073 xenvif_fatal_tx_err(vif);
1319 return -EINVAL; 1074 return -EINVAL;
1320 } 1075 }
1321 1076
@@ -1426,16 +1181,14 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1426 return false; 1181 return false;
1427} 1182}
1428 1183
1429static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) 1184static unsigned xenvif_tx_build_gops(struct xenvif *vif)
1430{ 1185{
1431 struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; 1186 struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
1432 struct sk_buff *skb; 1187 struct sk_buff *skb;
1433 int ret; 1188 int ret;
1434 1189
1435 while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX 1190 while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1436 < MAX_PENDING_REQS) && 1191 < MAX_PENDING_REQS)) {
1437 !list_empty(&netbk->net_schedule_list)) {
1438 struct xenvif *vif;
1439 struct xen_netif_tx_request txreq; 1192 struct xen_netif_tx_request txreq;
1440 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1193 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1441 struct page *page; 1194 struct page *page;
@@ -1446,16 +1199,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1446 unsigned int data_len; 1199 unsigned int data_len;
1447 pending_ring_idx_t index; 1200 pending_ring_idx_t index;
1448 1201
1449 /* Get a netif from the list with work to do. */
1450 vif = poll_net_schedule_list(netbk);
1451 /* This can sometimes happen because the test of
1452 * list_empty(net_schedule_list) at the top of the
1453 * loop is unlocked. Just go back and have another
1454 * look.
1455 */
1456 if (!vif)
1457 continue;
1458
1459 if (vif->tx.sring->req_prod - vif->tx.req_cons > 1202 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1460 XEN_NETIF_TX_RING_SIZE) { 1203 XEN_NETIF_TX_RING_SIZE) {
1461 netdev_err(vif->dev, 1204 netdev_err(vif->dev,
@@ -1463,15 +1206,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1463 "req_prod %d, req_cons %d, size %ld\n", 1206 "req_prod %d, req_cons %d, size %ld\n",
1464 vif->tx.sring->req_prod, vif->tx.req_cons, 1207 vif->tx.sring->req_prod, vif->tx.req_cons,
1465 XEN_NETIF_TX_RING_SIZE); 1208 XEN_NETIF_TX_RING_SIZE);
1466 netbk_fatal_tx_err(vif); 1209 xenvif_fatal_tx_err(vif);
1467 continue; 1210 continue;
1468 } 1211 }
1469 1212
1470 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); 1213 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1471 if (!work_to_do) { 1214 if (!work_to_do)
1472 xenvif_put(vif); 1215 break;
1473 continue;
1474 }
1475 1216
1476 idx = vif->tx.req_cons; 1217 idx = vif->tx.req_cons;
1477 rmb(); /* Ensure that we see the request before we copy it. */ 1218 rmb(); /* Ensure that we see the request before we copy it. */
@@ -1479,10 +1220,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1479 1220
1480 /* Credit-based scheduling. */ 1221 /* Credit-based scheduling. */
1481 if (txreq.size > vif->remaining_credit && 1222 if (txreq.size > vif->remaining_credit &&
1482 tx_credit_exceeded(vif, txreq.size)) { 1223 tx_credit_exceeded(vif, txreq.size))
1483 xenvif_put(vif); 1224 break;
1484 continue;
1485 }
1486 1225
1487 vif->remaining_credit -= txreq.size; 1226 vif->remaining_credit -= txreq.size;
1488 1227
@@ -1491,24 +1230,24 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1491 1230
1492 memset(extras, 0, sizeof(extras)); 1231 memset(extras, 0, sizeof(extras));
1493 if (txreq.flags & XEN_NETTXF_extra_info) { 1232 if (txreq.flags & XEN_NETTXF_extra_info) {
1494 work_to_do = xen_netbk_get_extras(vif, extras, 1233 work_to_do = xenvif_get_extras(vif, extras,
1495 work_to_do); 1234 work_to_do);
1496 idx = vif->tx.req_cons; 1235 idx = vif->tx.req_cons;
1497 if (unlikely(work_to_do < 0)) 1236 if (unlikely(work_to_do < 0))
1498 continue; 1237 break;
1499 } 1238 }
1500 1239
1501 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); 1240 ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
1502 if (unlikely(ret < 0)) 1241 if (unlikely(ret < 0))
1503 continue; 1242 break;
1504 1243
1505 idx += ret; 1244 idx += ret;
1506 1245
1507 if (unlikely(txreq.size < ETH_HLEN)) { 1246 if (unlikely(txreq.size < ETH_HLEN)) {
1508 netdev_dbg(vif->dev, 1247 netdev_dbg(vif->dev,
1509 "Bad packet size: %d\n", txreq.size); 1248 "Bad packet size: %d\n", txreq.size);
1510 netbk_tx_err(vif, &txreq, idx); 1249 xenvif_tx_err(vif, &txreq, idx);
1511 continue; 1250 break;
1512 } 1251 }
1513 1252
1514 /* No crossing a page as the payload mustn't fragment. */ 1253 /* No crossing a page as the payload mustn't fragment. */
@@ -1517,12 +1256,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1517 "txreq.offset: %x, size: %u, end: %lu\n", 1256 "txreq.offset: %x, size: %u, end: %lu\n",
1518 txreq.offset, txreq.size, 1257 txreq.offset, txreq.size,
1519 (txreq.offset&~PAGE_MASK) + txreq.size); 1258 (txreq.offset&~PAGE_MASK) + txreq.size);
1520 netbk_fatal_tx_err(vif); 1259 xenvif_fatal_tx_err(vif);
1521 continue; 1260 break;
1522 } 1261 }
1523 1262
1524 index = pending_index(netbk->pending_cons); 1263 index = pending_index(vif->pending_cons);
1525 pending_idx = netbk->pending_ring[index]; 1264 pending_idx = vif->pending_ring[index];
1526 1265
1527 data_len = (txreq.size > PKT_PROT_LEN && 1266 data_len = (txreq.size > PKT_PROT_LEN &&
1528 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? 1267 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1533,7 +1272,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1533 if (unlikely(skb == NULL)) { 1272 if (unlikely(skb == NULL)) {
1534 netdev_dbg(vif->dev, 1273 netdev_dbg(vif->dev,
1535 "Can't allocate a skb in start_xmit.\n"); 1274 "Can't allocate a skb in start_xmit.\n");
1536 netbk_tx_err(vif, &txreq, idx); 1275 xenvif_tx_err(vif, &txreq, idx);
1537 break; 1276 break;
1538 } 1277 }
1539 1278
@@ -1544,19 +1283,19 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1544 struct xen_netif_extra_info *gso; 1283 struct xen_netif_extra_info *gso;
1545 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1284 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1546 1285
1547 if (netbk_set_skb_gso(vif, skb, gso)) { 1286 if (xenvif_set_skb_gso(vif, skb, gso)) {
1548 /* Failure in netbk_set_skb_gso is fatal. */ 1287 /* Failure in xenvif_set_skb_gso is fatal. */
1549 kfree_skb(skb); 1288 kfree_skb(skb);
1550 continue; 1289 break;
1551 } 1290 }
1552 } 1291 }
1553 1292
1554 /* XXX could copy straight to head */ 1293 /* XXX could copy straight to head */
1555 page = xen_netbk_alloc_page(netbk, pending_idx); 1294 page = xenvif_alloc_page(vif, pending_idx);
1556 if (!page) { 1295 if (!page) {
1557 kfree_skb(skb); 1296 kfree_skb(skb);
1558 netbk_tx_err(vif, &txreq, idx); 1297 xenvif_tx_err(vif, &txreq, idx);
1559 continue; 1298 break;
1560 } 1299 }
1561 1300
1562 gop->source.u.ref = txreq.gref; 1301 gop->source.u.ref = txreq.gref;
@@ -1572,10 +1311,9 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1572 1311
1573 gop++; 1312 gop++;
1574 1313
1575 memcpy(&netbk->pending_tx_info[pending_idx].req, 1314 memcpy(&vif->pending_tx_info[pending_idx].req,
1576 &txreq, sizeof(txreq)); 1315 &txreq, sizeof(txreq));
1577 netbk->pending_tx_info[pending_idx].vif = vif; 1316 vif->pending_tx_info[pending_idx].head = index;
1578 netbk->pending_tx_info[pending_idx].head = index;
1579 *((u16 *)skb->data) = pending_idx; 1317 *((u16 *)skb->data) = pending_idx;
1580 1318
1581 __skb_put(skb, data_len); 1319 __skb_put(skb, data_len);
@@ -1590,46 +1328,45 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1590 INVALID_PENDING_IDX); 1328 INVALID_PENDING_IDX);
1591 } 1329 }
1592 1330
1593 netbk->pending_cons++; 1331 vif->pending_cons++;
1594 1332
1595 request_gop = xen_netbk_get_requests(netbk, vif, 1333 request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
1596 skb, txfrags, gop);
1597 if (request_gop == NULL) { 1334 if (request_gop == NULL) {
1598 kfree_skb(skb); 1335 kfree_skb(skb);
1599 netbk_tx_err(vif, &txreq, idx); 1336 xenvif_tx_err(vif, &txreq, idx);
1600 continue; 1337 break;
1601 } 1338 }
1602 gop = request_gop; 1339 gop = request_gop;
1603 1340
1604 __skb_queue_tail(&netbk->tx_queue, skb); 1341 __skb_queue_tail(&vif->tx_queue, skb);
1605 1342
1606 vif->tx.req_cons = idx; 1343 vif->tx.req_cons = idx;
1607 xen_netbk_check_rx_xenvif(vif);
1608 1344
1609 if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) 1345 if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
1610 break; 1346 break;
1611 } 1347 }
1612 1348
1613 return gop - netbk->tx_copy_ops; 1349 return gop - vif->tx_copy_ops;
1614} 1350}
1615 1351
1616static void xen_netbk_tx_submit(struct xen_netbk *netbk) 1352
1353static int xenvif_tx_submit(struct xenvif *vif, int budget)
1617{ 1354{
1618 struct gnttab_copy *gop = netbk->tx_copy_ops; 1355 struct gnttab_copy *gop = vif->tx_copy_ops;
1619 struct sk_buff *skb; 1356 struct sk_buff *skb;
1357 int work_done = 0;
1620 1358
1621 while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { 1359 while (work_done < budget &&
1360 (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1622 struct xen_netif_tx_request *txp; 1361 struct xen_netif_tx_request *txp;
1623 struct xenvif *vif;
1624 u16 pending_idx; 1362 u16 pending_idx;
1625 unsigned data_len; 1363 unsigned data_len;
1626 1364
1627 pending_idx = *((u16 *)skb->data); 1365 pending_idx = *((u16 *)skb->data);
1628 vif = netbk->pending_tx_info[pending_idx].vif; 1366 txp = &vif->pending_tx_info[pending_idx].req;
1629 txp = &netbk->pending_tx_info[pending_idx].req;
1630 1367
1631 /* Check the remap error code. */ 1368 /* Check the remap error code. */
1632 if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) { 1369 if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
1633 netdev_dbg(vif->dev, "netback grant failed.\n"); 1370 netdev_dbg(vif->dev, "netback grant failed.\n");
1634 skb_shinfo(skb)->nr_frags = 0; 1371 skb_shinfo(skb)->nr_frags = 0;
1635 kfree_skb(skb); 1372 kfree_skb(skb);
@@ -1638,7 +1375,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1638 1375
1639 data_len = skb->len; 1376 data_len = skb->len;
1640 memcpy(skb->data, 1377 memcpy(skb->data,
1641 (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), 1378 (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
1642 data_len); 1379 data_len);
1643 if (data_len < txp->size) { 1380 if (data_len < txp->size) {
1644 /* Append the packet payload as a fragment. */ 1381 /* Append the packet payload as a fragment. */
@@ -1646,7 +1383,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1646 txp->size -= data_len; 1383 txp->size -= data_len;
1647 } else { 1384 } else {
1648 /* Schedule a response immediately. */ 1385 /* Schedule a response immediately. */
1649 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1386 xenvif_idx_release(vif, pending_idx,
1387 XEN_NETIF_RSP_OKAY);
1650 } 1388 }
1651 1389
1652 if (txp->flags & XEN_NETTXF_csum_blank) 1390 if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1654,7 +1392,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1654 else if (txp->flags & XEN_NETTXF_data_validated) 1392 else if (txp->flags & XEN_NETTXF_data_validated)
1655 skb->ip_summed = CHECKSUM_UNNECESSARY; 1393 skb->ip_summed = CHECKSUM_UNNECESSARY;
1656 1394
1657 xen_netbk_fill_frags(netbk, skb); 1395 xenvif_fill_frags(vif, skb);
1658 1396
1659 /* 1397 /*
1660 * If the initial fragment was < PKT_PROT_LEN then 1398 * If the initial fragment was < PKT_PROT_LEN then
@@ -1682,53 +1420,61 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1682 vif->dev->stats.rx_bytes += skb->len; 1420 vif->dev->stats.rx_bytes += skb->len;
1683 vif->dev->stats.rx_packets++; 1421 vif->dev->stats.rx_packets++;
1684 1422
1685 xenvif_receive_skb(vif, skb); 1423 work_done++;
1424
1425 netif_receive_skb(skb);
1686 } 1426 }
1427
1428 return work_done;
1687} 1429}
1688 1430
1689/* Called after netfront has transmitted */ 1431/* Called after netfront has transmitted */
1690static void xen_netbk_tx_action(struct xen_netbk *netbk) 1432int xenvif_tx_action(struct xenvif *vif, int budget)
1691{ 1433{
1692 unsigned nr_gops; 1434 unsigned nr_gops;
1435 int work_done;
1436
1437 if (unlikely(!tx_work_todo(vif)))
1438 return 0;
1693 1439
1694 nr_gops = xen_netbk_tx_build_gops(netbk); 1440 nr_gops = xenvif_tx_build_gops(vif);
1695 1441
1696 if (nr_gops == 0) 1442 if (nr_gops == 0)
1697 return; 1443 return 0;
1698 1444
1699 gnttab_batch_copy(netbk->tx_copy_ops, nr_gops); 1445 gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
1700 1446
1701 xen_netbk_tx_submit(netbk); 1447 work_done = xenvif_tx_submit(vif, nr_gops);
1448
1449 return work_done;
1702} 1450}
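/* Illustrative sketch (not part of this patch): the budget/work_done
 * pair lets xenvif_tx_action() back a NAPI poll loop. The napi field on
 * struct xenvif and the xenvif_poll name below are assumptions made for
 * the illustration.
 */
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif *vif = container_of(napi, struct xenvif, napi);
	int work_done;

	/* Process at most 'budget' packets from the tx ring. */
	work_done = xenvif_tx_action(vif, budget);

	/* Ring drained within budget: stop polling, re-enable events. */
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}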
1703 1451
1704static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, 1452static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
1705 u8 status) 1453 u8 status)
1706{ 1454{
1707 struct xenvif *vif;
1708 struct pending_tx_info *pending_tx_info; 1455 struct pending_tx_info *pending_tx_info;
1709 pending_ring_idx_t head; 1456 pending_ring_idx_t head;
1710 u16 peek; /* peek into next tx request */ 1457 u16 peek; /* peek into next tx request */
1711 1458
1712 BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL)); 1459 BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
1713 1460
1714 /* Already complete? */ 1461 /* Already complete? */
1715 if (netbk->mmap_pages[pending_idx] == NULL) 1462 if (vif->mmap_pages[pending_idx] == NULL)
1716 return; 1463 return;
1717 1464
1718 pending_tx_info = &netbk->pending_tx_info[pending_idx]; 1465 pending_tx_info = &vif->pending_tx_info[pending_idx];
1719 1466
1720 vif = pending_tx_info->vif;
1721 head = pending_tx_info->head; 1467 head = pending_tx_info->head;
1722 1468
1723 BUG_ON(!pending_tx_is_head(netbk, head)); 1469 BUG_ON(!pending_tx_is_head(vif, head));
1724 BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx); 1470 BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
1725 1471
1726 do { 1472 do {
1727 pending_ring_idx_t index; 1473 pending_ring_idx_t index;
1728 pending_ring_idx_t idx = pending_index(head); 1474 pending_ring_idx_t idx = pending_index(head);
1729 u16 info_idx = netbk->pending_ring[idx]; 1475 u16 info_idx = vif->pending_ring[idx];
1730 1476
1731 pending_tx_info = &netbk->pending_tx_info[info_idx]; 1477 pending_tx_info = &vif->pending_tx_info[info_idx];
1732 make_tx_response(vif, &pending_tx_info->req, status); 1478 make_tx_response(vif, &pending_tx_info->req, status);
1733 1479
1734 /* Setting any number other than 1480 /* Setting any number other than
@@ -1737,18 +1483,15 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1737 */ 1483 */
1738 pending_tx_info->head = 0; 1484 pending_tx_info->head = 0;
1739 1485
1740 index = pending_index(netbk->pending_prod++); 1486 index = pending_index(vif->pending_prod++);
1741 netbk->pending_ring[index] = netbk->pending_ring[info_idx]; 1487 vif->pending_ring[index] = vif->pending_ring[info_idx];
1742
1743 xenvif_put(vif);
1744 1488
1745 peek = netbk->pending_ring[pending_index(++head)]; 1489 peek = vif->pending_ring[pending_index(++head)];
1746 1490
1747 } while (!pending_tx_is_head(netbk, peek)); 1491 } while (!pending_tx_is_head(vif, peek));
1748 1492
1749 netbk->mmap_pages[pending_idx]->mapping = 0; 1493 put_page(vif->mmap_pages[pending_idx]);
1750 put_page(netbk->mmap_pages[pending_idx]); 1494 vif->mmap_pages[pending_idx] = NULL;
1751 netbk->mmap_pages[pending_idx] = NULL;
1752} 1495}
1753 1496
1754 1497
@@ -1796,46 +1539,23 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1796 return resp; 1539 return resp;
1797} 1540}
1798 1541
1799static inline int rx_work_todo(struct xen_netbk *netbk) 1542static inline int rx_work_todo(struct xenvif *vif)
1800{ 1543{
1801 return !skb_queue_empty(&netbk->rx_queue); 1544 return !skb_queue_empty(&vif->rx_queue);
1802} 1545}
1803 1546
1804static inline int tx_work_todo(struct xen_netbk *netbk) 1547static inline int tx_work_todo(struct xenvif *vif)
1805{ 1548{
1806 1549
1807 if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX 1550 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
1808 < MAX_PENDING_REQS) && 1551 (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1809 !list_empty(&netbk->net_schedule_list)) 1552 < MAX_PENDING_REQS))
1810 return 1; 1553 return 1;
1811 1554
1812 return 0; 1555 return 0;
1813} 1556}
1814 1557
1815static int xen_netbk_kthread(void *data) 1558void xenvif_unmap_frontend_rings(struct xenvif *vif)
1816{
1817 struct xen_netbk *netbk = data;
1818 while (!kthread_should_stop()) {
1819 wait_event_interruptible(netbk->wq,
1820 rx_work_todo(netbk) ||
1821 tx_work_todo(netbk) ||
1822 kthread_should_stop());
1823 cond_resched();
1824
1825 if (kthread_should_stop())
1826 break;
1827
1828 if (rx_work_todo(netbk))
1829 xen_netbk_rx_action(netbk);
1830
1831 if (tx_work_todo(netbk))
1832 xen_netbk_tx_action(netbk);
1833 }
1834
1835 return 0;
1836}
1837
1838void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1839{ 1559{
1840 if (vif->tx.sring) 1560 if (vif->tx.sring)
1841 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), 1561 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
@@ -1845,9 +1565,9 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1845 vif->rx.sring); 1565 vif->rx.sring);
1846} 1566}
1847 1567
1848int xen_netbk_map_frontend_rings(struct xenvif *vif, 1568int xenvif_map_frontend_rings(struct xenvif *vif,
1849 grant_ref_t tx_ring_ref, 1569 grant_ref_t tx_ring_ref,
1850 grant_ref_t rx_ring_ref) 1570 grant_ref_t rx_ring_ref)
1851{ 1571{
1852 void *addr; 1572 void *addr;
1853 struct xen_netif_tx_sring *txs; 1573 struct xen_netif_tx_sring *txs;
@@ -1876,15 +1596,33 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
1876 return 0; 1596 return 0;
1877 1597
1878err: 1598err:
1879 xen_netbk_unmap_frontend_rings(vif); 1599 xenvif_unmap_frontend_rings(vif);
1880 return err; 1600 return err;
1881} 1601}
1882 1602
1603int xenvif_kthread(void *data)
1604{
1605 struct xenvif *vif = data;
1606
1607 while (!kthread_should_stop()) {
1608 wait_event_interruptible(vif->wq,
1609 rx_work_todo(vif) ||
1610 kthread_should_stop());
1611 if (kthread_should_stop())
1612 break;
1613
1614 if (rx_work_todo(vif))
1615 xenvif_rx_action(vif);
1616
1617 cond_resched();
1618 }
1619
1620 return 0;
1621}
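/* Illustrative sketch (not part of this patch): with the shared
 * per-group kthreads gone, each vif starts its own worker, e.g. from
 * the connect path. The vif->task field is an assumption here.
 */
	struct task_struct *task;

	task = kthread_create(xenvif_kthread, (void *)vif,
			      "%s", vif->dev->name);
	if (IS_ERR(task))
		return PTR_ERR(task);

	vif->task = task;
	wake_up_process(task);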
1622
1883static int __init netback_init(void) 1623static int __init netback_init(void)
1884{ 1624{
1885 int i;
1886 int rc = 0; 1625 int rc = 0;
1887 int group;
1888 1626
1889 if (!xen_domain()) 1627 if (!xen_domain())
1890 return -ENODEV; 1628 return -ENODEV;
@@ -1895,48 +1633,6 @@ static int __init netback_init(void)
1895 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; 1633 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1896 } 1634 }
1897 1635
1898 xen_netbk_group_nr = num_online_cpus();
1899 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1900 if (!xen_netbk)
1901 return -ENOMEM;
1902
1903 for (group = 0; group < xen_netbk_group_nr; group++) {
1904 struct xen_netbk *netbk = &xen_netbk[group];
1905 skb_queue_head_init(&netbk->rx_queue);
1906 skb_queue_head_init(&netbk->tx_queue);
1907
1908 init_timer(&netbk->net_timer);
1909 netbk->net_timer.data = (unsigned long)netbk;
1910 netbk->net_timer.function = xen_netbk_alarm;
1911
1912 netbk->pending_cons = 0;
1913 netbk->pending_prod = MAX_PENDING_REQS;
1914 for (i = 0; i < MAX_PENDING_REQS; i++)
1915 netbk->pending_ring[i] = i;
1916
1917 init_waitqueue_head(&netbk->wq);
1918 netbk->task = kthread_create(xen_netbk_kthread,
1919 (void *)netbk,
1920 "netback/%u", group);
1921
1922 if (IS_ERR(netbk->task)) {
1923 pr_alert("kthread_create() fails at netback\n");
1924 del_timer(&netbk->net_timer);
1925 rc = PTR_ERR(netbk->task);
1926 goto failed_init;
1927 }
1928
1929 kthread_bind(netbk->task, group);
1930
1931 INIT_LIST_HEAD(&netbk->net_schedule_list);
1932
1933 spin_lock_init(&netbk->net_schedule_list_lock);
1934
1935 atomic_set(&netbk->netfront_count, 0);
1936
1937 wake_up_process(netbk->task);
1938 }
1939
1940 rc = xenvif_xenbus_init(); 1636 rc = xenvif_xenbus_init();
1941 if (rc) 1637 if (rc)
1942 goto failed_init; 1638 goto failed_init;
@@ -1944,35 +1640,14 @@ static int __init netback_init(void)
1944 return 0; 1640 return 0;
1945 1641
1946failed_init: 1642failed_init:
1947 while (--group >= 0) {
1948 struct xen_netbk *netbk = &xen_netbk[group];
1949 del_timer(&netbk->net_timer);
1950 kthread_stop(netbk->task);
1951 }
1952 vfree(xen_netbk);
1953 return rc; 1643 return rc;
1954
1955} 1644}
1956 1645
1957module_init(netback_init); 1646module_init(netback_init);
1958 1647
1959static void __exit netback_fini(void) 1648static void __exit netback_fini(void)
1960{ 1649{
1961 int i, j;
1962
1963 xenvif_xenbus_fini(); 1650 xenvif_xenbus_fini();
1964
1965 for (i = 0; i < xen_netbk_group_nr; i++) {
1966 struct xen_netbk *netbk = &xen_netbk[i];
1967 del_timer_sync(&netbk->net_timer);
1968 kthread_stop(netbk->task);
1969 for (j = 0; j < MAX_PENDING_REQS; j++) {
1970 if (netbk->mmap_pages[j])
1971 __free_page(netbk->mmap_pages[j]);
1972 }
1973 }
1974
1975 vfree(xen_netbk);
1976} 1651}
1977module_exit(netback_fini); 1652module_exit(netback_fini);
1978 1653
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index c5c30fb1d7bf..9a53f13c88df 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -60,7 +60,7 @@ struct nfcsim {
60static struct nfcsim *dev0; 60static struct nfcsim *dev0;
61static struct nfcsim *dev1; 61static struct nfcsim *dev1;
62 62
63struct workqueue_struct *wq; 63static struct workqueue_struct *wq;
64 64
65static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown) 65static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
66{ 66{
@@ -481,7 +481,7 @@ static void nfcsim_free_device(struct nfcsim *dev)
481 kfree(dev); 481 kfree(dev);
482} 482}
483 483
484int __init nfcsim_init(void) 484static int __init nfcsim_init(void)
485{ 485{
486 int rc; 486 int rc;
487 487
@@ -522,7 +522,7 @@ exit:
522 return rc; 522 return rc;
523} 523}
524 524
525void __exit nfcsim_exit(void) 525static void __exit nfcsim_exit(void)
526{ 526{
527 nfcsim_cleanup_dev(dev0, 1); 527 nfcsim_cleanup_dev(dev0, 1);
528 nfcsim_cleanup_dev(dev1, 1); 528 nfcsim_cleanup_dev(dev1, 1);
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index daf92ac209f8..5df730be88a3 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -83,12 +83,20 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
83 83
84/* How much time we spend listening for initiators */ 84/* How much time we spend listening for initiators */
85#define PN533_LISTEN_TIME 2 85#define PN533_LISTEN_TIME 2
86/* Delay between each poll frame (ms) */
87#define PN533_POLL_INTERVAL 10
86 88
87/* Standard pn533 frame definitions */ 89/* pn533 frame definitions (standard and extended) */
88#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \ 90#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
89 + 2) /* data[0] TFI, data[1] CC */ 91 + 2) /* data[0] TFI, data[1] CC */
90#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/ 92#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
91 93
94#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
95 + 2) /* data[0] TFI, data[1] CC */
96
97#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
98#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */
99
92/* 100/*
93 * Max extended frame payload len, excluding TFI and CC 101 * Max extended frame payload len, excluding TFI and CC
94 * which are already in PN533_FRAME_HEADER_LEN. 102 * which are already in PN533_FRAME_HEADER_LEN.
@@ -99,6 +107,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
99 Postamble (1) */ 107 Postamble (1) */
100#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen]) 108#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
101#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1]) 109#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
110/* LEN (byte 3) and LCS (byte 4) are both 0xff for an extended frame */
111#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
112 && (hdr)->datalen_checksum == 0xFF)
113#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
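/* Illustrative wire layout implied by the macros above (an assumption
 * drawn from struct pn533_ext_frame, not spelled out in this diff):
 *
 *   standard: 00 00 FF LEN  LCS        TFI CC data... DCS 00
 *   extended: 00 00 FF FF FF LENm LENl LCS TFI CC data... DCS 00
 *
 * PN533_STD_IS_EXTENDED() keys off the two 0xff bytes sitting where a
 * standard frame carries LEN and its checksum LCS.
 */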
102 114
103/* start of frame */ 115/* start of frame */
104#define PN533_STD_FRAME_SOF 0x00FF 116#define PN533_STD_FRAME_SOF 0x00FF
@@ -124,7 +136,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
124#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83 136#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
125 137
126/* PN533 Commands */ 138/* PN533 Commands */
127#define PN533_STD_FRAME_CMD(f) (f->data[1]) 139#define PN533_FRAME_CMD(f) (f->data[1])
128 140
129#define PN533_CMD_GET_FIRMWARE_VERSION 0x02 141#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
130#define PN533_CMD_RF_CONFIGURATION 0x32 142#define PN533_CMD_RF_CONFIGURATION 0x32
@@ -168,8 +180,9 @@ struct pn533_fw_version {
168#define PN533_CFGITEM_MAX_RETRIES 0x05 180#define PN533_CFGITEM_MAX_RETRIES 0x05
169#define PN533_CFGITEM_PASORI 0x82 181#define PN533_CFGITEM_PASORI 0x82
170 182
171#define PN533_CFGITEM_RF_FIELD_ON 0x1 183#define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2
172#define PN533_CFGITEM_RF_FIELD_OFF 0x0 184#define PN533_CFGITEM_RF_FIELD_ON 0x1
185#define PN533_CFGITEM_RF_FIELD_OFF 0x0
173 186
174#define PN533_CONFIG_TIMING_102 0xb 187#define PN533_CONFIG_TIMING_102 0xb
175#define PN533_CONFIG_TIMING_204 0xc 188#define PN533_CONFIG_TIMING_204 0xc
@@ -257,7 +270,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
257 .initiator_data.felica = { 270 .initiator_data.felica = {
258 .opcode = PN533_FELICA_OPC_SENSF_REQ, 271 .opcode = PN533_FELICA_OPC_SENSF_REQ,
259 .sc = PN533_FELICA_SENSF_SC_ALL, 272 .sc = PN533_FELICA_SENSF_SC_ALL,
260 .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, 273 .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
261 .tsn = 0x03, 274 .tsn = 0x03,
262 }, 275 },
263 }, 276 },
@@ -270,7 +283,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
270 .initiator_data.felica = { 283 .initiator_data.felica = {
271 .opcode = PN533_FELICA_OPC_SENSF_REQ, 284 .opcode = PN533_FELICA_OPC_SENSF_REQ,
272 .sc = PN533_FELICA_SENSF_SC_ALL, 285 .sc = PN533_FELICA_SENSF_SC_ALL,
273 .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, 286 .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
274 .tsn = 0x03, 287 .tsn = 0x03,
275 }, 288 },
276 }, 289 },
@@ -352,13 +365,16 @@ struct pn533 {
352 struct urb *in_urb; 365 struct urb *in_urb;
353 366
354 struct sk_buff_head resp_q; 367 struct sk_buff_head resp_q;
368 struct sk_buff_head fragment_skb;
355 369
356 struct workqueue_struct *wq; 370 struct workqueue_struct *wq;
357 struct work_struct cmd_work; 371 struct work_struct cmd_work;
358 struct work_struct cmd_complete_work; 372 struct work_struct cmd_complete_work;
359 struct work_struct poll_work; 373 struct delayed_work poll_work;
360 struct work_struct mi_work; 374 struct work_struct mi_rx_work;
375 struct work_struct mi_tx_work;
361 struct work_struct tg_work; 376 struct work_struct tg_work;
377 struct work_struct rf_work;
362 378
363 struct list_head cmd_queue; 379 struct list_head cmd_queue;
364 struct pn533_cmd *cmd; 380 struct pn533_cmd *cmd;
@@ -366,6 +382,7 @@ struct pn533 {
366 struct mutex cmd_lock; /* protects cmd queue */ 382 struct mutex cmd_lock; /* protects cmd queue */
367 383
368 void *cmd_complete_mi_arg; 384 void *cmd_complete_mi_arg;
385 void *cmd_complete_dep_arg;
369 386
370 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; 387 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
371 u8 poll_mod_count; 388 u8 poll_mod_count;
@@ -404,6 +421,15 @@ struct pn533_std_frame {
404 u8 data[]; 421 u8 data[];
405} __packed; 422} __packed;
406 423
424struct pn533_ext_frame { /* Extended Information frame */
425 u8 preamble;
426 __be16 start_frame;
427 __be16 eif_flag; /* fixed to 0xFFFF */
428 __be16 datalen;
429 u8 datalen_checksum;
430 u8 data[];
431} __packed;
432
407struct pn533_frame_ops { 433struct pn533_frame_ops {
408 void (*tx_frame_init)(void *frame, u8 cmd_code); 434 void (*tx_frame_init)(void *frame, u8 cmd_code);
409 void (*tx_frame_finish)(void *frame); 435 void (*tx_frame_finish)(void *frame);
@@ -411,7 +437,7 @@ struct pn533_frame_ops {
411 int tx_header_len; 437 int tx_header_len;
412 int tx_tail_len; 438 int tx_tail_len;
413 439
414 bool (*rx_is_frame_valid)(void *frame); 440 bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
415 int (*rx_frame_size)(void *frame); 441 int (*rx_frame_size)(void *frame);
416 int rx_header_len; 442 int rx_header_len;
417 int rx_tail_len; 443 int rx_tail_len;
@@ -486,7 +512,7 @@ static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
486 frame->datalen += len; 512 frame->datalen += len;
487} 513}
488 514
489static bool pn533_acr122_is_rx_frame_valid(void *_frame) 515static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
490{ 516{
491 struct pn533_acr122_rx_frame *frame = _frame; 517 struct pn533_acr122_rx_frame *frame = _frame;
492 518
@@ -511,7 +537,7 @@ static u8 pn533_acr122_get_cmd_code(void *frame)
511{ 537{
512 struct pn533_acr122_rx_frame *f = frame; 538 struct pn533_acr122_rx_frame *f = frame;
513 539
514 return PN533_STD_FRAME_CMD(f); 540 return PN533_FRAME_CMD(f);
515} 541}
516 542
517static struct pn533_frame_ops pn533_acr122_frame_ops = { 543static struct pn533_frame_ops pn533_acr122_frame_ops = {
@@ -530,6 +556,12 @@ static struct pn533_frame_ops pn533_acr122_frame_ops = {
530 .get_cmd_code = pn533_acr122_get_cmd_code, 556 .get_cmd_code = pn533_acr122_get_cmd_code,
531}; 557};
532 558
559/* The rule: value(high byte) + value(low byte) + checksum = 0 */
560static inline u8 pn533_ext_checksum(u16 value)
561{
562 return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1;
563}
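/* Worked example (illustration, not driver code): for an extended frame
 * with datalen 0x0102 (258 bytes), high byte 0x01 + low byte 0x02 = 0x03,
 * and pn533_ext_checksum(0x0102) = ~0x03 + 1 = 0xfd, so
 * 0x01 + 0x02 + 0xfd wraps to 0x00, satisfying the rule above.
 */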
564
533/* The rule: value + checksum = 0 */ 565/* The rule: value + checksum = 0 */
534static inline u8 pn533_std_checksum(u8 value) 566static inline u8 pn533_std_checksum(u8 value)
535{ 567{
@@ -555,7 +587,7 @@ static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code)
555 frame->preamble = 0; 587 frame->preamble = 0;
556 frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF); 588 frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF);
557 PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT; 589 PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT;
558 PN533_STD_FRAME_CMD(frame) = cmd_code; 590 PN533_FRAME_CMD(frame) = cmd_code;
559 frame->datalen = 2; 591 frame->datalen = 2;
560} 592}
561 593
@@ -578,21 +610,41 @@ static void pn533_std_tx_update_payload_len(void *_frame, int len)
578 frame->datalen += len; 610 frame->datalen += len;
579} 611}
580 612
581static bool pn533_std_rx_frame_is_valid(void *_frame) 613static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev)
582{ 614{
583 u8 checksum; 615 u8 checksum;
584 struct pn533_std_frame *frame = _frame; 616 struct pn533_std_frame *stdf = _frame;
585 617
586 if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF)) 618 if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
587 return false; 619 return false;
588 620
589 checksum = pn533_std_checksum(frame->datalen); 621 if (likely(!PN533_STD_IS_EXTENDED(stdf))) {
590 if (checksum != frame->datalen_checksum) 622 /* Standard frame code */
591 return false; 623 dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN;
592 624
593 checksum = pn533_std_data_checksum(frame->data, frame->datalen); 625 checksum = pn533_std_checksum(stdf->datalen);
594 if (checksum != PN533_STD_FRAME_CHECKSUM(frame)) 626 if (checksum != stdf->datalen_checksum)
595 return false; 627 return false;
628
629 checksum = pn533_std_data_checksum(stdf->data, stdf->datalen);
630 if (checksum != PN533_STD_FRAME_CHECKSUM(stdf))
631 return false;
632 } else {
633 /* Extended */
634 struct pn533_ext_frame *eif = _frame;
635
636 dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN;
637
638 checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen));
639 if (checksum != eif->datalen_checksum)
640 return false;
641
642 /* check data checksum */
643 checksum = pn533_std_data_checksum(eif->data,
644 be16_to_cpu(eif->datalen));
645 if (checksum != PN533_EXT_FRAME_CHECKSUM(eif))
646 return false;
647 }
596 648
597 return true; 649 return true;
598} 650}
@@ -612,6 +664,14 @@ static inline int pn533_std_rx_frame_size(void *frame)
612{ 664{
613 struct pn533_std_frame *f = frame; 665 struct pn533_std_frame *f = frame;
614 666
667 /* check for Extended Information frame */
668 if (PN533_STD_IS_EXTENDED(f)) {
669 struct pn533_ext_frame *eif = frame;
670
671 return sizeof(struct pn533_ext_frame)
672 + be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN;
673 }
674
615 return sizeof(struct pn533_std_frame) + f->datalen + 675 return sizeof(struct pn533_std_frame) + f->datalen +
616 PN533_STD_FRAME_TAIL_LEN; 676 PN533_STD_FRAME_TAIL_LEN;
617} 677}
@@ -619,8 +679,12 @@ static inline int pn533_std_rx_frame_size(void *frame)
619static u8 pn533_std_get_cmd_code(void *frame) 679static u8 pn533_std_get_cmd_code(void *frame)
620{ 680{
621 struct pn533_std_frame *f = frame; 681 struct pn533_std_frame *f = frame;
682 struct pn533_ext_frame *eif = frame;
622 683
623 return PN533_STD_FRAME_CMD(f); 684 if (PN533_STD_IS_EXTENDED(f))
685 return PN533_FRAME_CMD(eif);
686 else
687 return PN533_FRAME_CMD(f);
624} 688}
625 689
626static struct pn533_frame_ops pn533_std_frame_ops = { 690static struct pn533_frame_ops pn533_std_frame_ops = {
@@ -675,7 +739,7 @@ static void pn533_recv_response(struct urb *urb)
675 print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame, 739 print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
676 dev->ops->rx_frame_size(in_frame), false); 740 dev->ops->rx_frame_size(in_frame), false);
677 741
678 if (!dev->ops->rx_is_frame_valid(in_frame)) { 742 if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
679 nfc_dev_err(&dev->interface->dev, "Received an invalid frame"); 743 nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
680 cmd->status = -EIO; 744 cmd->status = -EIO;
681 goto sched_wq; 745 goto sched_wq;
@@ -1657,7 +1721,56 @@ static void pn533_listen_mode_timer(unsigned long data)
1657 1721
1658 pn533_poll_next_mod(dev); 1722 pn533_poll_next_mod(dev);
1659 1723
1660 queue_work(dev->wq, &dev->poll_work); 1724 queue_delayed_work(dev->wq, &dev->poll_work,
1725 msecs_to_jiffies(PN533_POLL_INTERVAL));
1726}
1727
1728static int pn533_rf_complete(struct pn533 *dev, void *arg,
1729 struct sk_buff *resp)
1730{
1731 int rc = 0;
1732
1733 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1734
1735 if (IS_ERR(resp)) {
1736 rc = PTR_ERR(resp);
1737
1738 nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
1739 __func__, rc);
1740
1741 return rc;
1742 }
1743
1744 queue_delayed_work(dev->wq, &dev->poll_work,
1745 msecs_to_jiffies(PN533_POLL_INTERVAL));
1746
1747 dev_kfree_skb(resp);
1748 return rc;
1749}
1750
1751static void pn533_wq_rf(struct work_struct *work)
1752{
1753 struct pn533 *dev = container_of(work, struct pn533, rf_work);
1754 struct sk_buff *skb;
1755 int rc;
1756
1757 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1758
1759 skb = pn533_alloc_skb(dev, 2);
1760 if (!skb)
1761 return;
1762
1763 *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD;
1764 *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
1765
1766 rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
1767 pn533_rf_complete, NULL);
1768 if (rc < 0) {
1769 dev_kfree_skb(skb);
1770 nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
1771 }
1772
1773 return;
1661} 1774}
1662 1775
1663static int pn533_poll_complete(struct pn533 *dev, void *arg, 1776static int pn533_poll_complete(struct pn533 *dev, void *arg,
@@ -1705,7 +1818,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
1705 } 1818 }
1706 1819
1707 pn533_poll_next_mod(dev); 1820 pn533_poll_next_mod(dev);
1708 queue_work(dev->wq, &dev->poll_work); 1821 /* No target found, turn radio off */
1822 queue_work(dev->wq, &dev->rf_work);
1709 1823
1710done: 1824done:
1711 dev_kfree_skb(resp); 1825 dev_kfree_skb(resp);
@@ -1770,7 +1884,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
1770 1884
1771static void pn533_wq_poll(struct work_struct *work) 1885static void pn533_wq_poll(struct work_struct *work)
1772{ 1886{
1773 struct pn533 *dev = container_of(work, struct pn533, poll_work); 1887 struct pn533 *dev = container_of(work, struct pn533, poll_work.work);
1774 struct pn533_poll_modulations *cur_mod; 1888 struct pn533_poll_modulations *cur_mod;
1775 int rc; 1889 int rc;
1776 1890
@@ -1799,6 +1913,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
1799 u32 im_protocols, u32 tm_protocols) 1913 u32 im_protocols, u32 tm_protocols)
1800{ 1914{
1801 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1915 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1916 u8 rand_mod;
1802 1917
1803 nfc_dev_dbg(&dev->interface->dev, 1918 nfc_dev_dbg(&dev->interface->dev,
1804 "%s: im protocols 0x%x tm protocols 0x%x", 1919 "%s: im protocols 0x%x tm protocols 0x%x",
@@ -1822,11 +1937,15 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
1822 tm_protocols = 0; 1937 tm_protocols = 0;
1823 } 1938 }
1824 1939
1825 dev->poll_mod_curr = 0;
1826 pn533_poll_create_mod_list(dev, im_protocols, tm_protocols); 1940 pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
1827 dev->poll_protocols = im_protocols; 1941 dev->poll_protocols = im_protocols;
1828 dev->listen_protocols = tm_protocols; 1942 dev->listen_protocols = tm_protocols;
1829 1943
1944 /* Do not always start polling from the same modulation */
1945 get_random_bytes(&rand_mod, sizeof(rand_mod));
1946 rand_mod %= dev->poll_mod_count;
1947 dev->poll_mod_curr = rand_mod;
1948
1830 return pn533_send_poll_frame(dev); 1949 return pn533_send_poll_frame(dev);
1831} 1950}
1832 1951
@@ -1845,6 +1964,7 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
1845 } 1964 }
1846 1965
1847 pn533_abort_cmd(dev, GFP_KERNEL); 1966 pn533_abort_cmd(dev, GFP_KERNEL);
1967 flush_delayed_work(&dev->poll_work);
1848 pn533_poll_reset_mod_list(dev); 1968 pn533_poll_reset_mod_list(dev);
1849} 1969}
1850 1970
@@ -2037,28 +2157,15 @@ error:
2037 return rc; 2157 return rc;
2038} 2158}
2039 2159
2040static int pn533_mod_to_baud(struct pn533 *dev) 2160static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
2041{
2042 switch (dev->poll_mod_curr) {
2043 case PN533_POLL_MOD_106KBPS_A:
2044 return 0;
2045 case PN533_POLL_MOD_212KBPS_FELICA:
2046 return 1;
2047 case PN533_POLL_MOD_424KBPS_FELICA:
2048 return 2;
2049 default:
2050 return -EINVAL;
2051 }
2052}
2053
2054#define PASSIVE_DATA_LEN 5 2161#define PASSIVE_DATA_LEN 5
2055static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, 2162static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
2056 u8 comm_mode, u8 *gb, size_t gb_len) 2163 u8 comm_mode, u8 *gb, size_t gb_len)
2057{ 2164{
2058 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 2165 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
2059 struct sk_buff *skb; 2166 struct sk_buff *skb;
2060 int rc, baud, skb_len; 2167 int rc, skb_len;
2061 u8 *next, *arg; 2168 u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
2062 2169
2063 u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3}; 2170 u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
2064 2171
@@ -2076,41 +2183,39 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
2076 return -EBUSY; 2183 return -EBUSY;
2077 } 2184 }
2078 2185
2079 baud = pn533_mod_to_baud(dev);
2080 if (baud < 0) {
2081 nfc_dev_err(&dev->interface->dev,
2082 "Invalid curr modulation %d", dev->poll_mod_curr);
2083 return baud;
2084 }
2085
2086 skb_len = 3 + gb_len; /* ActPass + BR + Next */ 2186 skb_len = 3 + gb_len; /* ActPass + BR + Next */
2087 if (comm_mode == NFC_COMM_PASSIVE) 2187 skb_len += PASSIVE_DATA_LEN;
2088 skb_len += PASSIVE_DATA_LEN;
2089 2188
2090 if (target && target->nfcid2_len) 2189 /* NFCID3 */
2091 skb_len += NFC_NFCID3_MAXSIZE; 2190 skb_len += NFC_NFCID3_MAXSIZE;
2191 if (target && !target->nfcid2_len) {
2192 nfcid3[0] = 0x1;
2193 nfcid3[1] = 0xfe;
2194 get_random_bytes(nfcid3 + 2, 6);
2195 }
2092 2196
2093 skb = pn533_alloc_skb(dev, skb_len); 2197 skb = pn533_alloc_skb(dev, skb_len);
2094 if (!skb) 2198 if (!skb)
2095 return -ENOMEM; 2199 return -ENOMEM;
2096 2200
2097 *skb_put(skb, 1) = !comm_mode; /* ActPass */ 2201 *skb_put(skb, 1) = !comm_mode; /* ActPass */
2098 *skb_put(skb, 1) = baud; /* Baud rate */ 2202 *skb_put(skb, 1) = 0x02; /* 424 kbps */
2099 2203
2100 next = skb_put(skb, 1); /* Next */ 2204 next = skb_put(skb, 1); /* Next */
2101 *next = 0; 2205 *next = 0;
2102 2206
2103 if (comm_mode == NFC_COMM_PASSIVE && baud > 0) { 2207 /* Copy passive data */
2104 memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, 2208 memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
2105 PASSIVE_DATA_LEN); 2209 *next |= 1;
2106 *next |= 1;
2107 }
2108 2210
2109 if (target && target->nfcid2_len) { 2211 /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
2212 if (target && target->nfcid2_len)
2110 memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2, 2213 memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
2111 target->nfcid2_len); 2214 target->nfcid2_len);
2112 *next |= 2; 2215 else
2113 } 2216 memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
2217 NFC_NFCID3_MAXSIZE);
2218 *next |= 2;
2114 2219
2115 if (gb != NULL && gb_len > 0) { 2220 if (gb != NULL && gb_len > 0) {
2116 memcpy(skb_put(skb, gb_len), gb, gb_len); 2221 memcpy(skb_put(skb, gb_len), gb, gb_len);
@@ -2127,6 +2232,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
2127 2232
2128 *arg = !comm_mode; 2233 *arg = !comm_mode;
2129 2234
2235 pn533_rf_field(dev->nfc_dev, 0);
2236
2130 rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb, 2237 rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
2131 pn533_in_dep_link_up_complete, arg); 2238 pn533_in_dep_link_up_complete, arg);
2132 2239
@@ -2232,7 +2339,15 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
2232 2339
2233 if (mi) { 2340 if (mi) {
2234 dev->cmd_complete_mi_arg = arg; 2341 dev->cmd_complete_mi_arg = arg;
2235 queue_work(dev->wq, &dev->mi_work); 2342 queue_work(dev->wq, &dev->mi_rx_work);
2343 return -EINPROGRESS;
2344 }
2345
2346 /* Prepare for the next round */
2347 if (skb_queue_len(&dev->fragment_skb) > 0) {
2348 dev->cmd_complete_dep_arg = arg;
2349 queue_work(dev->wq, &dev->mi_tx_work);
2350
2236 return -EINPROGRESS; 2351 return -EINPROGRESS;
2237 } 2352 }
2238 2353
@@ -2253,6 +2368,50 @@ _error:
2253 return rc; 2368 return rc;
2254} 2369}
2255 2370
2371/* Split the Tx skb into small chunks */
2372static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
2373{
2374 struct sk_buff *frag;
2375 int frag_size;
2376
2377 do {
2378 /* Remaining size */
2379 if (skb->len > PN533_CMD_DATAFRAME_MAXLEN)
2380 frag_size = PN533_CMD_DATAFRAME_MAXLEN;
2381 else
2382 frag_size = skb->len;
2383
2384 /* Allocate and reserve */
2385 frag = pn533_alloc_skb(dev, frag_size);
2386 if (!frag) {
2387 skb_queue_purge(&dev->fragment_skb);
2388 break;
2389 }
2390
2391 /* Reserve the TG/MI byte */
2392 skb_reserve(frag, 1);
2393
2394 /* MI + TG */
2395 if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
2396 *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
2397 else
2398 *skb_push(frag, sizeof(u8)) = 1; /* TG */
2399
2400 memcpy(skb_put(frag, frag_size), skb->data, frag_size);
2401
2402 /* Reduce the size of incoming buffer */
2403 skb_pull(skb, frag_size);
2404
2405 /* Add this to skb_queue */
2406 skb_queue_tail(&dev->fragment_skb, frag);
2407
2408 } while (skb->len > 0);
2409
2410 dev_kfree_skb(skb);
2411
2412 return skb_queue_len(&dev->fragment_skb);
2413}
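/* Illustration (assuming PN533_CMD_DATAFRAME_MAXLEN == 240, as defined
 * above): a 700-byte payload is queued as three fragments:
 *
 *   frag 1: 240 bytes, lead byte = PN533_CMD_MI_MASK | 1 (more follows)
 *   frag 2: 240 bytes, lead byte = PN533_CMD_MI_MASK | 1
 *   frag 3: 220 bytes, lead byte = 1 (TG only, final chunk)
 *
 * pn533_wq_mi_send() then dequeues and sends them one at a time,
 * re-armed from pn533_data_exchange_complete() while fragment_skb is
 * non-empty.
 */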
2414
2256static int pn533_transceive(struct nfc_dev *nfc_dev, 2415static int pn533_transceive(struct nfc_dev *nfc_dev,
2257 struct nfc_target *target, struct sk_buff *skb, 2416 struct nfc_target *target, struct sk_buff *skb,
2258 data_exchange_cb_t cb, void *cb_context) 2417 data_exchange_cb_t cb, void *cb_context)
@@ -2263,15 +2422,6 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
2263 2422
2264 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 2423 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
2265 2424
2266 if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
2267 /* TODO: Implement support to multi-part data exchange */
2268 nfc_dev_err(&dev->interface->dev,
2269 "Data length greater than the max allowed: %d",
2270 PN533_CMD_DATAEXCH_DATA_MAXLEN);
2271 rc = -ENOSYS;
2272 goto error;
2273 }
2274
2275 if (!dev->tgt_active_prot) { 2425 if (!dev->tgt_active_prot) {
2276 nfc_dev_err(&dev->interface->dev, 2426 nfc_dev_err(&dev->interface->dev,
2277 "Can't exchange data if there is no active target"); 2427 "Can't exchange data if there is no active target");
@@ -2299,7 +2449,20 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
2299 break; 2449 break;
2300 } 2450 }
2301 default: 2451 default:
2302 *skb_push(skb, sizeof(u8)) = 1; /*TG*/ 2452 /* jumbo frame? */
2453 if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
2454 rc = pn533_fill_fragment_skbs(dev, skb);
2455 if (rc <= 0)
2456 goto error;
2457
2458 skb = skb_dequeue(&dev->fragment_skb);
2459 if (!skb) {
2460 rc = -EIO;
2461 goto error;
2462 }
2463 } else {
2464 *skb_push(skb, sizeof(u8)) = 1; /* TG */
2465 }
2303 2466
2304 rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE, 2467 rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
2305 skb, pn533_data_exchange_complete, 2468 skb, pn533_data_exchange_complete,
@@ -2370,7 +2533,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
2370 2533
2371static void pn533_wq_mi_recv(struct work_struct *work) 2534static void pn533_wq_mi_recv(struct work_struct *work)
2372{ 2535{
2373 struct pn533 *dev = container_of(work, struct pn533, mi_work); 2536 struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
2374 2537
2375 struct sk_buff *skb; 2538 struct sk_buff *skb;
2376 int rc; 2539 int rc;
@@ -2418,6 +2581,61 @@ error:
2418 queue_work(dev->wq, &dev->cmd_work); 2581 queue_work(dev->wq, &dev->cmd_work);
2419} 2582}
2420 2583
2584static void pn533_wq_mi_send(struct work_struct *work)
2585{
2586 struct pn533 *dev = container_of(work, struct pn533, mi_tx_work);
2587 struct sk_buff *skb;
2588 int rc;
2589
2590 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
2591
2592 /* Grab the first skb in the queue */
2593 skb = skb_dequeue(&dev->fragment_skb);
2594
2595 if (skb == NULL) { /* No more data */
2596 /* Reset the queue for future use */
2597 skb_queue_head_init(&dev->fragment_skb);
2598 goto error;
2599 }
2600
2601 switch (dev->device_type) {
2602 case PN533_DEVICE_PASORI:
2603 if (dev->tgt_active_prot != NFC_PROTO_FELICA) {
2604 rc = -EIO;
2605 break;
2606 }
2607
2608 rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU,
2609 skb,
2610 pn533_data_exchange_complete,
2611 dev->cmd_complete_dep_arg);
2612
2613 break;
2614
2615 default:
2616 /* Still some fragments? */
 2617		rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
2618 skb,
2619 pn533_data_exchange_complete,
2620 dev->cmd_complete_dep_arg);
2621
2622 break;
2623 }
2624
2625 if (rc == 0) /* success */
2626 return;
2627
2628 nfc_dev_err(&dev->interface->dev,
2629 "Error %d when trying to perform data_exchange", rc);
2630
2631 dev_kfree_skb(skb);
2632 kfree(dev->cmd_complete_dep_arg);
2633
2634error:
2635 pn533_send_ack(dev, GFP_KERNEL);
2636 queue_work(dev->wq, &dev->cmd_work);
2637}
2638
2421static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 2639static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
2422 u8 cfgdata_len) 2640 u8 cfgdata_len)
2423{ 2641{
@@ -2562,6 +2780,8 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
2562 u8 rf_field = !!rf; 2780 u8 rf_field = !!rf;
2563 int rc; 2781 int rc;
2564 2782
2783 rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
2784
2565 rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD, 2785 rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
2566 (u8 *)&rf_field, 1); 2786 (u8 *)&rf_field, 1);
2567 if (rc) { 2787 if (rc) {
@@ -2605,17 +2825,6 @@ static int pn533_setup(struct pn533 *dev)
2605 2825
2606 switch (dev->device_type) { 2826 switch (dev->device_type) {
2607 case PN533_DEVICE_STD: 2827 case PN533_DEVICE_STD:
2608 max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
2609 max_retries.mx_rty_psl = 2;
2610 max_retries.mx_rty_passive_act =
2611 PN533_CONFIG_MAX_RETRIES_NO_RETRY;
2612
2613 timing.rfu = PN533_CONFIG_TIMING_102;
2614 timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
2615 timing.dep_timeout = PN533_CONFIG_TIMING_409;
2616
2617 break;
2618
2619 case PN533_DEVICE_PASORI: 2828 case PN533_DEVICE_PASORI:
2620 case PN533_DEVICE_ACR122U: 2829 case PN533_DEVICE_ACR122U:
2621 max_retries.mx_rty_atr = 0x2; 2830 max_retries.mx_rty_atr = 0x2;
@@ -2729,9 +2938,11 @@ static int pn533_probe(struct usb_interface *interface,
2729 2938
2730 INIT_WORK(&dev->cmd_work, pn533_wq_cmd); 2939 INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
2731 INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete); 2940 INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
2732 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); 2941 INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
2942 INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
2733 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data); 2943 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
2734 INIT_WORK(&dev->poll_work, pn533_wq_poll); 2944 INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
2945 INIT_WORK(&dev->rf_work, pn533_wq_rf);
2735 dev->wq = alloc_ordered_workqueue("pn533", 0); 2946 dev->wq = alloc_ordered_workqueue("pn533", 0);
2736 if (dev->wq == NULL) 2947 if (dev->wq == NULL)
2737 goto error; 2948 goto error;
@@ -2741,6 +2952,7 @@ static int pn533_probe(struct usb_interface *interface,
2741 dev->listen_timer.function = pn533_listen_mode_timer; 2952 dev->listen_timer.function = pn533_listen_mode_timer;
2742 2953
2743 skb_queue_head_init(&dev->resp_q); 2954 skb_queue_head_init(&dev->resp_q);
2955 skb_queue_head_init(&dev->fragment_skb);
2744 2956
2745 INIT_LIST_HEAD(&dev->cmd_queue); 2957 INIT_LIST_HEAD(&dev->cmd_queue);
2746 2958
@@ -2842,6 +3054,7 @@ static void pn533_disconnect(struct usb_interface *interface)
2842 usb_kill_urb(dev->in_urb); 3054 usb_kill_urb(dev->in_urb);
2843 usb_kill_urb(dev->out_urb); 3055 usb_kill_urb(dev->out_urb);
2844 3056
3057 flush_delayed_work(&dev->poll_work);
2845 destroy_workqueue(dev->wq); 3058 destroy_workqueue(dev->wq);
2846 3059
2847 skb_queue_purge(&dev->resp_q); 3060 skb_queue_purge(&dev->resp_q);
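
The new pn533_fill_fragment_skbs() above chops an oversized transceive buffer into a queue of frame-sized chunks, each prefixed with a one-byte TG header; a chunk that fills the frame also carries the MI (more information) bit, which is what keeps pn533_data_exchange_complete() requeueing mi_tx_work until the fragment queue drains. Below is a standalone sketch of that chunking rule; MI_MASK and FRAME_MAXLEN are illustrative stand-ins for the driver's PN533_CMD_MI_MASK and PN533_CMD_DATAFRAME_MAXLEN, not the real values.

/*
 * Standalone sketch of the fragmentation rule, using plain buffers
 * instead of sk_buffs. Constants are assumptions, not driver values.
 */
#include <stdio.h>
#include <string.h>

#define MI_MASK		0x40	/* assumed MI flag position */
#define FRAME_MAXLEN	262	/* assumed per-frame payload limit */

static void emit_fragments(const unsigned char *data, size_t len)
{
	unsigned char frag[FRAME_MAXLEN + 1];

	while (len > 0) {
		size_t chunk = len > FRAME_MAXLEN ? FRAME_MAXLEN : len;

		frag[0] = 1;			/* TG byte */
		if (chunk == FRAME_MAXLEN)
			frag[0] |= MI_MASK;	/* frame is full: more follows */
		memcpy(frag + 1, data, chunk);

		printf("frame: %zu payload bytes, MI=%d\n",
		       chunk, !!(frag[0] & MI_MASK));
		data += chunk;
		len -= chunk;
	}
}

int main(void)
{
	unsigned char buf[600] = { 0 };

	emit_fragments(buf, sizeof(buf));	/* emits 262 + 262 + 76 */
	return 0;
}

As in the driver, a payload that is an exact multiple of the frame size would still mark its final chunk MI, since the test keys off the chunk filling the frame rather than off the remaining length.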
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 8cf64c19f022..01e27d4bdd0d 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -25,11 +25,14 @@
25#include <linux/miscdevice.h> 25#include <linux/miscdevice.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28 28#include <linux/nfc.h>
29#include <linux/firmware.h>
30#include <linux/unaligned/access_ok.h>
29#include <linux/platform_data/pn544.h> 31#include <linux/platform_data/pn544.h>
30 32
31#include <net/nfc/hci.h> 33#include <net/nfc/hci.h>
32#include <net/nfc/llc.h> 34#include <net/nfc/llc.h>
35#include <net/nfc/nfc.h>
33 36
34#include "pn544.h" 37#include "pn544.h"
35 38
@@ -55,6 +58,58 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
55 58
56#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c" 59#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
57 60
61#define PN544_FW_CMD_WRITE 0x08
62#define PN544_FW_CMD_CHECK 0x06
63
64struct pn544_i2c_fw_frame_write {
65 u8 cmd;
66 u16 be_length;
67 u8 be_dest_addr[3];
68 u16 be_datalen;
69 u8 data[];
70} __packed;
71
72struct pn544_i2c_fw_frame_check {
73 u8 cmd;
74 u16 be_length;
75 u8 be_start_addr[3];
76 u16 be_datalen;
77 u16 be_crc;
78} __packed;
79
80struct pn544_i2c_fw_frame_response {
81 u8 status;
82 u16 be_length;
83} __packed;
84
85struct pn544_i2c_fw_blob {
86 u32 be_size;
87 u32 be_destaddr;
88 u8 data[];
89};
90
91#define PN544_FW_CMD_RESULT_TIMEOUT 0x01
92#define PN544_FW_CMD_RESULT_BAD_CRC 0x02
93#define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08
94#define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B
95#define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11
96#define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18
97#define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74
98
99#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
100
101#define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7
102#define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE
103#define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8
104#define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\
105 PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\
106 PN544_FW_WRITE_BUFFER_MAX_LEN)
107
108#define FW_WORK_STATE_IDLE 1
109#define FW_WORK_STATE_START 2
110#define FW_WORK_STATE_WAIT_WRITE_ANSWER 3
111#define FW_WORK_STATE_WAIT_CHECK_ANSWER 4
112
58struct pn544_i2c_phy { 113struct pn544_i2c_phy {
59 struct i2c_client *i2c_dev; 114 struct i2c_client *i2c_dev;
60 struct nfc_hci_dev *hdev; 115 struct nfc_hci_dev *hdev;
@@ -64,7 +119,18 @@ struct pn544_i2c_phy {
64 unsigned int gpio_fw; 119 unsigned int gpio_fw;
65 unsigned int en_polarity; 120 unsigned int en_polarity;
66 121
122 struct work_struct fw_work;
123 int fw_work_state;
124 char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
125 const struct firmware *fw;
126 u32 fw_blob_dest_addr;
127 size_t fw_blob_size;
128 const u8 *fw_blob_data;
129 size_t fw_written;
130 int fw_cmd_result;
131
67 int powered; 132 int powered;
133 int run_mode;
68 134
69 int hard_fault; /* 135 int hard_fault; /*
70	 * < 0 if hardware error occurred (e.g. i2c err)	136	 * < 0 if hardware error occurred (e.g. i2c err)
@@ -122,15 +188,22 @@ out:
122 gpio_set_value(phy->gpio_en, !phy->en_polarity); 188 gpio_set_value(phy->gpio_en, !phy->en_polarity);
123} 189}
124 190
191static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
192{
193 gpio_set_value(phy->gpio_fw, run_mode == PN544_FW_MODE ? 1 : 0);
194 gpio_set_value(phy->gpio_en, phy->en_polarity);
195 usleep_range(10000, 15000);
196
197 phy->run_mode = run_mode;
198}
199
125static int pn544_hci_i2c_enable(void *phy_id) 200static int pn544_hci_i2c_enable(void *phy_id)
126{ 201{
127 struct pn544_i2c_phy *phy = phy_id; 202 struct pn544_i2c_phy *phy = phy_id;
128 203
129 pr_info(DRIVER_DESC ": %s\n", __func__); 204 pr_info(DRIVER_DESC ": %s\n", __func__);
130 205
131 gpio_set_value(phy->gpio_fw, 0); 206 pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
132 gpio_set_value(phy->gpio_en, phy->en_polarity);
133 usleep_range(10000, 15000);
134 207
135 phy->powered = 1; 208 phy->powered = 1;
136 209
@@ -305,6 +378,42 @@ flush:
305 return r; 378 return r;
306} 379}
307 380
381static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
382{
383 int r;
384 struct pn544_i2c_fw_frame_response response;
385 struct i2c_client *client = phy->i2c_dev;
386
387 r = i2c_master_recv(client, (char *) &response, sizeof(response));
388 if (r != sizeof(response)) {
389 dev_err(&client->dev, "cannot read fw status\n");
390 return -EIO;
391 }
392
393 usleep_range(3000, 6000);
394
395 switch (response.status) {
396 case 0:
397 return 0;
398 case PN544_FW_CMD_RESULT_TIMEOUT:
399 return -ETIMEDOUT;
400 case PN544_FW_CMD_RESULT_BAD_CRC:
401 return -ENODATA;
402 case PN544_FW_CMD_RESULT_ACCESS_DENIED:
403 return -EACCES;
404 case PN544_FW_CMD_RESULT_PROTOCOL_ERROR:
405 return -EPROTO;
406 case PN544_FW_CMD_RESULT_INVALID_PARAMETER:
407 return -EINVAL;
408 case PN544_FW_CMD_RESULT_INVALID_LENGTH:
409 return -EBADMSG;
410 case PN544_FW_CMD_RESULT_WRITE_FAILED:
411 return -EIO;
412 default:
413 return -EIO;
414 }
415}
416
308/* 417/*
309 * Reads an SHDLC frame from the chip. This is not as straightforward as it	418 * Reads an SHDLC frame from the chip. This is not as straightforward as it
310 * seems. There are cases where we could lose the frame start synchronization.	419 * seems. There are cases where we could lose the frame start synchronization.
@@ -339,19 +448,23 @@ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
339 if (phy->hard_fault != 0) 448 if (phy->hard_fault != 0)
340 return IRQ_HANDLED; 449 return IRQ_HANDLED;
341 450
342 r = pn544_hci_i2c_read(phy, &skb); 451 if (phy->run_mode == PN544_FW_MODE) {
343 if (r == -EREMOTEIO) { 452 phy->fw_cmd_result = pn544_hci_i2c_fw_read_status(phy);
344 phy->hard_fault = r; 453 schedule_work(&phy->fw_work);
454 } else {
455 r = pn544_hci_i2c_read(phy, &skb);
456 if (r == -EREMOTEIO) {
457 phy->hard_fault = r;
345 458
346 nfc_hci_recv_frame(phy->hdev, NULL); 459 nfc_hci_recv_frame(phy->hdev, NULL);
347 460
348 return IRQ_HANDLED; 461 return IRQ_HANDLED;
349 } else if ((r == -ENOMEM) || (r == -EBADMSG)) { 462 } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
350 return IRQ_HANDLED; 463 return IRQ_HANDLED;
351 } 464 }
352
353 nfc_hci_recv_frame(phy->hdev, skb);
354 465
466 nfc_hci_recv_frame(phy->hdev, skb);
467 }
355 return IRQ_HANDLED; 468 return IRQ_HANDLED;
356} 469}
357 470
@@ -361,6 +474,215 @@ static struct nfc_phy_ops i2c_phy_ops = {
361 .disable = pn544_hci_i2c_disable, 474 .disable = pn544_hci_i2c_disable,
362}; 475};
363 476
477static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
478{
479 struct pn544_i2c_phy *phy = phy_id;
480
481 pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
482 firmware_name);
483
484 strcpy(phy->firmware_name, firmware_name);
485
486 phy->fw_work_state = FW_WORK_STATE_START;
487
488 schedule_work(&phy->fw_work);
489
490 return 0;
491}
492
493static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
494 int result)
495{
496 pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
497
498 pn544_hci_i2c_disable(phy);
499
500 phy->fw_work_state = FW_WORK_STATE_IDLE;
501
502 if (phy->fw) {
503 release_firmware(phy->fw);
504 phy->fw = NULL;
505 }
506
507 nfc_fw_download_done(phy->hdev->ndev, phy->firmware_name, (u32) -result);
508}
509
510static int pn544_hci_i2c_fw_write_cmd(struct i2c_client *client, u32 dest_addr,
511 const u8 *data, u16 datalen)
512{
513 u8 frame[PN544_FW_I2C_MAX_PAYLOAD];
514 struct pn544_i2c_fw_frame_write *framep;
515 u16 params_len;
516 int framelen;
517 int r;
518
519 if (datalen > PN544_FW_I2C_WRITE_DATA_MAX_LEN)
520 datalen = PN544_FW_I2C_WRITE_DATA_MAX_LEN;
521
522 framep = (struct pn544_i2c_fw_frame_write *) frame;
523
524 params_len = sizeof(framep->be_dest_addr) +
525 sizeof(framep->be_datalen) + datalen;
526 framelen = params_len + sizeof(framep->cmd) +
527 sizeof(framep->be_length);
528
529 framep->cmd = PN544_FW_CMD_WRITE;
530
531 put_unaligned_be16(params_len, &framep->be_length);
532
533 framep->be_dest_addr[0] = (dest_addr & 0xff0000) >> 16;
534 framep->be_dest_addr[1] = (dest_addr & 0xff00) >> 8;
535 framep->be_dest_addr[2] = dest_addr & 0xff;
536
537 put_unaligned_be16(datalen, &framep->be_datalen);
538
539 memcpy(framep->data, data, datalen);
540
541 r = i2c_master_send(client, frame, framelen);
542
543 if (r == framelen)
544 return datalen;
545 else if (r < 0)
546 return r;
547 else
548 return -EIO;
549}
550
551static int pn544_hci_i2c_fw_check_cmd(struct i2c_client *client, u32 start_addr,
552 const u8 *data, u16 datalen)
553{
554 struct pn544_i2c_fw_frame_check frame;
555 int r;
556 u16 crc;
557
558 /* calculate local crc for the data we want to check */
559 crc = crc_ccitt(0xffff, data, datalen);
560
561 frame.cmd = PN544_FW_CMD_CHECK;
562
563 put_unaligned_be16(sizeof(frame.be_start_addr) +
564 sizeof(frame.be_datalen) + sizeof(frame.be_crc),
565 &frame.be_length);
566
567 /* tell the chip the memory region to which our crc applies */
568 frame.be_start_addr[0] = (start_addr & 0xff0000) >> 16;
569 frame.be_start_addr[1] = (start_addr & 0xff00) >> 8;
570 frame.be_start_addr[2] = start_addr & 0xff;
571
572 put_unaligned_be16(datalen, &frame.be_datalen);
573
574 /*
575 * and give our local crc. Chip will calculate its own crc for the
576 * region and compare with ours.
577 */
578 put_unaligned_be16(crc, &frame.be_crc);
579
580 r = i2c_master_send(client, (const char *) &frame, sizeof(frame));
581
582 if (r == sizeof(frame))
583 return 0;
584 else if (r < 0)
585 return r;
586 else
587 return -EIO;
588}
589
590static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy)
591{
592 int r;
593
594 r = pn544_hci_i2c_fw_write_cmd(phy->i2c_dev,
595 phy->fw_blob_dest_addr + phy->fw_written,
596 phy->fw_blob_data + phy->fw_written,
597 phy->fw_blob_size - phy->fw_written);
598 if (r < 0)
599 return r;
600
601 phy->fw_written += r;
602 phy->fw_work_state = FW_WORK_STATE_WAIT_WRITE_ANSWER;
603
604 return 0;
605}
606
607static void pn544_hci_i2c_fw_work(struct work_struct *work)
608{
609 struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
610 fw_work);
611 int r;
612 struct pn544_i2c_fw_blob *blob;
613
614 switch (phy->fw_work_state) {
615 case FW_WORK_STATE_START:
616 pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE);
617
618 r = request_firmware(&phy->fw, phy->firmware_name,
619 &phy->i2c_dev->dev);
620 if (r < 0)
621 goto exit_state_start;
622
623 blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
624 phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
625 phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr);
626 phy->fw_blob_data = blob->data;
627
628 phy->fw_written = 0;
629 r = pn544_hci_i2c_fw_write_chunk(phy);
630
631exit_state_start:
632 if (r < 0)
633 pn544_hci_i2c_fw_work_complete(phy, r);
634 break;
635
636 case FW_WORK_STATE_WAIT_WRITE_ANSWER:
637 r = phy->fw_cmd_result;
638 if (r < 0)
639 goto exit_state_wait_write_answer;
640
641 if (phy->fw_written == phy->fw_blob_size) {
642 r = pn544_hci_i2c_fw_check_cmd(phy->i2c_dev,
643 phy->fw_blob_dest_addr,
644 phy->fw_blob_data,
645 phy->fw_blob_size);
646 if (r < 0)
647 goto exit_state_wait_write_answer;
648 phy->fw_work_state = FW_WORK_STATE_WAIT_CHECK_ANSWER;
649 break;
650 }
651
652 r = pn544_hci_i2c_fw_write_chunk(phy);
653
654exit_state_wait_write_answer:
655 if (r < 0)
656 pn544_hci_i2c_fw_work_complete(phy, r);
657 break;
658
659 case FW_WORK_STATE_WAIT_CHECK_ANSWER:
660 r = phy->fw_cmd_result;
661 if (r < 0)
662 goto exit_state_wait_check_answer;
663
664 blob = (struct pn544_i2c_fw_blob *) (phy->fw_blob_data +
665 phy->fw_blob_size);
666 phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
667 if (phy->fw_blob_size != 0) {
668 phy->fw_blob_dest_addr =
669 get_unaligned_be32(&blob->be_destaddr);
670 phy->fw_blob_data = blob->data;
671
672 phy->fw_written = 0;
673 r = pn544_hci_i2c_fw_write_chunk(phy);
674 }
675
676exit_state_wait_check_answer:
677 if (r < 0 || phy->fw_blob_size == 0)
678 pn544_hci_i2c_fw_work_complete(phy, r);
679 break;
680
681 default:
682 break;
683 }
684}
685
364static int pn544_hci_i2c_probe(struct i2c_client *client, 686static int pn544_hci_i2c_probe(struct i2c_client *client,
365 const struct i2c_device_id *id) 687 const struct i2c_device_id *id)
366{ 688{
@@ -384,6 +706,9 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
384 return -ENOMEM; 706 return -ENOMEM;
385 } 707 }
386 708
709 INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work);
710 phy->fw_work_state = FW_WORK_STATE_IDLE;
711
387 phy->i2c_dev = client; 712 phy->i2c_dev = client;
388 i2c_set_clientdata(client, phy); 713 i2c_set_clientdata(client, phy);
389 714
@@ -420,7 +745,8 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
420 745
421 r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, 746 r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
422 PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM, 747 PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM,
423 PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev); 748 PN544_HCI_I2C_LLC_MAX_PAYLOAD,
749 pn544_hci_i2c_fw_download, &phy->hdev);
424 if (r < 0) 750 if (r < 0)
425 goto err_hci; 751 goto err_hci;
426 752
@@ -443,6 +769,10 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
443 769
444 dev_dbg(&client->dev, "%s\n", __func__); 770 dev_dbg(&client->dev, "%s\n", __func__);
445 771
772 cancel_work_sync(&phy->fw_work);
773 if (phy->fw_work_state != FW_WORK_STATE_IDLE)
774 pn544_hci_i2c_fw_work_complete(phy, -ENODEV);
775
446 pn544_hci_remove(phy->hdev); 776 pn544_hci_remove(phy->hdev);
447 777
448 if (phy->powered) 778 if (phy->powered)
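
The firmware image format implied by pn544_hci_i2c_fw_work() above is a chain of blobs: a big-endian 32-bit size, a big-endian 32-bit destination address, then the data, with the next blob starting immediately after the data and a zero size terminating the walk. A user-space sketch of that layout parsing a hand-built image; get_be32() is a stand-in for the kernel's get_unaligned_be32().

#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static void walk_blobs(const uint8_t *img)
{
	for (;;) {
		uint32_t size = get_be32(img);

		if (size == 0)		/* zero size ends the chain */
			break;
		printf("blob: %u bytes -> 0x%06x\n",
		       (unsigned)size, (unsigned)get_be32(img + 4));
		img += 8 + size;	/* next blob follows the data */
	}
}

int main(void)
{
	uint8_t img[] = {
		0, 0, 0, 2,  0, 0, 0x10, 0,  0xAA, 0xBB,  /* 2 bytes @ 0x001000 */
		0, 0, 0, 1,  0, 0, 0x20, 0,  0xCC,        /* 1 byte  @ 0x002000 */
		0, 0, 0, 0,                               /* terminator */
	};

	walk_blobs(img);
	return 0;
}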
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index b5d3d18179eb..ee67de50c36f 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -45,7 +45,7 @@ static int pn544_mei_probe(struct mei_cl_device *device,
45 45
46 r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, 46 r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
47 MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, 47 MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
48 &phy->hdev); 48 NULL, &phy->hdev);
49 if (r < 0) { 49 if (r < 0) {
50 nfc_mei_phy_free(phy); 50 nfc_mei_phy_free(phy);
51 51
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 0d17da7675b7..078e62feba17 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -31,9 +31,6 @@
31/* Timing restrictions (ms) */ 31/* Timing restrictions (ms) */
32#define PN544_HCI_RESETVEN_TIME 30 32#define PN544_HCI_RESETVEN_TIME 30
33 33
34#define HCI_MODE 0
35#define FW_MODE 1
36
37enum pn544_state { 34enum pn544_state {
38 PN544_ST_COLD, 35 PN544_ST_COLD,
39 PN544_ST_FW_READY, 36 PN544_ST_FW_READY,
@@ -130,6 +127,8 @@ struct pn544_hci_info {
130 int async_cb_type; 127 int async_cb_type;
131 data_exchange_cb_t async_cb; 128 data_exchange_cb_t async_cb;
132 void *async_cb_context; 129 void *async_cb_context;
130
131 fw_download_t fw_download;
133}; 132};
134 133
135static int pn544_hci_open(struct nfc_hci_dev *hdev) 134static int pn544_hci_open(struct nfc_hci_dev *hdev)
@@ -782,6 +781,17 @@ exit:
782 return r; 781 return r;
783} 782}
784 783
784static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
785 const char *firmware_name)
786{
787 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
788
789 if (info->fw_download == NULL)
790 return -ENOTSUPP;
791
792 return info->fw_download(info->phy_id, firmware_name);
793}
794
785static struct nfc_hci_ops pn544_hci_ops = { 795static struct nfc_hci_ops pn544_hci_ops = {
786 .open = pn544_hci_open, 796 .open = pn544_hci_open,
787 .close = pn544_hci_close, 797 .close = pn544_hci_close,
@@ -796,11 +806,12 @@ static struct nfc_hci_ops pn544_hci_ops = {
796 .tm_send = pn544_hci_tm_send, 806 .tm_send = pn544_hci_tm_send,
797 .check_presence = pn544_hci_check_presence, 807 .check_presence = pn544_hci_check_presence,
798 .event_received = pn544_hci_event_received, 808 .event_received = pn544_hci_event_received,
809 .fw_download = pn544_hci_fw_download,
799}; 810};
800 811
801int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, 812int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
802 int phy_headroom, int phy_tailroom, int phy_payload, 813 int phy_headroom, int phy_tailroom, int phy_payload,
803 struct nfc_hci_dev **hdev) 814 fw_download_t fw_download, struct nfc_hci_dev **hdev)
804{ 815{
805 struct pn544_hci_info *info; 816 struct pn544_hci_info *info;
806 u32 protocols; 817 u32 protocols;
@@ -816,6 +827,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
816 827
817 info->phy_ops = phy_ops; 828 info->phy_ops = phy_ops;
818 info->phy_id = phy_id; 829 info->phy_id = phy_id;
830 info->fw_download = fw_download;
819 info->state = PN544_ST_COLD; 831 info->state = PN544_ST_COLD;
820 mutex_init(&info->info_lock); 832 mutex_init(&info->info_lock);
821 833
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index f47c6454914b..01020e585443 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -24,9 +24,14 @@
24 24
25#define DRIVER_DESC "HCI NFC driver for PN544" 25#define DRIVER_DESC "HCI NFC driver for PN544"
26 26
27#define PN544_HCI_MODE 0
28#define PN544_FW_MODE 1
29
30typedef int (*fw_download_t)(void *context, const char *firmware_name);
31
27int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, 32int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
28 int phy_headroom, int phy_tailroom, int phy_payload, 33 int phy_headroom, int phy_tailroom, int phy_payload,
29 struct nfc_hci_dev **hdev); 34 fw_download_t fw_download, struct nfc_hci_dev **hdev);
30void pn544_hci_remove(struct nfc_hci_dev *hdev); 35void pn544_hci_remove(struct nfc_hci_dev *hdev);
31 36
32#endif /* __LOCAL_PN544_H_ */ 37#endif /* __LOCAL_PN544_H_ */
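
With this signature change a phy that cannot flash firmware simply passes NULL for the callback, as the mei binding above does, and pn544_hci_fw_download() then reports the operation as unsupported. A standalone sketch of that optional-callback pattern, using userspace EOPNOTSUPP in place of the kernel's ENOTSUPP:

#include <errno.h>
#include <stdio.h>

typedef int (*fw_download_t)(void *context, const char *firmware_name);

struct hci_core {
	void *phy_id;
	fw_download_t fw_download;	/* NULL when the phy cannot flash */
};

static int i2c_fw_download(void *context, const char *firmware_name)
{
	(void)context;
	printf("flashing %s\n", firmware_name);
	return 0;
}

static int core_fw_download(struct hci_core *core, const char *name)
{
	if (core->fw_download == NULL)
		return -EOPNOTSUPP;	/* the kernel code returns -ENOTSUPP */
	return core->fw_download(core->phy_id, name);
}

int main(void)
{
	struct hci_core i2c = { NULL, i2c_fw_download };
	struct hci_core mei = { NULL, NULL };

	printf("i2c: %d\n", core_fw_download(&i2c, "pn544.fw"));
	printf("mei: %d\n", core_fw_download(&mei, "pn544.fw"));
	return 0;
}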
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b821a62958fd..e8ccf6c0f08a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3998,6 +3998,49 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
3998} 3998}
3999 3999
4000/** 4000/**
4001 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4002 * @dev: PCI device to query
4003 * @speed: storage for minimum speed
4004 * @width: storage for minimum width
4005 *
4006 * This function will walk up the PCI device chain and determine the minimum
4007 * link width and speed of the device.
4008 */
4009int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4010 enum pcie_link_width *width)
4011{
4012 int ret;
4013
4014 *speed = PCI_SPEED_UNKNOWN;
4015 *width = PCIE_LNK_WIDTH_UNKNOWN;
4016
4017 while (dev) {
4018 u16 lnksta;
4019 enum pci_bus_speed next_speed;
4020 enum pcie_link_width next_width;
4021
4022 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4023 if (ret)
4024 return ret;
4025
4026 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4027 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4028 PCI_EXP_LNKSTA_NLW_SHIFT;
4029
4030 if (next_speed < *speed)
4031 *speed = next_speed;
4032
4033 if (next_width < *width)
4034 *width = next_width;
4035
4036 dev = dev->bus->self;
4037 }
4038
4039 return 0;
4040}
4041EXPORT_SYMBOL(pcie_get_minimum_link);
4042
4043/**
4001 * pci_select_bars - Make BAR mask from the type of resource 4044 * pci_select_bars - Make BAR mask from the type of resource
4002 * @dev: the PCI device for which BAR mask is made 4045 * @dev: the PCI device for which BAR mask is made
4003 * @flags: resource type mask to be selected 4046 * @flags: resource type mask to be selected
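
A sketch of how a driver might consume the newly exported pcie_get_minimum_link() to warn about a constrained slot; the speed and width thresholds below are illustrative, not taken from this patch.

#include <linux/pci.h>

static void example_check_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;

	/* walks every link from pdev up towards the root port */
	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;		/* a config read failed along the chain */

	if (speed < PCIE_SPEED_5_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "weakest upstream link limits performance (speed %d, width %d)\n",
			 speed, width);
}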
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 816c297f170c..8a00c063d7bc 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -6,6 +6,9 @@
6#define PCI_CFG_SPACE_SIZE 256 6#define PCI_CFG_SPACE_SIZE 256
7#define PCI_CFG_SPACE_EXP_SIZE 4096 7#define PCI_CFG_SPACE_EXP_SIZE 4096
8 8
9extern const unsigned char pcix_bus_speed[];
10extern const unsigned char pcie_link_speed[];
11
9/* Functions internal to the PCI core code */ 12/* Functions internal to the PCI core code */
10 13
11int pci_create_sysfs_dev_files(struct pci_dev *pdev); 14int pci_create_sysfs_dev_files(struct pci_dev *pdev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index eeb50bd62402..4f9cc93c3b59 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -518,7 +518,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
518 return bridge; 518 return bridge;
519} 519}
520 520
521static unsigned char pcix_bus_speed[] = { 521const unsigned char pcix_bus_speed[] = {
522 PCI_SPEED_UNKNOWN, /* 0 */ 522 PCI_SPEED_UNKNOWN, /* 0 */
523 PCI_SPEED_66MHz_PCIX, /* 1 */ 523 PCI_SPEED_66MHz_PCIX, /* 1 */
524 PCI_SPEED_100MHz_PCIX, /* 2 */ 524 PCI_SPEED_100MHz_PCIX, /* 2 */
@@ -537,7 +537,7 @@ static unsigned char pcix_bus_speed[] = {
537 PCI_SPEED_133MHz_PCIX_533 /* F */ 537 PCI_SPEED_133MHz_PCIX_533 /* F */
538}; 538};
539 539
540static unsigned char pcie_link_speed[] = { 540const unsigned char pcie_link_speed[] = {
541 PCI_SPEED_UNKNOWN, /* 0 */ 541 PCI_SPEED_UNKNOWN, /* 0 */
542 PCIE_SPEED_2_5GT, /* 1 */ 542 PCIE_SPEED_2_5GT, /* 1 */
543 PCIE_SPEED_5_0GT, /* 2 */ 543 PCIE_SPEED_5_0GT, /* 2 */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 37049e433c9e..7052a839b0ea 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -581,8 +581,10 @@ struct iscsi_kwqe_init1 {
581#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 581#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
582#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) 582#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
583#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 583#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
584#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) 584#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
585#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 585#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
586#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
587#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
586 u16 cq_num_wqes; 588 u16 cq_num_wqes;
587#elif defined(__LITTLE_ENDIAN) 589#elif defined(__LITTLE_ENDIAN)
588 u16 cq_num_wqes; 590 u16 cq_num_wqes;
@@ -593,8 +595,10 @@ struct iscsi_kwqe_init1 {
593#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 595#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
594#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) 596#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
595#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 597#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
596#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) 598#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
597#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 599#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
600#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
601#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
598 u8 cq_log_wqes_per_page; 602 u8 cq_log_wqes_per_page;
599#endif 603#endif
600#if defined(__BIG_ENDIAN) 604#if defined(__BIG_ENDIAN)
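
The HSI update above carves a TIME_STAMPS_ENABLE flag out of the old two-bit reserved field, leaving only bit 7 reserved. A quick sketch of the mask/shift idiom these paired defines support:

#include <stdio.h>

#define TIME_STAMPS_ENABLE		(0x1 << 6)
#define TIME_STAMPS_ENABLE_SHIFT	6

int main(void)
{
	unsigned int flags = 0;

	flags |= TIME_STAMPS_ENABLE;	/* request TCP timestamps */
	printf("enabled: %u\n",
	       (flags & TIME_STAMPS_ENABLE) >> TIME_STAMPS_ENABLE_SHIFT);

	flags &= ~TIME_STAMPS_ENABLE;	/* clear the flag again */
	printf("enabled: %u\n",
	       (flags & TIME_STAMPS_ENABLE) >> TIME_STAMPS_ENABLE_SHIFT);
	return 0;
}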
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index b6f6f436777b..34c294b42c84 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -172,16 +172,14 @@ void bnx2i_start(void *handle)
172 struct bnx2i_hba *hba = handle; 172 struct bnx2i_hba *hba = handle;
173 int i = HZ; 173 int i = HZ;
174 174
175 /* 175 /* On some bnx2x devices, it is possible that iSCSI is no
176	 * We should never register devices that don't support iSCSI	176	 * longer supported after firmware is downloaded. In that
177	 * (see bnx2i_init_one), so something is wrong if we try to	177	 * case, the iscsi_init_msg will return failure.
178	 * start a iSCSI adapter on hardware with 0 supported iSCSI
179	 * connections
180 */ 178 */
181 BUG_ON(!hba->cnic->max_iscsi_conn);
182 179
183 bnx2i_send_fw_iscsi_init_msg(hba); 180 bnx2i_send_fw_iscsi_init_msg(hba);
184 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) 181 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) &&
182 !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--)
185 msleep(BNX2I_INIT_POLL_TIME); 183 msleep(BNX2I_INIT_POLL_TIME);
186} 184}
187 185
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 36171fd2826b..2cd9b0e44a41 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -138,7 +138,7 @@ config SSB_DRIVER_MIPS
138 138
139config SSB_SFLASH 139config SSB_SFLASH
140 bool "SSB serial flash support" 140 bool "SSB serial flash support"
141 depends on SSB_DRIVER_MIPS && BROKEN 141 depends on SSB_DRIVER_MIPS
142 default y 142 default y
143 143
144# Assumption: We are on embedded, if we compile the MIPS core. 144# Assumption: We are on embedded, if we compile the MIPS core.
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
index e84cf04f4416..50328de712fa 100644
--- a/drivers/ssb/driver_chipcommon_sflash.c
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -151,8 +151,8 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
151 sflash->size = sflash->blocksize * sflash->numblocks; 151 sflash->size = sflash->blocksize * sflash->numblocks;
152 sflash->present = true; 152 sflash->present = true;
153 153
154 pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n", 154 pr_info("Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
155 e->name, e->blocksize, e->numblocks); 155 e->name, sflash->size / 1024, e->blocksize, e->numblocks);
156 156
157 /* Prepare platform device, but don't register it yet. It's too early, 157 /* Prepare platform device, but don't register it yet. It's too early,
158 * malloc (required by device_private_init) is not available yet. */ 158 * malloc (required by device_private_init) is not available yet. */
@@ -160,7 +160,5 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
160 sflash->size; 160 sflash->size;
161 ssb_sflash_dev.dev.platform_data = sflash; 161 ssb_sflash_dev.dev.platform_data = sflash;
162 162
163 pr_err("Serial flash support is not implemented yet!\n"); 163 return 0;
164
165 return -ENOTSUPP;
166} 164}
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 57a08c5771f2..8acff44a9e75 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
86 86
87 apdev_priv = netdev_priv(pDevice->apdev); 87 apdev_priv = netdev_priv(pDevice->apdev);
88 *apdev_priv = *pDevice; 88 *apdev_priv = *pDevice;
89 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); 89 eth_hw_addr_inherit(pDevice->apdev, dev);
90 90
91 pDevice->apdev->netdev_ops = &apdev_netdev_ops; 91 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
92 92
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 46e0e41e7e60..b5cd2e44e53d 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -460,7 +460,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
460 } 460 }
461 if (sValue.dwValue == 1) { 461 if (sValue.dwValue == 1) {
462 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n"); 462 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
463 memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN); 463 eth_hw_addr_inherit(pDevice->wpadev, pDevice->dev);
464 pDevice->bWPADEVUp = true; 464 pDevice->bWPADEVUp = true;
465 } else { 465 } else {
466 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n"); 466 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 869f62c678e6..e8d9ecd2913a 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -96,7 +96,7 @@ static int wpa_init_wpadev(PSDevice pDevice)
96 96
97 wpadev_priv = netdev_priv(pDevice->wpadev); 97 wpadev_priv = netdev_priv(pDevice->wpadev);
98 *wpadev_priv = *pDevice; 98 *wpadev_priv = *pDevice;
99 memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN); 99 eth_hw_addr_inherit(pDevice->wpadev, dev);
100 pDevice->wpadev->base_addr = dev->base_addr; 100 pDevice->wpadev->base_addr = dev->base_addr;
101 pDevice->wpadev->irq = dev->irq; 101 pDevice->wpadev->irq = dev->irq;
102 pDevice->wpadev->mem_start = dev->mem_start; 102 pDevice->wpadev->mem_start = dev->mem_start;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 969a85960e9f..831eb4fd197d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -276,12 +276,12 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
276 * of used idx. Once lower device DMA done contiguously, we will signal KVM 276 * of used idx. Once lower device DMA done contiguously, we will signal KVM
277 * guest used idx. 277 * guest used idx.
278 */ 278 */
279static int vhost_zerocopy_signal_used(struct vhost_net *net, 279static void vhost_zerocopy_signal_used(struct vhost_net *net,
280 struct vhost_virtqueue *vq) 280 struct vhost_virtqueue *vq)
281{ 281{
282 struct vhost_net_virtqueue *nvq = 282 struct vhost_net_virtqueue *nvq =
283 container_of(vq, struct vhost_net_virtqueue, vq); 283 container_of(vq, struct vhost_net_virtqueue, vq);
284 int i; 284 int i, add;
285 int j = 0; 285 int j = 0;
286 286
287 for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) { 287 for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
@@ -289,15 +289,17 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
289 vhost_net_tx_err(net); 289 vhost_net_tx_err(net);
290 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { 290 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
291 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; 291 vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
292 vhost_add_used_and_signal(vq->dev, vq,
293 vq->heads[i].id, 0);
294 ++j; 292 ++j;
295 } else 293 } else
296 break; 294 break;
297 } 295 }
298 if (j) 296 while (j) {
299 nvq->done_idx = i; 297 add = min(UIO_MAXIOV - nvq->done_idx, j);
300 return j; 298 vhost_add_used_and_signal_n(vq->dev, vq,
299 &vq->heads[nvq->done_idx], add);
300 nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
301 j -= add;
302 }
301} 303}
302 304
303static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) 305static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
@@ -306,6 +308,11 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
306 struct vhost_virtqueue *vq = ubufs->vq; 308 struct vhost_virtqueue *vq = ubufs->vq;
307 int cnt = atomic_read(&ubufs->kref.refcount); 309 int cnt = atomic_read(&ubufs->kref.refcount);
308 310
311 /* set len to mark this desc buffers done DMA */
312 vq->heads[ubuf->desc].len = success ?
313 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
314 vhost_net_ubuf_put(ubufs);
315
309 /* 316 /*
310 * Trigger polling thread if guest stopped submitting new buffers: 317 * Trigger polling thread if guest stopped submitting new buffers:
311 * in this case, the refcount after decrement will eventually reach 1 318 * in this case, the refcount after decrement will eventually reach 1
@@ -316,10 +323,6 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
316 */ 323 */
317 if (cnt <= 2 || !(cnt % 16)) 324 if (cnt <= 2 || !(cnt % 16))
318 vhost_poll_queue(&vq->poll); 325 vhost_poll_queue(&vq->poll);
319 /* set len to mark this desc buffers done DMA */
320 vq->heads[ubuf->desc].len = success ?
321 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
322 vhost_net_ubuf_put(ubufs);
323} 326}
324 327
325/* Expects to be always run from workqueue - which acts as 328/* Expects to be always run from workqueue - which acts as
@@ -360,6 +363,13 @@ static void handle_tx(struct vhost_net *net)
360 if (zcopy) 363 if (zcopy)
361 vhost_zerocopy_signal_used(net, vq); 364 vhost_zerocopy_signal_used(net, vq);
362 365
366 /* If more outstanding DMAs, queue the work.
367 * Handle upend_idx wrap around
368 */
369 if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
370 % UIO_MAXIOV == nvq->done_idx))
371 break;
372
363 head = vhost_get_vq_desc(&net->dev, vq, vq->iov, 373 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
364 ARRAY_SIZE(vq->iov), 374 ARRAY_SIZE(vq->iov),
365 &out, &in, 375 &out, &in,
@@ -369,17 +379,6 @@ static void handle_tx(struct vhost_net *net)
369 break; 379 break;
370 /* Nothing new? Wait for eventfd to tell us they refilled. */ 380 /* Nothing new? Wait for eventfd to tell us they refilled. */
371 if (head == vq->num) { 381 if (head == vq->num) {
372 int num_pends;
373
374 /* If more outstanding DMAs, queue the work.
375 * Handle upend_idx wrap around
376 */
377 num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
378 (nvq->upend_idx - nvq->done_idx) :
379 (nvq->upend_idx + UIO_MAXIOV -
380 nvq->done_idx);
381 if (unlikely(num_pends > VHOST_MAX_PEND))
382 break;
383 if (unlikely(vhost_enable_notify(&net->dev, vq))) { 382 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
384 vhost_disable_notify(&net->dev, vq); 383 vhost_disable_notify(&net->dev, vq);
385 continue; 384 continue;
@@ -402,43 +401,36 @@ static void handle_tx(struct vhost_net *net)
402 iov_length(nvq->hdr, s), hdr_size); 401 iov_length(nvq->hdr, s), hdr_size);
403 break; 402 break;
404 } 403 }
405 zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN || 404
406 nvq->upend_idx != nvq->done_idx); 405 zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
406 && (nvq->upend_idx + 1) % UIO_MAXIOV !=
407 nvq->done_idx
408 && vhost_net_tx_select_zcopy(net);
407 409
408 /* use msg_control to pass vhost zerocopy ubuf info to skb */ 410 /* use msg_control to pass vhost zerocopy ubuf info to skb */
409 if (zcopy_used) { 411 if (zcopy_used) {
412 struct ubuf_info *ubuf;
413 ubuf = nvq->ubuf_info + nvq->upend_idx;
414
410 vq->heads[nvq->upend_idx].id = head; 415 vq->heads[nvq->upend_idx].id = head;
411 if (!vhost_net_tx_select_zcopy(net) || 416 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
412 len < VHOST_GOODCOPY_LEN) { 417 ubuf->callback = vhost_zerocopy_callback;
413 /* copy don't need to wait for DMA done */ 418 ubuf->ctx = nvq->ubufs;
414 vq->heads[nvq->upend_idx].len = 419 ubuf->desc = nvq->upend_idx;
415 VHOST_DMA_DONE_LEN; 420 msg.msg_control = ubuf;
416 msg.msg_control = NULL; 421 msg.msg_controllen = sizeof(ubuf);
417 msg.msg_controllen = 0; 422 ubufs = nvq->ubufs;
418 ubufs = NULL; 423 kref_get(&ubufs->kref);
419 } else {
420 struct ubuf_info *ubuf;
421 ubuf = nvq->ubuf_info + nvq->upend_idx;
422
423 vq->heads[nvq->upend_idx].len =
424 VHOST_DMA_IN_PROGRESS;
425 ubuf->callback = vhost_zerocopy_callback;
426 ubuf->ctx = nvq->ubufs;
427 ubuf->desc = nvq->upend_idx;
428 msg.msg_control = ubuf;
429 msg.msg_controllen = sizeof(ubuf);
430 ubufs = nvq->ubufs;
431 kref_get(&ubufs->kref);
432 }
433 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 424 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
434 } else 425 } else {
435 msg.msg_control = NULL; 426 msg.msg_control = NULL;
427 ubufs = NULL;
428 }
436 /* TODO: Check specific error and bomb out unless ENOBUFS? */ 429 /* TODO: Check specific error and bomb out unless ENOBUFS? */
437 err = sock->ops->sendmsg(NULL, sock, &msg, len); 430 err = sock->ops->sendmsg(NULL, sock, &msg, len);
438 if (unlikely(err < 0)) { 431 if (unlikely(err < 0)) {
439 if (zcopy_used) { 432 if (zcopy_used) {
440 if (ubufs) 433 vhost_net_ubuf_put(ubufs);
441 vhost_net_ubuf_put(ubufs);
442 nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) 434 nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
443 % UIO_MAXIOV; 435 % UIO_MAXIOV;
444 } 436 }
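
The reworked vhost_zerocopy_signal_used() above defers the used-ring update until after the scan, then flushes the completed heads with vhost_add_used_and_signal_n() in at most two contiguous runs so a batch can cross the UIO_MAXIOV wrap. A standalone sketch of that wrap-aware flush, with illustrative indices:

#include <stdio.h>

#define UIO_MAXIOV 1024

/* flush j completed entries starting at done_idx, splitting at the wrap */
static void flush_done(int done_idx, int j)
{
	while (j) {
		int left = UIO_MAXIOV - done_idx;
		int add = left < j ? left : j;

		printf("signal %d entries at index %d\n", add, done_idx);
		done_idx = (done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

int main(void)
{
	flush_done(1020, 10);	/* two runs: 4 at 1020, then 6 at 0 */
	return 0;
}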
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e58cf0001cee..9a9502a4aa50 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -13,7 +13,7 @@
13 13
14#include <linux/eventfd.h> 14#include <linux/eventfd.h>
15#include <linux/vhost.h> 15#include <linux/vhost.h>
16#include <linux/socket.h> /* memcpy_fromiovec */ 16#include <linux/uio.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/mmu_context.h> 18#include <linux/mmu_context.h>
19#include <linux/miscdevice.h> 19#include <linux/miscdevice.h>
@@ -1332,48 +1332,9 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
1332 * want to notify the guest, using eventfd. */ 1332 * want to notify the guest, using eventfd. */
1333int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) 1333int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1334{ 1334{
1335 struct vring_used_elem __user *used; 1335 struct vring_used_elem heads = { head, len };
1336 1336
1337 /* The virtqueue contains a ring of used buffers. Get a pointer to the 1337 return vhost_add_used_n(vq, &heads, 1);
1338 * next entry in that used ring. */
1339 used = &vq->used->ring[vq->last_used_idx % vq->num];
1340 if (__put_user(head, &used->id)) {
1341 vq_err(vq, "Failed to write used id");
1342 return -EFAULT;
1343 }
1344 if (__put_user(len, &used->len)) {
1345 vq_err(vq, "Failed to write used len");
1346 return -EFAULT;
1347 }
1348 /* Make sure buffer is written before we update index. */
1349 smp_wmb();
1350 if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1351 vq_err(vq, "Failed to increment used idx");
1352 return -EFAULT;
1353 }
1354 if (unlikely(vq->log_used)) {
1355 /* Make sure data is seen before log. */
1356 smp_wmb();
1357 /* Log used ring entry write. */
1358 log_write(vq->log_base,
1359 vq->log_addr +
1360 ((void __user *)used - (void __user *)vq->used),
1361 sizeof *used);
1362 /* Log used index update. */
1363 log_write(vq->log_base,
1364 vq->log_addr + offsetof(struct vring_used, idx),
1365 sizeof vq->used->idx);
1366 if (vq->log_ctx)
1367 eventfd_signal(vq->log_ctx, 1);
1368 }
1369 vq->last_used_idx++;
1370 /* If the driver never bothers to signal in a very long while,
1371 * used index might wrap around. If that happens, invalidate
1372 * signalled_used index we stored. TODO: make sure driver
1373 * signals at least once in 2^16 and remove this. */
1374 if (unlikely(vq->last_used_idx == vq->signalled_used))
1375 vq->signalled_used_valid = false;
1376 return 0;
1377} 1338}
1378EXPORT_SYMBOL_GPL(vhost_add_used); 1339EXPORT_SYMBOL_GPL(vhost_add_used);
1379 1340
@@ -1387,7 +1348,16 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1387 1348
1388 start = vq->last_used_idx % vq->num; 1349 start = vq->last_used_idx % vq->num;
1389 used = vq->used->ring + start; 1350 used = vq->used->ring + start;
1390 if (__copy_to_user(used, heads, count * sizeof *used)) { 1351 if (count == 1) {
1352 if (__put_user(heads[0].id, &used->id)) {
1353 vq_err(vq, "Failed to write used id");
1354 return -EFAULT;
1355 }
1356 if (__put_user(heads[0].len, &used->len)) {
1357 vq_err(vq, "Failed to write used len");
1358 return -EFAULT;
1359 }
1360 } else if (__copy_to_user(used, heads, count * sizeof *used)) {
1391 vq_err(vq, "Failed to write used"); 1361 vq_err(vq, "Failed to write used");
1392 return -EFAULT; 1362 return -EFAULT;
1393 } 1363 }